aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc
diff options
context:
space:
mode:
Diffstat (limited to 'arch/powerpc')
-rw-r--r--arch/powerpc/Kconfig19
-rw-r--r--arch/powerpc/Kconfig.debug7
-rw-r--r--arch/powerpc/Makefile1
-rw-r--r--arch/powerpc/boot/.gitignore1
-rw-r--r--arch/powerpc/boot/Makefile11
-rw-r--r--arch/powerpc/boot/dts/a4m072.dts168
-rw-r--r--arch/powerpc/boot/dts/bluestone.dts129
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi8
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi16
-rw-r--r--arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi4
-rw-r--r--arch/powerpc/boot/dts/fsl/p1010si-post.dtsi6
-rw-r--r--arch/powerpc/boot/dts/fsl/p1020si-post.dtsi10
-rw-r--r--arch/powerpc/boot/dts/fsl/p1021si-post.dtsi7
-rw-r--r--arch/powerpc/boot/dts/fsl/p1022si-post.dtsi15
-rw-r--r--arch/powerpc/boot/dts/fsl/p1023si-post.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/p2020si-post.dtsi7
-rw-r--r--arch/powerpc/boot/dts/fsl/p2041si-post.dtsi2
-rw-r--r--arch/powerpc/boot/dts/fsl/p3041si-post.dtsi2
-rw-r--r--arch/powerpc/boot/dts/fsl/p3060si-post.dtsi6
-rw-r--r--arch/powerpc/boot/dts/fsl/p5020si-post.dtsi2
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-etsec1-0.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-etsec1-1.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-etsec1-2.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-etsec1-3.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi3
-rw-r--r--arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi10
-rw-r--r--arch/powerpc/boot/dts/fsl/qoriq-mpic.dtsi6
-rw-r--r--arch/powerpc/boot/dts/ge_imp3a.dts255
-rw-r--r--arch/powerpc/boot/dts/mpc836x_mds.dts4
-rw-r--r--arch/powerpc/boot/dts/mpc8536ds.dts6
-rw-r--r--arch/powerpc/boot/dts/mpc8536ds.dtsi93
-rw-r--r--arch/powerpc/boot/dts/mpc8536ds_36b.dts8
-rw-r--r--arch/powerpc/boot/dts/mpc8548cds.dts306
-rw-r--r--arch/powerpc/boot/dts/mpc8548cds.dtsi306
-rw-r--r--arch/powerpc/boot/dts/mpc8548cds_32b.dts86
-rw-r--r--arch/powerpc/boot/dts/mpc8548cds_36b.dts86
-rw-r--r--arch/powerpc/boot/dts/mpc8572ds.dtsi50
-rw-r--r--arch/powerpc/boot/dts/p1010rdb.dtsi4
-rw-r--r--arch/powerpc/boot/dts/p1020rdb-pc.dtsi247
-rw-r--r--arch/powerpc/boot/dts/p1020rdb-pc_32b.dts90
-rw-r--r--arch/powerpc/boot/dts/p1020rdb-pc_36b.dts90
-rw-r--r--arch/powerpc/boot/dts/p1020rdb-pc_camp_core0.dts64
-rw-r--r--arch/powerpc/boot/dts/p1020rdb-pc_camp_core1.dts142
-rw-r--r--arch/powerpc/boot/dts/p1020rdb.dtsi13
-rw-r--r--arch/powerpc/boot/dts/p1021mds.dts3
-rw-r--r--arch/powerpc/boot/dts/p1021rdb.dts96
-rw-r--r--arch/powerpc/boot/dts/p1021rdb.dtsi236
-rw-r--r--arch/powerpc/boot/dts/p1021rdb_36b.dts96
-rw-r--r--arch/powerpc/boot/dts/p1022ds.dts274
-rw-r--r--arch/powerpc/boot/dts/p1022ds.dtsi234
-rw-r--r--arch/powerpc/boot/dts/p1022ds_32b.dts103
-rw-r--r--arch/powerpc/boot/dts/p1022ds_36b.dts103
-rw-r--r--arch/powerpc/boot/dts/p1025rdb.dtsi286
-rw-r--r--arch/powerpc/boot/dts/p1025rdb_32b.dts135
-rw-r--r--arch/powerpc/boot/dts/p1025rdb_36b.dts88
-rw-r--r--arch/powerpc/boot/dts/p2020ds.dtsi3
-rw-r--r--arch/powerpc/boot/dts/p2020rdb-pc.dtsi241
-rw-r--r--arch/powerpc/boot/dts/p2020rdb-pc_32b.dts96
-rw-r--r--arch/powerpc/boot/dts/p2020rdb-pc_36b.dts96
-rw-r--r--arch/powerpc/boot/dts/p2020rdb.dts7
-rwxr-xr-xarch/powerpc/boot/wrapper22
-rw-r--r--arch/powerpc/configs/85xx/ge_imp3a_defconfig257
-rw-r--r--arch/powerpc/configs/86xx/gef_ppc9a_defconfig1
-rw-r--r--arch/powerpc/configs/86xx/gef_sbc310_defconfig1
-rw-r--r--arch/powerpc/configs/86xx/gef_sbc610_defconfig2
-rw-r--r--arch/powerpc/configs/iseries_defconfig236
-rw-r--r--arch/powerpc/configs/mpc5200_defconfig27
-rw-r--r--arch/powerpc/configs/mpc85xx_defconfig1
-rw-r--r--arch/powerpc/configs/mpc85xx_smp_defconfig1
-rw-r--r--arch/powerpc/configs/ppc64_defconfig5
-rw-r--r--arch/powerpc/include/asm/abs_addr.h21
-rw-r--r--arch/powerpc/include/asm/atomic.h59
-rw-r--r--arch/powerpc/include/asm/cputable.h12
-rw-r--r--arch/powerpc/include/asm/device.h3
-rw-r--r--arch/powerpc/include/asm/dma.h4
-rw-r--r--arch/powerpc/include/asm/eeh.h134
-rw-r--r--arch/powerpc/include/asm/eeh_event.h33
-rw-r--r--arch/powerpc/include/asm/ehv_pic.h2
-rw-r--r--arch/powerpc/include/asm/exception-64s.h113
-rw-r--r--arch/powerpc/include/asm/fadump.h218
-rw-r--r--arch/powerpc/include/asm/firmware.h9
-rw-r--r--arch/powerpc/include/asm/fsl_guts.h6
-rw-r--r--arch/powerpc/include/asm/highmem.h2
-rw-r--r--arch/powerpc/include/asm/hw_irq.h63
-rw-r--r--arch/powerpc/include/asm/i8259.h2
-rw-r--r--arch/powerpc/include/asm/iommu.h1
-rw-r--r--arch/powerpc/include/asm/irq.h253
-rw-r--r--arch/powerpc/include/asm/irqflags.h37
-rw-r--r--arch/powerpc/include/asm/iseries/alpaca.h31
-rw-r--r--arch/powerpc/include/asm/iseries/hv_call.h111
-rw-r--r--arch/powerpc/include/asm/iseries/hv_call_event.h201
-rw-r--r--arch/powerpc/include/asm/iseries/hv_call_sc.h50
-rw-r--r--arch/powerpc/include/asm/iseries/hv_call_xm.h61
-rw-r--r--arch/powerpc/include/asm/iseries/hv_lp_config.h128
-rw-r--r--arch/powerpc/include/asm/iseries/hv_lp_event.h162
-rw-r--r--arch/powerpc/include/asm/iseries/hv_types.h112
-rw-r--r--arch/powerpc/include/asm/iseries/iommu.h37
-rw-r--r--arch/powerpc/include/asm/iseries/it_lp_queue.h78
-rw-r--r--arch/powerpc/include/asm/iseries/lpar_map.h85
-rw-r--r--arch/powerpc/include/asm/iseries/mf.h51
-rw-r--r--arch/powerpc/include/asm/iseries/vio.h265
-rw-r--r--arch/powerpc/include/asm/jump_label.h2
-rw-r--r--arch/powerpc/include/asm/keylargo.h2
-rw-r--r--arch/powerpc/include/asm/kvm.h46
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h98
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_32.h6
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_64.h180
-rw-r--r--arch/powerpc/include/asm/kvm_e500.h52
-rw-r--r--arch/powerpc/include/asm/kvm_host.h90
-rw-r--r--arch/powerpc/include/asm/kvm_para.h41
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h25
-rw-r--r--arch/powerpc/include/asm/lppaca.h8
-rw-r--r--arch/powerpc/include/asm/machdep.h4
-rw-r--r--arch/powerpc/include/asm/mmu-book3e.h6
-rw-r--r--arch/powerpc/include/asm/mmu-hash64.h14
-rw-r--r--arch/powerpc/include/asm/mpic.h11
-rw-r--r--arch/powerpc/include/asm/mpic_msgr.h132
-rw-r--r--arch/powerpc/include/asm/paca.h2
-rw-r--r--arch/powerpc/include/asm/pci-bridge.h16
-rw-r--r--arch/powerpc/include/asm/pci.h9
-rw-r--r--arch/powerpc/include/asm/perf_event_server.h4
-rw-r--r--arch/powerpc/include/asm/phyp_dump.h47
-rw-r--r--arch/powerpc/include/asm/ppc-opcode.h4
-rw-r--r--arch/powerpc/include/asm/ppc-pci.h96
-rw-r--r--arch/powerpc/include/asm/ppc_asm.h2
-rw-r--r--arch/powerpc/include/asm/ptrace.h20
-rw-r--r--arch/powerpc/include/asm/reg.h27
-rw-r--r--arch/powerpc/include/asm/reg_booke.h1
-rw-r--r--arch/powerpc/include/asm/rtas.h34
-rw-r--r--arch/powerpc/include/asm/smp.h1
-rw-r--r--arch/powerpc/include/asm/socket.h4
-rw-r--r--arch/powerpc/include/asm/spinlock.h5
-rw-r--r--arch/powerpc/include/asm/system.h38
-rw-r--r--arch/powerpc/include/asm/thread_info.h9
-rw-r--r--arch/powerpc/include/asm/time.h15
-rw-r--r--arch/powerpc/include/asm/udbg.h1
-rw-r--r--arch/powerpc/include/asm/vio.h10
-rw-r--r--arch/powerpc/include/asm/xics.h2
-rw-r--r--arch/powerpc/kernel/Makefile10
-rw-r--r--arch/powerpc/kernel/asm-offsets.c32
-rw-r--r--arch/powerpc/kernel/cputable.c20
-rw-r--r--arch/powerpc/kernel/crash.c2
-rw-r--r--arch/powerpc/kernel/dbell.c2
-rw-r--r--arch/powerpc/kernel/entry_32.S2
-rw-r--r--arch/powerpc/kernel/entry_64.S248
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S236
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S324
-rw-r--r--arch/powerpc/kernel/fadump.c1315
-rw-r--r--arch/powerpc/kernel/head_32.S4
-rw-r--r--arch/powerpc/kernel/head_40x.S4
-rw-r--r--arch/powerpc/kernel/head_64.S62
-rw-r--r--arch/powerpc/kernel/head_8xx.S4
-rw-r--r--arch/powerpc/kernel/head_booke.h4
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S2
-rw-r--r--arch/powerpc/kernel/ibmebus.c2
-rw-r--r--arch/powerpc/kernel/idle.c14
-rw-r--r--arch/powerpc/kernel/idle_book3e.S25
-rw-r--r--arch/powerpc/kernel/idle_power4.S24
-rw-r--r--arch/powerpc/kernel/idle_power7.S23
-rw-r--r--arch/powerpc/kernel/iommu.c8
-rw-r--r--arch/powerpc/kernel/irq.c829
-rw-r--r--arch/powerpc/kernel/isa-bridge.c3
-rw-r--r--arch/powerpc/kernel/kvm.c307
-rw-r--r--arch/powerpc/kernel/kvm_emul.S112
-rw-r--r--arch/powerpc/kernel/legacy_serial.c2
-rw-r--r--arch/powerpc/kernel/lparcfg.c108
-rw-r--r--arch/powerpc/kernel/misc.S1
-rw-r--r--arch/powerpc/kernel/of_platform.c6
-rw-r--r--arch/powerpc/kernel/paca.c12
-rw-r--r--arch/powerpc/kernel/pci-common.c101
-rw-r--r--arch/powerpc/kernel/pci_32.c6
-rw-r--r--arch/powerpc/kernel/pci_64.c7
-rw-r--r--arch/powerpc/kernel/pci_of_scan.c12
-rw-r--r--arch/powerpc/kernel/pmc.c1
-rw-r--r--arch/powerpc/kernel/process.c33
-rw-r--r--arch/powerpc/kernel/prom.c98
-rw-r--r--arch/powerpc/kernel/prom_init.c17
-rw-r--r--arch/powerpc/kernel/rtas.c39
-rw-r--r--arch/powerpc/kernel/rtas_pci.c13
-rw-r--r--arch/powerpc/kernel/setup-common.c14
-rw-r--r--arch/powerpc/kernel/setup_64.c2
-rw-r--r--arch/powerpc/kernel/signal.c25
-rw-r--r--arch/powerpc/kernel/signal.h2
-rw-r--r--arch/powerpc/kernel/signal_32.c11
-rw-r--r--arch/powerpc/kernel/sysfs.c7
-rw-r--r--arch/powerpc/kernel/time.c116
-rw-r--r--arch/powerpc/kernel/traps.c6
-rw-r--r--arch/powerpc/kernel/udbg.c3
-rw-r--r--arch/powerpc/kernel/vdso.c14
-rw-r--r--arch/powerpc/kernel/vio.c30
-rw-r--r--arch/powerpc/kernel/vmlinux.lds.S5
-rw-r--r--arch/powerpc/kvm/Kconfig1
-rw-r--r--arch/powerpc/kvm/book3s.c57
-rw-r--r--arch/powerpc/kvm/book3s_32_mmu_host.c21
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_host.c66
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c919
-rw-r--r--arch/powerpc/kvm/book3s_emulate.c8
-rw-r--r--arch/powerpc/kvm/book3s_hv.c466
-rw-r--r--arch/powerpc/kvm/book3s_hv_builtin.c209
-rw-r--r--arch/powerpc/kvm/book3s_hv_rm_mmu.c835
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S176
-rw-r--r--arch/powerpc/kvm/book3s_paired_singles.c9
-rw-r--r--arch/powerpc/kvm/book3s_pr.c182
-rw-r--r--arch/powerpc/kvm/booke.c150
-rw-r--r--arch/powerpc/kvm/booke.h4
-rw-r--r--arch/powerpc/kvm/booke_emulate.c23
-rw-r--r--arch/powerpc/kvm/booke_interrupts.S18
-rw-r--r--arch/powerpc/kvm/e500.c32
-rw-r--r--arch/powerpc/kvm/e500_emulate.c38
-rw-r--r--arch/powerpc/kvm/e500_tlb.c775
-rw-r--r--arch/powerpc/kvm/e500_tlb.h80
-rw-r--r--arch/powerpc/kvm/emulate.c61
-rw-r--r--arch/powerpc/kvm/powerpc.c148
-rw-r--r--arch/powerpc/kvm/trace.h62
-rw-r--r--arch/powerpc/lib/locks.c24
-rw-r--r--arch/powerpc/mm/dma-noncoherent.c5
-rw-r--r--arch/powerpc/mm/fault.c181
-rw-r--r--arch/powerpc/mm/fsl_booke_mmu.c19
-rw-r--r--arch/powerpc/mm/hash_utils_64.c20
-rw-r--r--arch/powerpc/mm/hugetlbpage.c9
-rw-r--r--arch/powerpc/mm/icswx.c23
-rw-r--r--arch/powerpc/mm/icswx.h6
-rw-r--r--arch/powerpc/mm/mem.c4
-rw-r--r--arch/powerpc/mm/pgtable_32.c2
-rw-r--r--arch/powerpc/mm/slb.c6
-rw-r--r--arch/powerpc/mm/slb_low.S16
-rw-r--r--arch/powerpc/mm/stab.c9
-rw-r--r--arch/powerpc/oprofile/common.c3
-rw-r--r--arch/powerpc/perf/Makefile14
-rw-r--r--arch/powerpc/perf/callchain.c (renamed from arch/powerpc/kernel/perf_callchain.c)2
-rw-r--r--arch/powerpc/perf/core-book3s.c (renamed from arch/powerpc/kernel/perf_event.c)64
-rw-r--r--arch/powerpc/perf/core-fsl-emb.c (renamed from arch/powerpc/kernel/perf_event_fsl_emb.c)0
-rw-r--r--arch/powerpc/perf/e500-pmu.c (renamed from arch/powerpc/kernel/e500-pmu.c)0
-rw-r--r--arch/powerpc/perf/mpc7450-pmu.c (renamed from arch/powerpc/kernel/mpc7450-pmu.c)0
-rw-r--r--arch/powerpc/perf/power4-pmu.c (renamed from arch/powerpc/kernel/power4-pmu.c)1
-rw-r--r--arch/powerpc/perf/power5+-pmu.c (renamed from arch/powerpc/kernel/power5+-pmu.c)0
-rw-r--r--arch/powerpc/perf/power5-pmu.c (renamed from arch/powerpc/kernel/power5-pmu.c)0
-rw-r--r--arch/powerpc/perf/power6-pmu.c (renamed from arch/powerpc/kernel/power6-pmu.c)2
-rw-r--r--arch/powerpc/perf/power7-pmu.c (renamed from arch/powerpc/kernel/power7-pmu.c)0
-rw-r--r--arch/powerpc/perf/ppc970-pmu.c (renamed from arch/powerpc/kernel/ppc970-pmu.c)3
-rw-r--r--arch/powerpc/platforms/44x/Kconfig1
-rw-r--r--arch/powerpc/platforms/44x/currituck.c2
-rw-r--r--arch/powerpc/platforms/44x/iss4xx.c3
-rw-r--r--arch/powerpc/platforms/44x/ppc44x_simple.c2
-rw-r--r--arch/powerpc/platforms/512x/mpc5121_ads_cpld.c12
-rw-r--r--arch/powerpc/platforms/52xx/media5200.c15
-rw-r--r--arch/powerpc/platforms/52xx/mpc5200_simple.c1
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_common.c10
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_gpt.c16
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_pic.c12
-rw-r--r--arch/powerpc/platforms/82xx/pq2ads-pci-pic.c14
-rw-r--r--arch/powerpc/platforms/85xx/Kconfig27
-rw-r--r--arch/powerpc/platforms/85xx/Makefile1
-rw-r--r--arch/powerpc/platforms/85xx/corenet_ds.c4
-rw-r--r--arch/powerpc/platforms/85xx/ge_imp3a.c246
-rw-r--r--arch/powerpc/platforms/85xx/ksi8560.c3
-rw-r--r--arch/powerpc/platforms/85xx/mpc8536_ds.c4
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_ads.c3
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_cds.c84
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_ds.c6
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_mds.c40
-rw-r--r--arch/powerpc/platforms/85xx/mpc85xx_rdb.c222
-rw-r--r--arch/powerpc/platforms/85xx/p1010rdb.c5
-rw-r--r--arch/powerpc/platforms/85xx/p1022_ds.c208
-rw-r--r--arch/powerpc/platforms/85xx/p1023_rds.c5
-rw-r--r--arch/powerpc/platforms/85xx/sbc8548.c3
-rw-r--r--arch/powerpc/platforms/85xx/sbc8560.c3
-rw-r--r--arch/powerpc/platforms/85xx/socrates.c3
-rw-r--r--arch/powerpc/platforms/85xx/socrates_fpga_pic.c15
-rw-r--r--arch/powerpc/platforms/85xx/stx_gp3.c3
-rw-r--r--arch/powerpc/platforms/85xx/tqm85xx.c2
-rw-r--r--arch/powerpc/platforms/85xx/xes_mpc85xx.c4
-rw-r--r--arch/powerpc/platforms/86xx/Kconfig3
-rw-r--r--arch/powerpc/platforms/86xx/Makefile7
-rw-r--r--arch/powerpc/platforms/86xx/gef_gpio.c171
-rw-r--r--arch/powerpc/platforms/86xx/gef_ppc9a.c2
-rw-r--r--arch/powerpc/platforms/86xx/gef_sbc310.c2
-rw-r--r--arch/powerpc/platforms/86xx/gef_sbc610.c2
-rw-r--r--arch/powerpc/platforms/86xx/pic.c5
-rw-r--r--arch/powerpc/platforms/8xx/Kconfig1
-rw-r--r--arch/powerpc/platforms/Kconfig11
-rw-r--r--arch/powerpc/platforms/Makefile1
-rw-r--r--arch/powerpc/platforms/cell/axon_msi.c29
-rw-r--r--arch/powerpc/platforms/cell/beat_htab.c2
-rw-r--r--arch/powerpc/platforms/cell/beat_interrupt.c16
-rw-r--r--arch/powerpc/platforms/cell/interrupt.c16
-rw-r--r--arch/powerpc/platforms/cell/setup.c3
-rw-r--r--arch/powerpc/platforms/cell/spider-pic.c14
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c15
-rw-r--r--arch/powerpc/platforms/cell/spufs/syscalls.c2
-rw-r--r--arch/powerpc/platforms/chrp/setup.c3
-rw-r--r--arch/powerpc/platforms/embedded6xx/flipper-pic.c24
-rw-r--r--arch/powerpc/platforms/embedded6xx/hlwd-pic.c29
-rw-r--r--arch/powerpc/platforms/embedded6xx/holly.c6
-rw-r--r--arch/powerpc/platforms/embedded6xx/linkstation.c3
-rw-r--r--arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c6
-rw-r--r--arch/powerpc/platforms/embedded6xx/storcenter.c3
-rw-r--r--arch/powerpc/platforms/iseries/Kconfig38
-rw-r--r--arch/powerpc/platforms/iseries/Makefile9
-rw-r--r--arch/powerpc/platforms/iseries/call_hpt.h102
-rw-r--r--arch/powerpc/platforms/iseries/call_pci.h309
-rw-r--r--arch/powerpc/platforms/iseries/call_sm.h37
-rw-r--r--arch/powerpc/platforms/iseries/dt.c643
-rw-r--r--arch/powerpc/platforms/iseries/exception.S311
-rw-r--r--arch/powerpc/platforms/iseries/exception.h58
-rw-r--r--arch/powerpc/platforms/iseries/htab.c257
-rw-r--r--arch/powerpc/platforms/iseries/hvcall.S94
-rw-r--r--arch/powerpc/platforms/iseries/hvlog.c35
-rw-r--r--arch/powerpc/platforms/iseries/hvlpconfig.c39
-rw-r--r--arch/powerpc/platforms/iseries/iommu.c260
-rw-r--r--arch/powerpc/platforms/iseries/ipl_parms.h68
-rw-r--r--arch/powerpc/platforms/iseries/irq.c400
-rw-r--r--arch/powerpc/platforms/iseries/irq.h13
-rw-r--r--arch/powerpc/platforms/iseries/it_exp_vpd_panel.h51
-rw-r--r--arch/powerpc/platforms/iseries/it_lp_naca.h80
-rw-r--r--arch/powerpc/platforms/iseries/ksyms.c21
-rw-r--r--arch/powerpc/platforms/iseries/lpardata.c318
-rw-r--r--arch/powerpc/platforms/iseries/lpevents.c341
-rw-r--r--arch/powerpc/platforms/iseries/main_store.h165
-rw-r--r--arch/powerpc/platforms/iseries/mf.c1275
-rw-r--r--arch/powerpc/platforms/iseries/misc.S26
-rw-r--r--arch/powerpc/platforms/iseries/naca.h24
-rw-r--r--arch/powerpc/platforms/iseries/pci.c919
-rw-r--r--arch/powerpc/platforms/iseries/pci.h58
-rw-r--r--arch/powerpc/platforms/iseries/proc.c120
-rw-r--r--arch/powerpc/platforms/iseries/processor_vpd.h85
-rw-r--r--arch/powerpc/platforms/iseries/release_data.h63
-rw-r--r--arch/powerpc/platforms/iseries/setup.c722
-rw-r--r--arch/powerpc/platforms/iseries/setup.h27
-rw-r--r--arch/powerpc/platforms/iseries/smp.c88
-rw-r--r--arch/powerpc/platforms/iseries/spcomm_area.h34
-rw-r--r--arch/powerpc/platforms/iseries/vio.c556
-rw-r--r--arch/powerpc/platforms/iseries/viopath.c677
-rw-r--r--arch/powerpc/platforms/iseries/vpd_areas.h88
-rw-r--r--arch/powerpc/platforms/maple/pci.c2
-rw-r--r--arch/powerpc/platforms/maple/setup.c2
-rw-r--r--arch/powerpc/platforms/pasemi/pci.c3
-rw-r--r--arch/powerpc/platforms/pasemi/setup.c2
-rw-r--r--arch/powerpc/platforms/powermac/nvram.c42
-rw-r--r--arch/powerpc/platforms/powermac/pci.c3
-rw-r--r--arch/powerpc/platforms/powermac/pic.c27
-rw-r--r--arch/powerpc/platforms/powermac/smp.c9
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c48
-rw-r--r--arch/powerpc/platforms/powernv/pci.c28
-rw-r--r--arch/powerpc/platforms/powernv/setup.c1
-rw-r--r--arch/powerpc/platforms/ps3/interrupt.c11
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig5
-rw-r--r--arch/powerpc/platforms/pseries/Makefile4
-rw-r--r--arch/powerpc/platforms/pseries/eeh.c1059
-rw-r--r--arch/powerpc/platforms/pseries/eeh_cache.c44
-rw-r--r--arch/powerpc/platforms/pseries/eeh_dev.c102
-rw-r--r--arch/powerpc/platforms/pseries/eeh_driver.c213
-rw-r--r--arch/powerpc/platforms/pseries/eeh_event.c55
-rw-r--r--arch/powerpc/platforms/pseries/eeh_pseries.c565
-rw-r--r--arch/powerpc/platforms/pseries/eeh_sysfs.c25
-rw-r--r--arch/powerpc/platforms/pseries/io_event_irq.c68
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c29
-rw-r--r--arch/powerpc/platforms/pseries/lpar.c1
-rw-r--r--arch/powerpc/platforms/pseries/msi.c2
-rw-r--r--arch/powerpc/platforms/pseries/pci_dlpar.c5
-rw-r--r--arch/powerpc/platforms/pseries/phyp_dump.c513
-rw-r--r--arch/powerpc/platforms/pseries/processor_idle.c18
-rw-r--r--arch/powerpc/platforms/pseries/ras.c195
-rw-r--r--arch/powerpc/platforms/pseries/setup.c15
-rw-r--r--arch/powerpc/platforms/pseries/suspend.c6
-rw-r--r--arch/powerpc/platforms/wsp/Kconfig1
-rw-r--r--arch/powerpc/platforms/wsp/ics.c2
-rw-r--r--arch/powerpc/platforms/wsp/opb_pic.c26
-rw-r--r--arch/powerpc/platforms/wsp/smp.c2
-rw-r--r--arch/powerpc/platforms/wsp/wsp_pci.c9
-rw-r--r--arch/powerpc/sysdev/Kconfig4
-rw-r--r--arch/powerpc/sysdev/Makefile4
-rw-r--r--arch/powerpc/sysdev/cpm1.c9
-rw-r--r--arch/powerpc/sysdev/cpm2_pic.c23
-rw-r--r--arch/powerpc/sysdev/ehv_pic.c14
-rw-r--r--arch/powerpc/sysdev/fsl_85xx_cache_sram.c1
-rw-r--r--arch/powerpc/sysdev/fsl_85xx_l2ctlr.c4
-rw-r--r--arch/powerpc/sysdev/fsl_msi.c11
-rw-r--r--arch/powerpc/sysdev/fsl_msi.h2
-rw-r--r--arch/powerpc/sysdev/fsl_pci.c53
-rw-r--r--arch/powerpc/sysdev/fsl_rio.c4
-rw-r--r--arch/powerpc/sysdev/fsl_rmu.c42
-rw-r--r--arch/powerpc/sysdev/ge/Makefile1
-rw-r--r--arch/powerpc/sysdev/ge/ge_pic.c (renamed from arch/powerpc/platforms/86xx/gef_pic.c)17
-rw-r--r--arch/powerpc/sysdev/ge/ge_pic.h (renamed from arch/powerpc/platforms/86xx/gef_pic.h)0
-rw-r--r--arch/powerpc/sysdev/i8259.c15
-rw-r--r--arch/powerpc/sysdev/ipic.c31
-rw-r--r--arch/powerpc/sysdev/ipic.h2
-rw-r--r--arch/powerpc/sysdev/mpc8xx_pic.c11
-rw-r--r--arch/powerpc/sysdev/mpic.c121
-rw-r--r--arch/powerpc/sysdev/mpic_msgr.c282
-rw-r--r--arch/powerpc/sysdev/mpic_msi.c6
-rw-r--r--arch/powerpc/sysdev/mv64x60_pic.c11
-rw-r--r--arch/powerpc/sysdev/ppc4xx_pci.c70
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe_ic.c26
-rw-r--r--arch/powerpc/sysdev/qe_lib/qe_ic.h2
-rw-r--r--arch/powerpc/sysdev/tsi108_pci.c13
-rw-r--r--arch/powerpc/sysdev/uic.c26
-rw-r--r--arch/powerpc/sysdev/xics/xics-common.c25
-rw-r--r--arch/powerpc/sysdev/xilinx_intc.c19
-rw-r--r--arch/powerpc/xmon/ppc-opc.c1
-rw-r--r--arch/powerpc/xmon/spu-opc.c1
-rw-r--r--arch/powerpc/xmon/xmon.c33
403 files changed, 14531 insertions, 16686 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 1919634a9b32..d219ebecabf0 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -134,7 +134,9 @@ config PPC
134 select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64 134 select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64
135 select HAVE_GENERIC_HARDIRQS 135 select HAVE_GENERIC_HARDIRQS
136 select HAVE_SPARSE_IRQ 136 select HAVE_SPARSE_IRQ
137 select SPARSE_IRQ
137 select IRQ_PER_CPU 138 select IRQ_PER_CPU
139 select IRQ_DOMAIN
138 select GENERIC_IRQ_SHOW 140 select GENERIC_IRQ_SHOW
139 select GENERIC_IRQ_SHOW_LEVEL 141 select GENERIC_IRQ_SHOW_LEVEL
140 select IRQ_FORCED_THREADING 142 select IRQ_FORCED_THREADING
@@ -376,13 +378,16 @@ config CRASH_DUMP
376 The same kernel binary can be used as production kernel and dump 378 The same kernel binary can be used as production kernel and dump
377 capture kernel. 379 capture kernel.
378 380
379config PHYP_DUMP 381config FA_DUMP
380 bool "Hypervisor-assisted dump (EXPERIMENTAL)" 382 bool "Firmware-assisted dump"
381 depends on PPC_PSERIES && EXPERIMENTAL 383 depends on PPC64 && PPC_RTAS && CRASH_DUMP
382 help 384 help
383 Hypervisor-assisted dump is meant to be a kdump replacement 385 A robust mechanism to get reliable kernel crash dump with
384 offering robustness and speed not possible without system 386 assistance from firmware. This approach does not use kexec,
385 hypervisor assistance. 387 instead firmware assists in booting the kdump kernel
388 while preserving memory contents. Firmware-assisted dump
389 is meant to be a kdump replacement offering robustness and
390 speed not possible without system firmware assistance.
386 391
387 If unsure, say "N" 392 If unsure, say "N"
388 393
@@ -611,7 +616,7 @@ endmenu
611 616
612config ISA_DMA_API 617config ISA_DMA_API
613 bool 618 bool
614 default !PPC_ISERIES || PCI 619 default PCI
615 620
616menu "Bus options" 621menu "Bus options"
617 622
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 4ccb2a009f74..72d55dbc6119 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -196,13 +196,6 @@ config PPC_EARLY_DEBUG_MAPLE
196 help 196 help
197 Select this to enable early debugging for Maple. 197 Select this to enable early debugging for Maple.
198 198
199config PPC_EARLY_DEBUG_ISERIES
200 bool "iSeries HV Console"
201 depends on PPC_ISERIES
202 help
203 Select this to enable early debugging for legacy iSeries. You need
204 to hit "Ctrl-x Ctrl-x" to see the messages on the console.
205
206config PPC_EARLY_DEBUG_PAS_REALMODE 199config PPC_EARLY_DEBUG_PAS_REALMODE
207 bool "PA Semi real mode" 200 bool "PA Semi real mode"
208 depends on PPC_PASEMI 201 depends on PPC_PASEMI
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index b8b105c01c64..6524c6e21896 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -157,6 +157,7 @@ core-y += arch/powerpc/kernel/ \
157 arch/powerpc/net/ 157 arch/powerpc/net/
158core-$(CONFIG_XMON) += arch/powerpc/xmon/ 158core-$(CONFIG_XMON) += arch/powerpc/xmon/
159core-$(CONFIG_KVM) += arch/powerpc/kvm/ 159core-$(CONFIG_KVM) += arch/powerpc/kvm/
160core-$(CONFIG_PERF_EVENTS) += arch/powerpc/perf/
160 161
161drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/ 162drivers-$(CONFIG_OPROFILE) += arch/powerpc/oprofile/
162 163
diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore
index 12da77ec0228..1c1aadc8c48f 100644
--- a/arch/powerpc/boot/.gitignore
+++ b/arch/powerpc/boot/.gitignore
@@ -27,7 +27,6 @@ zImage.bin.*
27zImage.chrp 27zImage.chrp
28zImage.coff 28zImage.coff
29zImage.holly 29zImage.holly
30zImage.iseries
31zImage.*lds 30zImage.*lds
32zImage.miboot 31zImage.miboot
33zImage.pmac 32zImage.pmac
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 8844a17ce8ed..e8461cb18d04 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -184,7 +184,6 @@ image-$(CONFIG_PPC_EFIKA) += zImage.chrp
184image-$(CONFIG_PPC_PMAC) += zImage.pmac 184image-$(CONFIG_PPC_PMAC) += zImage.pmac
185image-$(CONFIG_PPC_HOLLY) += dtbImage.holly 185image-$(CONFIG_PPC_HOLLY) += dtbImage.holly
186image-$(CONFIG_PPC_PRPMC2800) += dtbImage.prpmc2800 186image-$(CONFIG_PPC_PRPMC2800) += dtbImage.prpmc2800
187image-$(CONFIG_PPC_ISERIES) += zImage.iseries
188image-$(CONFIG_DEFAULT_UIMAGE) += uImage 187image-$(CONFIG_DEFAULT_UIMAGE) += uImage
189image-$(CONFIG_EPAPR_BOOT) += zImage.epapr 188image-$(CONFIG_EPAPR_BOOT) += zImage.epapr
190 189
@@ -247,7 +246,7 @@ image-$(CONFIG_ASP834x) += dtbImage.asp834x-redboot
247image-$(CONFIG_MPC8540_ADS) += cuImage.mpc8540ads 246image-$(CONFIG_MPC8540_ADS) += cuImage.mpc8540ads
248image-$(CONFIG_MPC8560_ADS) += cuImage.mpc8560ads 247image-$(CONFIG_MPC8560_ADS) += cuImage.mpc8560ads
249image-$(CONFIG_MPC85xx_CDS) += cuImage.mpc8541cds \ 248image-$(CONFIG_MPC85xx_CDS) += cuImage.mpc8541cds \
250 cuImage.mpc8548cds \ 249 cuImage.mpc8548cds_32b \
251 cuImage.mpc8555cds 250 cuImage.mpc8555cds
252image-$(CONFIG_MPC85xx_MDS) += cuImage.mpc8568mds 251image-$(CONFIG_MPC85xx_MDS) += cuImage.mpc8568mds
253image-$(CONFIG_MPC85xx_DS) += cuImage.mpc8544ds \ 252image-$(CONFIG_MPC85xx_DS) += cuImage.mpc8544ds \
@@ -311,12 +310,6 @@ $(obj)/dtbImage.%: vmlinux $(wrapperbits) $(obj)/%.dtb
311$(obj)/vmlinux.strip: vmlinux 310$(obj)/vmlinux.strip: vmlinux
312 $(STRIP) -s -R .comment $< -o $@ 311 $(STRIP) -s -R .comment $< -o $@
313 312
314# The iseries hypervisor won't take an ET_DYN executable, so this
315# changes the type (byte 17) in the file to ET_EXEC (2).
316$(obj)/zImage.iseries: vmlinux
317 $(STRIP) -s -R .comment $< -o $@
318 printf "\x02" | dd of=$@ conv=notrunc bs=1 seek=17
319
320$(obj)/uImage: vmlinux $(wrapperbits) 313$(obj)/uImage: vmlinux $(wrapperbits)
321 $(call if_changed,wrap,uboot) 314 $(call if_changed,wrap,uboot)
322 315
@@ -364,7 +357,7 @@ install: $(CONFIGURE) $(addprefix $(obj)/, $(image-y))
364# anything not in $(targets) 357# anything not in $(targets)
365clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \ 358clean-files += $(image-) $(initrd-) cuImage.* dtbImage.* treeImage.* \
366 zImage zImage.initrd zImage.chrp zImage.coff zImage.holly \ 359 zImage zImage.initrd zImage.chrp zImage.coff zImage.holly \
367 zImage.iseries zImage.miboot zImage.pmac zImage.pseries \ 360 zImage.miboot zImage.pmac zImage.pseries \
368 zImage.maple simpleImage.* otheros.bld *.dtb 361 zImage.maple simpleImage.* otheros.bld *.dtb
369 362
370# clean up files cached by wrapper 363# clean up files cached by wrapper
diff --git a/arch/powerpc/boot/dts/a4m072.dts b/arch/powerpc/boot/dts/a4m072.dts
new file mode 100644
index 000000000000..fabe7b7d5f13
--- /dev/null
+++ b/arch/powerpc/boot/dts/a4m072.dts
@@ -0,0 +1,168 @@
1/*
2 * a4m072 board Device Tree Source
3 *
4 * Copyright (C) 2011 DENX Software Engineering GmbH
5 * Heiko Schocher <hs@denx.de>
6 *
7 * Copyright (C) 2007 Semihalf
8 * Marian Balakowicz <m8@semihalf.com>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16/include/ "mpc5200b.dtsi"
17
18/ {
19 model = "anonymous,a4m072";
20 compatible = "anonymous,a4m072";
21
22 soc5200@f0000000 {
23 #address-cells = <1>;
24 #size-cells = <1>;
25 compatible = "fsl,mpc5200b-immr";
26 ranges = <0 0xf0000000 0x0000c000>;
27 reg = <0xf0000000 0x00000100>;
28 bus-frequency = <0>; /* From boot loader */
29 system-frequency = <0>; /* From boot loader */
30
31 cdm@200 {
32 fsl,init-ext-48mhz-en = <0x0>;
33 fsl,init-fd-enable = <0x01>;
34 fsl,init-fd-counters = <0x3333>;
35 };
36
37 timer@600 {
38 fsl,has-wdt;
39 };
40
41 gpt3: timer@630 { /* General Purpose Timer in GPIO mode */
42 compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
43 gpio-controller;
44 #gpio-cells = <2>;
45 };
46
47 gpt4: timer@640 { /* General Purpose Timer in GPIO mode */
48 compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
49 gpio-controller;
50 #gpio-cells = <2>;
51 };
52
53 gpt5: timer@650 { /* General Purpose Timer in GPIO mode */
54 compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio";
55 gpio-controller;
56 #gpio-cells = <2>;
57 };
58
59 spi@f00 {
60 status = "disabled";
61 };
62
63 psc@2000 {
64 compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
65 reg = <0x2000 0x100>;
66 interrupts = <2 1 0>;
67 };
68
69 psc@2200 {
70 compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
71 reg = <0x2200 0x100>;
72 interrupts = <2 2 0>;
73 };
74
75 psc@2400 {
76 compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
77 reg = <0x2400 0x100>;
78 interrupts = <2 3 0>;
79 };
80
81 psc@2600 {
82 status = "disabled";
83 };
84
85 psc@2800 {
86 status = "disabled";
87 };
88
89 psc@2c00 {
90 compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart";
91 reg = <0x2c00 0x100>;
92 interrupts = <2 4 0>;
93 };
94
95 ethernet@3000 {
96 phy-handle = <&phy0>;
97 };
98
99 mdio@3000 {
100 phy0: ethernet-phy@1f {
101 reg = <0x1f>;
102 interrupts = <1 2 0>; /* IRQ 2 active low */
103 };
104 };
105
106 i2c@3d00 {
107 status = "disabled";
108 };
109
110 i2c@3d40 {
111 hwmon@2e {
112 compatible = "nsc,lm87";
113 reg = <0x2e>;
114 };
115 rtc@51 {
116 compatible = "nxp,rtc8564";
117 reg = <0x51>;
118 };
119 };
120 };
121
122 localbus {
123 compatible = "fsl,mpc5200b-lpb","simple-bus";
124 #address-cells = <2>;
125 #size-cells = <1>;
126 ranges = <0 0 0xfe000000 0x02000000
127 1 0 0x62000000 0x00400000
128 2 0 0x64000000 0x00200000
129 3 0 0x66000000 0x01000000
130 6 0 0x68000000 0x01000000
131 7 0 0x6a000000 0x00000004>;
132
133 flash@0,0 {
134 compatible = "cfi-flash";
135 reg = <0 0 0x02000000>;
136 bank-width = <2>;
137 #size-cells = <1>;
138 #address-cells = <1>;
139 };
140 sram0@1,0 {
141 compatible = "mtd-ram";
142 reg = <1 0x00000 0x00400000>;
143 bank-width = <2>;
144 };
145 };
146
147 pci@f0000d00 {
148 #interrupt-cells = <1>;
149 #size-cells = <2>;
150 #address-cells = <3>;
151 device_type = "pci";
152 compatible = "fsl,mpc5200-pci";
153 reg = <0xf0000d00 0x100>;
154 interrupt-map-mask = <0xf800 0 0 7>;
155 interrupt-map = <
156 /* IDSEL 0x16 */
157 0xc000 0 0 1 &mpc5200_pic 1 3 3
158 0xc000 0 0 2 &mpc5200_pic 1 3 3
159 0xc000 0 0 3 &mpc5200_pic 1 3 3
160 0xc000 0 0 4 &mpc5200_pic 1 3 3>;
161 clock-frequency = <0>; /* From boot loader */
162 interrupts = <2 8 0 2 9 0 2 10 0>;
163 bus-range = <0 0>;
164 ranges = <0x42000000 0 0x80000000 0x80000000 0 0x10000000
165 0x02000000 0 0x90000000 0x90000000 0 0x10000000
166 0x01000000 0 0x00000000 0xa0000000 0 0x01000000>;
167 };
168};
diff --git a/arch/powerpc/boot/dts/bluestone.dts b/arch/powerpc/boot/dts/bluestone.dts
index 2a56a0dbd1f7..7bda373f10ef 100644
--- a/arch/powerpc/boot/dts/bluestone.dts
+++ b/arch/powerpc/boot/dts/bluestone.dts
@@ -33,7 +33,7 @@
33 aliases { 33 aliases {
34 ethernet0 = &EMAC0; 34 ethernet0 = &EMAC0;
35 serial0 = &UART0; 35 serial0 = &UART0;
36 //serial1 = &UART1; --gcl missing UART1 label 36 serial1 = &UART1;
37 }; 37 };
38 38
39 cpus { 39 cpus {
@@ -52,7 +52,7 @@
52 d-cache-size = <32768>; 52 d-cache-size = <32768>;
53 dcr-controller; 53 dcr-controller;
54 dcr-access-method = "native"; 54 dcr-access-method = "native";
55 //next-level-cache = <&L2C0>; --gcl missing L2C0 label 55 next-level-cache = <&L2C0>;
56 }; 56 };
57 }; 57 };
58 58
@@ -117,6 +117,16 @@
117 dcr-reg = <0x00c 0x002>; 117 dcr-reg = <0x00c 0x002>;
118 }; 118 };
119 119
120 L2C0: l2c {
121 compatible = "ibm,l2-cache-apm82181", "ibm,l2-cache";
122 dcr-reg = <0x020 0x008
123 0x030 0x008>;
124 cache-line-size = <32>;
125 cache-size = <262144>;
126 interrupt-parent = <&UIC1>;
127 interrupts = <11 1>;
128 };
129
120 plb { 130 plb {
121 compatible = "ibm,plb4"; 131 compatible = "ibm,plb4";
122 #address-cells = <2>; 132 #address-cells = <2>;
@@ -182,6 +192,53 @@
182 reg = <0x001a0000 0x00060000>; 192 reg = <0x001a0000 0x00060000>;
183 }; 193 };
184 }; 194 };
195
196 ndfc@1,0 {
197 compatible = "ibm,ndfc";
198 reg = <0x00000003 0x00000000 0x00002000>;
199 ccr = <0x00001000>;
200 bank-settings = <0x80002222>;
201 #address-cells = <1>;
202 #size-cells = <1>;
203 /* 2Gb Nand Flash */
204 nand {
205 #address-cells = <1>;
206 #size-cells = <1>;
207
208 partition@0 {
209 label = "firmware";
210 reg = <0x00000000 0x00C00000>;
211 };
212 partition@c00000 {
213 label = "environment";
214 reg = <0x00C00000 0x00B00000>;
215 };
216 partition@1700000 {
217 label = "kernel";
218 reg = <0x01700000 0x00E00000>;
219 };
220 partition@2500000 {
221 label = "root";
222 reg = <0x02500000 0x08200000>;
223 };
224 partition@a700000 {
225 label = "device-tree";
226 reg = <0x0A700000 0x00B00000>;
227 };
228 partition@b200000 {
229 label = "config";
230 reg = <0x0B200000 0x00D00000>;
231 };
232 partition@bf00000 {
233 label = "diag";
234 reg = <0x0BF00000 0x00C00000>;
235 };
236 partition@cb00000 {
237 label = "vendor";
238 reg = <0x0CB00000 0x3500000>;
239 };
240 };
241 };
185 }; 242 };
186 243
187 UART0: serial@ef600300 { 244 UART0: serial@ef600300 {
@@ -195,11 +252,36 @@
195 interrupts = <0x1 0x4>; 252 interrupts = <0x1 0x4>;
196 }; 253 };
197 254
255 UART1: serial@ef600400 {
256 device_type = "serial";
257 compatible = "ns16550";
258 reg = <0xef600400 0x00000008>;
259 virtual-reg = <0xef600400>;
260 clock-frequency = <0>; /* Filled in by U-Boot */
261 current-speed = <0>; /* Filled in by U-Boot */
262 interrupt-parent = <&UIC0>;
263 interrupts = <0x1 0x4>;
264 };
265
198 IIC0: i2c@ef600700 { 266 IIC0: i2c@ef600700 {
199 compatible = "ibm,iic"; 267 compatible = "ibm,iic";
200 reg = <0xef600700 0x00000014>; 268 reg = <0xef600700 0x00000014>;
201 interrupt-parent = <&UIC0>; 269 interrupt-parent = <&UIC0>;
202 interrupts = <0x2 0x4>; 270 interrupts = <0x2 0x4>;
271 #address-cells = <1>;
272 #size-cells = <0>;
273 rtc@68 {
274 compatible = "stm,m41t80";
275 reg = <0x68>;
276 interrupt-parent = <&UIC0>;
277 interrupts = <0x9 0x8>;
278 };
279 sttm@4C {
280 compatible = "adm,adm1032";
281 reg = <0x4C>;
282 interrupt-parent = <&UIC1>;
283 interrupts = <0x1E 0x8>; /* CPU_THERNAL_L */
284 };
203 }; 285 };
204 286
205 IIC1: i2c@ef600800 { 287 IIC1: i2c@ef600800 {
@@ -222,7 +304,7 @@
222 304
223 EMAC0: ethernet@ef600c00 { 305 EMAC0: ethernet@ef600c00 {
224 device_type = "network"; 306 device_type = "network";
225 compatible = "ibm,emac4sync"; 307 compatible = "ibm,emac-apm821xx", "ibm,emac4sync";
226 interrupt-parent = <&EMAC0>; 308 interrupt-parent = <&EMAC0>;
227 interrupts = <0x0 0x1>; 309 interrupts = <0x0 0x1>;
228 #interrupt-cells = <1>; 310 #interrupt-cells = <1>;
@@ -250,5 +332,46 @@
250 }; 332 };
251 }; 333 };
252 334
335 PCIE0: pciex@d00000000 {
336 device_type = "pci";
337 #interrupt-cells = <1>;
338 #size-cells = <2>;
339 #address-cells = <3>;
340 compatible = "ibm,plb-pciex-apm821xx", "ibm,plb-pciex";
341 primary;
342 port = <0x0>; /* port number */
343 reg = <0x0000000d 0x00000000 0x20000000 /* Config space access */
344 0x0000000c 0x08010000 0x00001000>; /* Registers */
345 dcr-reg = <0x100 0x020>;
346 sdr-base = <0x300>;
347
348 /* Outbound ranges, one memory and one IO,
349 * later cannot be changed
350 */
351 ranges = <0x02000000 0x00000000 0x80000000 0x0000000e 0x00000000 0x00000000 0x80000000
352 0x02000000 0x00000000 0x00000000 0x0000000f 0x00000000 0x00000000 0x00100000
353 0x01000000 0x00000000 0x00000000 0x0000000f 0x80000000 0x00000000 0x00010000>;
354
355 /* Inbound 2GB range starting at 0 */
356 dma-ranges = <0x42000000 0x0 0x0 0x0 0x0 0x0 0x80000000>;
357
358 /* This drives busses 40 to 0x7f */
359 bus-range = <0x40 0x7f>;
360
361 /* Legacy interrupts (note the weird polarity, the bridge seems
362 * to invert PCIe legacy interrupts).
363 * We are de-swizzling here because the numbers are actually for
364 * port of the root complex virtual P2P bridge. But I want
365 * to avoid putting a node for it in the tree, so the numbers
366 * below are basically de-swizzled numbers.
367 * The real slot is on idsel 0, so the swizzling is 1:1
368 */
369 interrupt-map-mask = <0x0 0x0 0x0 0x7>;
370 interrupt-map = <
371 0x0 0x0 0x0 0x1 &UIC3 0xc 0x4 /* swizzled int A */
372 0x0 0x0 0x0 0x2 &UIC3 0xd 0x4 /* swizzled int B */
373 0x0 0x0 0x0 0x3 &UIC3 0xe 0x4 /* swizzled int C */
374 0x0 0x0 0x0 0x4 &UIC3 0xf 0x4 /* swizzled int D */>;
375 };
253 }; 376 };
254}; 377};
diff --git a/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi
index 89af62637707..c8b2daa40ac8 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/mpc8536si-post.dtsi
@@ -202,7 +202,7 @@
202/include/ "pq3-etsec1-timer-0.dtsi" 202/include/ "pq3-etsec1-timer-0.dtsi"
203 203
204 usb@22000 { 204 usb@22000 {
205 compatible = "fsl,mpc8536-usb2-mph", "fsl-usb2-mph"; 205 compatible = "fsl-usb2-mph-v1.2", "fsl,mpc8536-usb2-mph", "fsl-usb2-mph";
206 reg = <0x22000 0x1000>; 206 reg = <0x22000 0x1000>;
207 #address-cells = <1>; 207 #address-cells = <1>;
208 #size-cells = <0>; 208 #size-cells = <0>;
@@ -210,7 +210,7 @@
210 }; 210 };
211 211
212 usb@23000 { 212 usb@23000 {
213 compatible = "fsl,mpc8536-usb2-mph", "fsl-usb2-mph"; 213 compatible = "fsl-usb2-mph-v1.2", "fsl,mpc8536-usb2-mph", "fsl-usb2-mph";
214 reg = <0x23000 0x1000>; 214 reg = <0x23000 0x1000>;
215 #address-cells = <1>; 215 #address-cells = <1>;
216 #size-cells = <0>; 216 #size-cells = <0>;
@@ -236,6 +236,10 @@
236 }; 236 };
237 237
238/include/ "pq3-esdhc-0.dtsi" 238/include/ "pq3-esdhc-0.dtsi"
239 sdhc@2e000 {
240 compatible = "fsl,mpc8536-esdhc", "fsl,esdhc";
241 };
242
239/include/ "pq3-sec3.0-0.dtsi" 243/include/ "pq3-sec3.0-0.dtsi"
240/include/ "pq3-mpic.dtsi" 244/include/ "pq3-mpic.dtsi"
241/include/ "pq3-mpic-timer-B.dtsi" 245/include/ "pq3-mpic-timer-B.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi b/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi
index 9d8023a69d7d..579d76cb8e32 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/mpc8548si-post.dtsi
@@ -89,6 +89,21 @@
89 }; 89 };
90}; 90};
91 91
92&rio {
93 compatible = "fsl,srio";
94 interrupts = <48 2 0 0>;
95 #address-cells = <2>;
96 #size-cells = <2>;
97 fsl,srio-rmu-handle = <&rmu>;
98 ranges;
99
100 port1 {
101 #address-cells = <2>;
102 #size-cells = <2>;
103 cell-index = <1>;
104 };
105};
106
92&soc { 107&soc {
93 #address-cells = <1>; 108 #address-cells = <1>;
94 #size-cells = <1>; 109 #size-cells = <1>;
@@ -134,6 +149,7 @@
134 149
135/include/ "pq3-sec2.1-0.dtsi" 150/include/ "pq3-sec2.1-0.dtsi"
136/include/ "pq3-mpic.dtsi" 151/include/ "pq3-mpic.dtsi"
152/include/ "pq3-rmu-0.dtsi"
137 153
138 global-utilities@e0000 { 154 global-utilities@e0000 {
139 compatible = "fsl,mpc8548-guts"; 155 compatible = "fsl,mpc8548-guts";
diff --git a/arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi b/arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi
index 289f1218d755..720422d83529 100644
--- a/arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi
+++ b/arch/powerpc/boot/dts/fsl/mpc8548si-pre.dtsi
@@ -43,7 +43,9 @@
43 serial0 = &serial0; 43 serial0 = &serial0;
44 serial1 = &serial1; 44 serial1 = &serial1;
45 ethernet0 = &enet0; 45 ethernet0 = &enet0;
46 ethernet1 = &enet2; 46 ethernet1 = &enet1;
47 ethernet2 = &enet2;
48 ethernet3 = &enet3;
47 pci0 = &pci0; 49 pci0 = &pci0;
48 pci1 = &pci1; 50 pci1 = &pci1;
49 pci2 = &pci2; 51 pci2 = &pci2;
diff --git a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
index bd9e163c764b..0bde9ee8afaf 100644
--- a/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1010si-post.dtsi
@@ -156,9 +156,13 @@
156 156
157/include/ "pq3-dma-0.dtsi" 157/include/ "pq3-dma-0.dtsi"
158/include/ "pq3-usb2-dr-0.dtsi" 158/include/ "pq3-usb2-dr-0.dtsi"
159 usb@22000 {
160 compatible = "fsl-usb2-dr-v1.6", "fsl-usb2-dr";
161 };
159/include/ "pq3-esdhc-0.dtsi" 162/include/ "pq3-esdhc-0.dtsi"
160 sdhc@2e000 { 163 sdhc@2e000 {
161 fsl,sdhci-auto-cmd12; 164 compatible = "fsl,p1010-esdhc", "fsl,esdhc";
165 sdhci,auto-cmd12;
162 }; 166 };
163 167
164/include/ "pq3-sec4.4-0.dtsi" 168/include/ "pq3-sec4.4-0.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi
index fc924c5ffebe..68cc5e7f6477 100644
--- a/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1020si-post.dtsi
@@ -142,9 +142,19 @@
142 142
143/include/ "pq3-dma-0.dtsi" 143/include/ "pq3-dma-0.dtsi"
144/include/ "pq3-usb2-dr-0.dtsi" 144/include/ "pq3-usb2-dr-0.dtsi"
145 usb@22000 {
146 compatible = "fsl-usb2-dr-v1.6", "fsl-usb2-dr";
147 };
145/include/ "pq3-usb2-dr-1.dtsi" 148/include/ "pq3-usb2-dr-1.dtsi"
149 usb@23000 {
150 compatible = "fsl-usb2-dr-v1.6", "fsl-usb2-dr";
151 };
146 152
147/include/ "pq3-esdhc-0.dtsi" 153/include/ "pq3-esdhc-0.dtsi"
154 sdhc@2e000 {
155 compatible = "fsl,p1020-esdhc", "fsl,esdhc";
156 sdhci,auto-cmd12;
157 };
148/include/ "pq3-sec3.3-0.dtsi" 158/include/ "pq3-sec3.3-0.dtsi"
149 159
150/include/ "pq3-mpic.dtsi" 160/include/ "pq3-mpic.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
index 38ba54d1e32e..4252ef85fb7a 100644
--- a/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1021si-post.dtsi
@@ -142,8 +142,15 @@
142 142
143/include/ "pq3-dma-0.dtsi" 143/include/ "pq3-dma-0.dtsi"
144/include/ "pq3-usb2-dr-0.dtsi" 144/include/ "pq3-usb2-dr-0.dtsi"
145 usb@22000 {
146 compatible = "fsl-usb2-dr-v1.6", "fsl-usb2-dr";
147 };
145 148
146/include/ "pq3-esdhc-0.dtsi" 149/include/ "pq3-esdhc-0.dtsi"
150 sdhc@2e000 {
151 sdhci,auto-cmd12;
152 };
153
147/include/ "pq3-sec3.3-0.dtsi" 154/include/ "pq3-sec3.3-0.dtsi"
148 155
149/include/ "pq3-mpic.dtsi" 156/include/ "pq3-mpic.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
index 16239b199d0a..06216b8c0af5 100644
--- a/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1022si-post.dtsi
@@ -35,7 +35,11 @@
35&lbc { 35&lbc {
36 #address-cells = <2>; 36 #address-cells = <2>;
37 #size-cells = <1>; 37 #size-cells = <1>;
38 compatible = "fsl,p1022-elbc", "fsl,elbc", "simple-bus"; 38 /*
39 * The localbus on the P1022 is not a simple-bus because of the eLBC
40 * pin muxing when the DIU is enabled.
41 */
42 compatible = "fsl,p1022-elbc", "fsl,elbc";
39 interrupts = <19 2 0 0>; 43 interrupts = <19 2 0 0>;
40}; 44};
41 45
@@ -199,11 +203,18 @@
199 203
200/include/ "pq3-dma-0.dtsi" 204/include/ "pq3-dma-0.dtsi"
201/include/ "pq3-usb2-dr-0.dtsi" 205/include/ "pq3-usb2-dr-0.dtsi"
206 usb@22000 {
207 compatible = "fsl-usb2-dr-v1.6", "fsl-usb2-dr";
208 };
202/include/ "pq3-usb2-dr-1.dtsi" 209/include/ "pq3-usb2-dr-1.dtsi"
210 usb@23000 {
211 compatible = "fsl-usb2-dr-v1.6", "fsl-usb2-dr";
212 };
203 213
204/include/ "pq3-esdhc-0.dtsi" 214/include/ "pq3-esdhc-0.dtsi"
205 sdhc@2e000 { 215 sdhc@2e000 {
206 fsl,sdhci-auto-cmd12; 216 compatible = "fsl,p1022-esdhc", "fsl,esdhc";
217 sdhci,auto-cmd12;
207 }; 218 };
208 219
209/include/ "pq3-sec3.3-0.dtsi" 220/include/ "pq3-sec3.3-0.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi b/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
index b06bb4cc1fe8..941fa159cefb 100644
--- a/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p1023si-post.dtsi
@@ -142,6 +142,9 @@
142 142
143/include/ "pq3-dma-0.dtsi" 143/include/ "pq3-dma-0.dtsi"
144/include/ "pq3-usb2-dr-0.dtsi" 144/include/ "pq3-usb2-dr-0.dtsi"
145 usb@22000 {
146 compatible = "fsl-usb2-dr-v1.6", "fsl-usb2-dr";
147 };
145 148
146 crypto: crypto@300000 { 149 crypto: crypto@300000 {
147 compatible = "fsl,sec-v4.2", "fsl,sec-v4.0"; 150 compatible = "fsl,sec-v4.2", "fsl,sec-v4.0";
diff --git a/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi
index c041050561a7..884e01bcb243 100644
--- a/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p2020si-post.dtsi
@@ -171,6 +171,9 @@
171 171
172/include/ "pq3-dma-0.dtsi" 172/include/ "pq3-dma-0.dtsi"
173/include/ "pq3-usb2-dr-0.dtsi" 173/include/ "pq3-usb2-dr-0.dtsi"
174 usb@22000 {
175 compatible = "fsl-usb2-dr-v1.6", "fsl-usb2-dr";
176 };
174/include/ "pq3-etsec1-0.dtsi" 177/include/ "pq3-etsec1-0.dtsi"
175/include/ "pq3-etsec1-timer-0.dtsi" 178/include/ "pq3-etsec1-timer-0.dtsi"
176 179
@@ -182,6 +185,10 @@
182/include/ "pq3-etsec1-1.dtsi" 185/include/ "pq3-etsec1-1.dtsi"
183/include/ "pq3-etsec1-2.dtsi" 186/include/ "pq3-etsec1-2.dtsi"
184/include/ "pq3-esdhc-0.dtsi" 187/include/ "pq3-esdhc-0.dtsi"
188 sdhc@2e000 {
189 compatible = "fsl,p2020-esdhc", "fsl,esdhc";
190 };
191
185/include/ "pq3-sec3.1-0.dtsi" 192/include/ "pq3-sec3.1-0.dtsi"
186/include/ "pq3-mpic.dtsi" 193/include/ "pq3-mpic.dtsi"
187/include/ "pq3-mpic-timer-B.dtsi" 194/include/ "pq3-mpic-timer-B.dtsi"
diff --git a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
index 234a399ddeb2..531eab82c6c9 100644
--- a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
@@ -309,12 +309,14 @@
309/include/ "qoriq-gpio-0.dtsi" 309/include/ "qoriq-gpio-0.dtsi"
310/include/ "qoriq-usb2-mph-0.dtsi" 310/include/ "qoriq-usb2-mph-0.dtsi"
311 usb0: usb@210000 { 311 usb0: usb@210000 {
312 compatible = "fsl-usb2-mph-v1.6", "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
312 phy_type = "utmi"; 313 phy_type = "utmi";
313 port0; 314 port0;
314 }; 315 };
315 316
316/include/ "qoriq-usb2-dr-0.dtsi" 317/include/ "qoriq-usb2-dr-0.dtsi"
317 usb1: usb@211000 { 318 usb1: usb@211000 {
319 compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
318 dr_mode = "host"; 320 dr_mode = "host";
319 phy_type = "utmi"; 321 phy_type = "utmi";
320 }; 322 };
diff --git a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
index d41d08de7f7e..af4ebc8009e3 100644
--- a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
@@ -336,12 +336,14 @@
336/include/ "qoriq-gpio-0.dtsi" 336/include/ "qoriq-gpio-0.dtsi"
337/include/ "qoriq-usb2-mph-0.dtsi" 337/include/ "qoriq-usb2-mph-0.dtsi"
338 usb0: usb@210000 { 338 usb0: usb@210000 {
339 compatible = "fsl-usb2-mph-v1.6", "fsl-usb2-mph";
339 phy_type = "utmi"; 340 phy_type = "utmi";
340 port0; 341 port0;
341 }; 342 };
342 343
343/include/ "qoriq-usb2-dr-0.dtsi" 344/include/ "qoriq-usb2-dr-0.dtsi"
344 usb1: usb@211000 { 345 usb1: usb@211000 {
346 compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
345 dr_mode = "host"; 347 dr_mode = "host";
346 phy_type = "utmi"; 348 phy_type = "utmi";
347 }; 349 };
diff --git a/arch/powerpc/boot/dts/fsl/p3060si-post.dtsi b/arch/powerpc/boot/dts/fsl/p3060si-post.dtsi
index a63edd195ae5..b3e56929eee2 100644
--- a/arch/powerpc/boot/dts/fsl/p3060si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p3060si-post.dtsi
@@ -291,6 +291,12 @@
291/include/ "qoriq-duart-1.dtsi" 291/include/ "qoriq-duart-1.dtsi"
292/include/ "qoriq-gpio-0.dtsi" 292/include/ "qoriq-gpio-0.dtsi"
293/include/ "qoriq-usb2-mph-0.dtsi" 293/include/ "qoriq-usb2-mph-0.dtsi"
294 usb@210000 {
295 compatible = "fsl-usb2-mph-v2.2", "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
296 };
294/include/ "qoriq-usb2-dr-0.dtsi" 297/include/ "qoriq-usb2-dr-0.dtsi"
298 usb@211000 {
299 compatible = "fsl-usb2-dr-v2.2", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
300 };
295/include/ "qoriq-sec4.1-0.dtsi" 301/include/ "qoriq-sec4.1-0.dtsi"
296}; 302};
diff --git a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
index 914074b91a85..64b6abea8464 100644
--- a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
@@ -339,12 +339,14 @@
339/include/ "qoriq-gpio-0.dtsi" 339/include/ "qoriq-gpio-0.dtsi"
340/include/ "qoriq-usb2-mph-0.dtsi" 340/include/ "qoriq-usb2-mph-0.dtsi"
341 usb0: usb@210000 { 341 usb0: usb@210000 {
342 compatible = "fsl-usb2-mph-v1.6", "fsl,mpc85xx-usb2-mph", "fsl-usb2-mph";
342 phy_type = "utmi"; 343 phy_type = "utmi";
343 port0; 344 port0;
344 }; 345 };
345 346
346/include/ "qoriq-usb2-dr-0.dtsi" 347/include/ "qoriq-usb2-dr-0.dtsi"
347 usb1: usb@211000 { 348 usb1: usb@211000 {
349 compatible = "fsl-usb2-dr-v1.6", "fsl,mpc85xx-usb2-dr", "fsl-usb2-dr";
348 dr_mode = "host"; 350 dr_mode = "host";
349 phy_type = "utmi"; 351 phy_type = "utmi";
350 }; 352 };
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec1-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec1-0.dtsi
index a1979ae334a7..3b0650a98478 100644
--- a/arch/powerpc/boot/dts/fsl/pq3-etsec1-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec1-0.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * PQ3 eTSEC device tree stub [ @ offsets 0x24000 ] 2 * PQ3 eTSEC device tree stub [ @ offsets 0x24000 ]
3 * 3 *
4 * Copyright 2011 Freescale Semiconductor Inc. 4 * Copyright 2011-2012 Freescale Semiconductor Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -41,6 +41,7 @@ ethernet@24000 {
41 compatible = "gianfar"; 41 compatible = "gianfar";
42 reg = <0x24000 0x1000>; 42 reg = <0x24000 0x1000>;
43 ranges = <0x0 0x24000 0x1000>; 43 ranges = <0x0 0x24000 0x1000>;
44 fsl,magic-packet;
44 local-mac-address = [ 00 00 00 00 00 00 ]; 45 local-mac-address = [ 00 00 00 00 00 00 ];
45 interrupts = <29 2 0 0 30 2 0 0 34 2 0 0>; 46 interrupts = <29 2 0 0 30 2 0 0 34 2 0 0>;
46}; 47};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec1-1.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec1-1.dtsi
index 4c4fdde1ec2a..96693b41f0f1 100644
--- a/arch/powerpc/boot/dts/fsl/pq3-etsec1-1.dtsi
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec1-1.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * PQ3 eTSEC device tree stub [ @ offsets 0x25000 ] 2 * PQ3 eTSEC device tree stub [ @ offsets 0x25000 ]
3 * 3 *
4 * Copyright 2011 Freescale Semiconductor Inc. 4 * Copyright 2011-2012 Freescale Semiconductor Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -41,6 +41,7 @@ ethernet@25000 {
41 compatible = "gianfar"; 41 compatible = "gianfar";
42 reg = <0x25000 0x1000>; 42 reg = <0x25000 0x1000>;
43 ranges = <0x0 0x25000 0x1000>; 43 ranges = <0x0 0x25000 0x1000>;
44 fsl,magic-packet;
44 local-mac-address = [ 00 00 00 00 00 00 ]; 45 local-mac-address = [ 00 00 00 00 00 00 ];
45 interrupts = <35 2 0 0 36 2 0 0 40 2 0 0>; 46 interrupts = <35 2 0 0 36 2 0 0 40 2 0 0>;
46}; 47};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec1-2.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec1-2.dtsi
index 4b8ab438668a..6b3fab19da1f 100644
--- a/arch/powerpc/boot/dts/fsl/pq3-etsec1-2.dtsi
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec1-2.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * PQ3 eTSEC device tree stub [ @ offsets 0x26000 ] 2 * PQ3 eTSEC device tree stub [ @ offsets 0x26000 ]
3 * 3 *
4 * Copyright 2011 Freescale Semiconductor Inc. 4 * Copyright 2011-2012 Freescale Semiconductor Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -41,6 +41,7 @@ ethernet@26000 {
41 compatible = "gianfar"; 41 compatible = "gianfar";
42 reg = <0x26000 0x1000>; 42 reg = <0x26000 0x1000>;
43 ranges = <0x0 0x26000 0x1000>; 43 ranges = <0x0 0x26000 0x1000>;
44 fsl,magic-packet;
44 local-mac-address = [ 00 00 00 00 00 00 ]; 45 local-mac-address = [ 00 00 00 00 00 00 ];
45 interrupts = <31 2 0 0 32 2 0 0 33 2 0 0>; 46 interrupts = <31 2 0 0 32 2 0 0 33 2 0 0>;
46}; 47};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-etsec1-3.dtsi b/arch/powerpc/boot/dts/fsl/pq3-etsec1-3.dtsi
index 40c9137729ae..0da592d93ddd 100644
--- a/arch/powerpc/boot/dts/fsl/pq3-etsec1-3.dtsi
+++ b/arch/powerpc/boot/dts/fsl/pq3-etsec1-3.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * PQ3 eTSEC device tree stub [ @ offsets 0x27000 ] 2 * PQ3 eTSEC device tree stub [ @ offsets 0x27000 ]
3 * 3 *
4 * Copyright 2011 Freescale Semiconductor Inc. 4 * Copyright 2011-2012 Freescale Semiconductor Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -41,6 +41,7 @@ ethernet@27000 {
41 compatible = "gianfar"; 41 compatible = "gianfar";
42 reg = <0x27000 0x1000>; 42 reg = <0x27000 0x1000>;
43 ranges = <0x0 0x27000 0x1000>; 43 ranges = <0x0 0x27000 0x1000>;
44 fsl,magic-packet;
44 local-mac-address = [ 00 00 00 00 00 00 ]; 45 local-mac-address = [ 00 00 00 00 00 00 ];
45 interrupts = <37 2 0 0 38 2 0 0 39 2 0 0>; 46 interrupts = <37 2 0 0 38 2 0 0 39 2 0 0>;
46}; 47};
diff --git a/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi b/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi
index 5c8046065844..fdedf7b1fe0f 100644
--- a/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi
+++ b/arch/powerpc/boot/dts/fsl/pq3-mpic.dtsi
@@ -39,6 +39,9 @@ mpic: pic@40000 {
39 reg = <0x40000 0x40000>; 39 reg = <0x40000 0x40000>;
40 compatible = "fsl,mpic"; 40 compatible = "fsl,mpic";
41 device_type = "open-pic"; 41 device_type = "open-pic";
42 big-endian;
43 single-cpu-affinity;
44 last-interrupt-source = <255>;
42}; 45};
43 46
44timer@41100 { 47timer@41100 {
diff --git a/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi b/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi
index bf957a7fca2a..d4c9d5daab21 100644
--- a/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi
+++ b/arch/powerpc/boot/dts/fsl/pq3-sec4.4-0.dtsi
@@ -33,32 +33,32 @@
33 */ 33 */
34 34
35crypto@30000 { 35crypto@30000 {
36 compatible = "fsl,sec4.4", "fsl,sec4.0"; 36 compatible = "fsl,sec-v4.4", "fsl,sec-v4.0";
37 #address-cells = <1>; 37 #address-cells = <1>;
38 #size-cells = <1>; 38 #size-cells = <1>;
39 reg = <0x30000 0x10000>; 39 reg = <0x30000 0x10000>;
40 interrupts = <58 2 0 0>; 40 interrupts = <58 2 0 0>;
41 41
42 sec_jr0: jr@1000 { 42 sec_jr0: jr@1000 {
43 compatible = "fsl,sec4.4-job-ring", "fsl,sec4.0-job-ring"; 43 compatible = "fsl,sec-v4.4-job-ring", "fsl,sec-v4.0-job-ring";
44 reg = <0x1000 0x1000>; 44 reg = <0x1000 0x1000>;
45 interrupts = <45 2 0 0>; 45 interrupts = <45 2 0 0>;
46 }; 46 };
47 47
48 sec_jr1: jr@2000 { 48 sec_jr1: jr@2000 {
49 compatible = "fsl,sec4.4-job-ring", "fsl,sec4.0-job-ring"; 49 compatible = "fsl,sec-v4.4-job-ring", "fsl,sec-v4.0-job-ring";
50 reg = <0x2000 0x1000>; 50 reg = <0x2000 0x1000>;
51 interrupts = <45 2 0 0>; 51 interrupts = <45 2 0 0>;
52 }; 52 };
53 53
54 sec_jr2: jr@3000 { 54 sec_jr2: jr@3000 {
55 compatible = "fsl,sec4.4-job-ring", "fsl,sec4.0-job-ring"; 55 compatible = "fsl,sec-v4.4-job-ring", "fsl,sec-v4.0-job-ring";
56 reg = <0x3000 0x1000>; 56 reg = <0x3000 0x1000>;
57 interrupts = <45 2 0 0>; 57 interrupts = <45 2 0 0>;
58 }; 58 };
59 59
60 sec_jr3: jr@4000 { 60 sec_jr3: jr@4000 {
61 compatible = "fsl,sec4.4-job-ring", "fsl,sec4.0-job-ring"; 61 compatible = "fsl,sec-v4.4-job-ring", "fsl,sec-v4.0-job-ring";
62 reg = <0x4000 0x1000>; 62 reg = <0x4000 0x1000>;
63 interrupts = <45 2 0 0>; 63 interrupts = <45 2 0 0>;
64 }; 64 };
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-mpic.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-mpic.dtsi
index b9bada6a87dc..08f42271f86a 100644
--- a/arch/powerpc/boot/dts/fsl/qoriq-mpic.dtsi
+++ b/arch/powerpc/boot/dts/fsl/qoriq-mpic.dtsi
@@ -53,7 +53,7 @@ timer@41100 {
53 53
54msi0: msi@41600 { 54msi0: msi@41600 {
55 compatible = "fsl,mpic-msi"; 55 compatible = "fsl,mpic-msi";
56 reg = <0x41600 0x200>; 56 reg = <0x41600 0x200 0x44140 4>;
57 msi-available-ranges = <0 0x100>; 57 msi-available-ranges = <0 0x100>;
58 interrupts = < 58 interrupts = <
59 0xe0 0 0 0 59 0xe0 0 0 0
@@ -68,7 +68,7 @@ msi0: msi@41600 {
68 68
69msi1: msi@41800 { 69msi1: msi@41800 {
70 compatible = "fsl,mpic-msi"; 70 compatible = "fsl,mpic-msi";
71 reg = <0x41800 0x200>; 71 reg = <0x41800 0x200 0x45140 4>;
72 msi-available-ranges = <0 0x100>; 72 msi-available-ranges = <0 0x100>;
73 interrupts = < 73 interrupts = <
74 0xe8 0 0 0 74 0xe8 0 0 0
@@ -83,7 +83,7 @@ msi1: msi@41800 {
83 83
84msi2: msi@41a00 { 84msi2: msi@41a00 {
85 compatible = "fsl,mpic-msi"; 85 compatible = "fsl,mpic-msi";
86 reg = <0x41a00 0x200>; 86 reg = <0x41a00 0x200 0x46140 4>;
87 msi-available-ranges = <0 0x100>; 87 msi-available-ranges = <0 0x100>;
88 interrupts = < 88 interrupts = <
89 0xf0 0 0 0 89 0xf0 0 0 0
diff --git a/arch/powerpc/boot/dts/ge_imp3a.dts b/arch/powerpc/boot/dts/ge_imp3a.dts
new file mode 100644
index 000000000000..fefae416a097
--- /dev/null
+++ b/arch/powerpc/boot/dts/ge_imp3a.dts
@@ -0,0 +1,255 @@
1/*
2 * GE IMP3A Device Tree Source
3 *
4 * Copyright 2010-2011 GE Intelligent Platforms Embedded Systems, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 *
11 * Based on: P2020 DS Device Tree Source
12 * Copyright 2009 Freescale Semiconductor Inc.
13 */
14
15/include/ "fsl/p2020si-pre.dtsi"
16
17/ {
18 model = "GE_IMP3A";
19 compatible = "ge,imp3a";
20
21 memory {
22 device_type = "memory";
23 };
24
25 lbc: localbus@fef05000 {
26 reg = <0 0xfef05000 0 0x1000>;
27
28 ranges = <0x0 0x0 0x0 0xff000000 0x01000000
29 0x1 0x0 0x0 0xe0000000 0x08000000
30 0x2 0x0 0x0 0xe8000000 0x08000000
31 0x3 0x0 0x0 0xfc100000 0x00020000
32 0x4 0x0 0x0 0xfc000000 0x00008000
33 0x5 0x0 0x0 0xfc008000 0x00008000
34 0x6 0x0 0x0 0xfee00000 0x00040000
35 0x7 0x0 0x0 0xfee80000 0x00040000>;
36
37 /* nor@0,0 is a mirror of part of the memory in nor@1,0
38 nor@0,0 {
39 #address-cells = <1>;
40 #size-cells = <1>;
41 compatible = "ge,imp3a-firmware-mirror", "cfi-flash";
42 reg = <0x0 0x0 0x1000000>;
43 bank-width = <2>;
44 device-width = <1>;
45
46 partition@0 {
47 label = "firmware";
48 reg = <0x0 0x1000000>;
49 read-only;
50 };
51 };
52 */
53
54 nor@1,0 {
55 #address-cells = <1>;
56 #size-cells = <1>;
57 compatible = "ge,imp3a-paged-flash", "cfi-flash";
58 reg = <0x1 0x0 0x8000000>;
59 bank-width = <2>;
60 device-width = <1>;
61
62 partition@0 {
63 label = "user";
64 reg = <0x0 0x7800000>;
65 };
66
67 partition@7800000 {
68 label = "firmware";
69 reg = <0x7800000 0x800000>;
70 read-only;
71 };
72 };
73
74 nvram@3,0 {
75 device_type = "nvram";
76 compatible = "simtek,stk14ca8";
77 reg = <0x3 0x0 0x20000>;
78 };
79
80 fpga@4,0 {
81 compatible = "ge,imp3a-fpga-regs";
82 reg = <0x4 0x0 0x20>;
83 };
84
85 gef_pic: pic@4,20 {
86 #interrupt-cells = <1>;
87 interrupt-controller;
88 device_type = "interrupt-controller";
89 compatible = "ge,imp3a-fpga-pic", "gef,fpga-pic-1.00";
90 reg = <0x4 0x20 0x20>;
91 interrupts = <6 7 0 0>;
92 };
93
94 gef_gpio: gpio@4,400 {
95 #gpio-cells = <2>;
96 compatible = "ge,imp3a-gpio";
97 reg = <0x4 0x400 0x24>;
98 gpio-controller;
99 };
100
101 wdt@4,800 {
102 compatible = "ge,imp3a-fpga-wdt", "gef,fpga-wdt-1.00",
103 "gef,fpga-wdt";
104 reg = <0x4 0x800 0x8>;
105 interrupts = <10 4>;
106 interrupt-parent = <&gef_pic>;
107 };
108
109 /* Second watchdog available, driver currently supports one.
110 wdt@4,808 {
111 compatible = "gef,imp3a-fpga-wdt", "gef,fpga-wdt-1.00",
112 "gef,fpga-wdt";
113 reg = <0x4 0x808 0x8>;
114 interrupts = <9 4>;
115 interrupt-parent = <&gef_pic>;
116 };
117 */
118
119 nand@6,0 {
120 compatible = "fsl,elbc-fcm-nand";
121 reg = <0x6 0x0 0x40000>;
122 };
123
124 nand@7,0 {
125 compatible = "fsl,elbc-fcm-nand";
126 reg = <0x7 0x0 0x40000>;
127 };
128 };
129
130 soc: soc@fef00000 {
131 ranges = <0x0 0 0xfef00000 0x100000>;
132
133 i2c@3000 {
134 hwmon@48 {
135 compatible = "national,lm92";
136 reg = <0x48>;
137 };
138
139 hwmon@4c {
140 compatible = "adi,adt7461";
141 reg = <0x4c>;
142 };
143
144 rtc@51 {
145 compatible = "epson,rx8581";
146 reg = <0x51>;
147 };
148
149 eti@6b {
150 compatible = "dallas,ds1682";
151 reg = <0x6b>;
152 };
153 };
154
155 usb@22000 {
156 phy_type = "ulpi";
157 dr_mode = "host";
158 };
159
160 mdio@24520 {
161 phy0: ethernet-phy@0 {
162 interrupt-parent = <&gef_pic>;
163 interrupts = <0xc 0x4>;
164 reg = <0x1>;
165 };
166 phy1: ethernet-phy@1 {
167 interrupt-parent = <&gef_pic>;
168 interrupts = <0xb 0x4>;
169 reg = <0x2>;
170 };
171 tbi0: tbi-phy@11 {
172 reg = <0x11>;
173 device_type = "tbi-phy";
174 };
175 };
176
177 mdio@25520 {
178 tbi1: tbi-phy@11 {
179 reg = <0x11>;
180 device_type = "tbi-phy";
181 };
182 };
183
184 mdio@26520 {
185 status = "disabled";
186 };
187
188 enet0: ethernet@24000 {
189 tbi-handle = <&tbi0>;
190 phy-handle = <&phy0>;
191 phy-connection-type = "gmii";
192 };
193
194 enet1: ethernet@25000 {
195 tbi-handle = <&tbi1>;
196 phy-handle = <&phy1>;
197 phy-connection-type = "gmii";
198 };
199
200 enet2: ethernet@26000 {
201 status = "disabled";
202 };
203 };
204
205 pci0: pcie@fef08000 {
206 ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000
207 0x1000000 0x0 0x00000000 0 0xfe020000 0x0 0x10000>;
208 reg = <0 0xfef08000 0 0x1000>;
209
210 pcie@0 {
211 ranges = <0x2000000 0x0 0xc0000000
212 0x2000000 0x0 0xc0000000
213 0x0 0x20000000
214
215 0x1000000 0x0 0x0
216 0x1000000 0x0 0x0
217 0x0 0x10000>;
218 };
219 };
220
221 pci1: pcie@fef09000 {
222 reg = <0 0xfef09000 0 0x1000>;
223 ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
224 0x1000000 0x0 0x00000000 0 0xfe010000 0x0 0x10000>;
225
226 pcie@0 {
227 ranges = <0x2000000 0x0 0xa0000000
228 0x2000000 0x0 0xa0000000
229 0x0 0x20000000
230
231 0x1000000 0x0 0x0
232 0x1000000 0x0 0x0
233 0x0 0x10000>;
234 };
235
236 };
237
238 pci2: pcie@fef0a000 {
239 reg = <0 0xfef0a000 0 0x1000>;
240 ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
241 0x1000000 0x0 0x00000000 0 0xfe000000 0x0 0x10000>;
242
243 pcie@0 {
244 ranges = <0x2000000 0x0 0x80000000
245 0x2000000 0x0 0x80000000
246 0x0 0x20000000
247
248 0x1000000 0x0 0x0
249 0x1000000 0x0 0x0
250 0x0 0x10000>;
251 };
252 };
253};
254
255/include/ "fsl/p2020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/mpc836x_mds.dts b/arch/powerpc/boot/dts/mpc836x_mds.dts
index c0e450a551bf..81dd513d6308 100644
--- a/arch/powerpc/boot/dts/mpc836x_mds.dts
+++ b/arch/powerpc/boot/dts/mpc836x_mds.dts
@@ -405,6 +405,10 @@
405 reg = <0x1>; 405 reg = <0x1>;
406 device_type = "ethernet-phy"; 406 device_type = "ethernet-phy";
407 }; 407 };
408 tbi-phy@2 {
409 device_type = "tbi-phy";
410 reg = <0x2>;
411 };
408 }; 412 };
409 413
410 qeic: interrupt-controller@80 { 414 qeic: interrupt-controller@80 {
diff --git a/arch/powerpc/boot/dts/mpc8536ds.dts b/arch/powerpc/boot/dts/mpc8536ds.dts
index c15881574fdc..19736222a0b9 100644
--- a/arch/powerpc/boot/dts/mpc8536ds.dts
+++ b/arch/powerpc/boot/dts/mpc8536ds.dts
@@ -1,7 +1,7 @@
1/* 1/*
2 * MPC8536 DS Device Tree Source 2 * MPC8536 DS Device Tree Source
3 * 3 *
4 * Copyright 2008 Freescale Semiconductor, Inc. 4 * Copyright 2008, 2011 Freescale Semiconductor, Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the 7 * under the terms of the GNU General Public License as published by the
@@ -34,6 +34,10 @@
34 34
35 lbc: localbus@ffe05000 { 35 lbc: localbus@ffe05000 {
36 reg = <0 0xffe05000 0 0x1000>; 36 reg = <0 0xffe05000 0 0x1000>;
37
38 ranges = <0x0 0x0 0x0 0xe8000000 0x08000000
39 0x2 0x0 0x0 0xffa00000 0x00040000
40 0x3 0x0 0x0 0xffdf0000 0x00008000>;
37 }; 41 };
38 42
39 board_soc: soc: soc@ffe00000 { 43 board_soc: soc: soc@ffe00000 {
diff --git a/arch/powerpc/boot/dts/mpc8536ds.dtsi b/arch/powerpc/boot/dts/mpc8536ds.dtsi
index 1462e4cf49d7..cc46dbd9746d 100644
--- a/arch/powerpc/boot/dts/mpc8536ds.dtsi
+++ b/arch/powerpc/boot/dts/mpc8536ds.dtsi
@@ -32,6 +32,99 @@
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */ 33 */
34 34
35&lbc {
36 nor@0,0 {
37 #address-cells = <1>;
38 #size-cells = <1>;
39 compatible = "cfi-flash";
40 reg = <0x0 0x0 0x8000000>;
41 bank-width = <2>;
42 device-width = <1>;
43
44 partition@0 {
45 reg = <0x0 0x03000000>;
46 label = "ramdisk-nor";
47 };
48
49 partition@3000000 {
50 reg = <0x03000000 0x00e00000>;
51 label = "diagnostic-nor";
52 read-only;
53 };
54
55 partition@3e00000 {
56 reg = <0x03e00000 0x00200000>;
57 label = "dink-nor";
58 read-only;
59 };
60
61 partition@4000000 {
62 reg = <0x04000000 0x00400000>;
63 label = "kernel-nor";
64 };
65
66 partition@4400000 {
67 reg = <0x04400000 0x03b00000>;
68 label = "fs-nor";
69 };
70
71 partition@7f00000 {
72 reg = <0x07f00000 0x00080000>;
73 label = "dtb-nor";
74 };
75
76 partition@7f80000 {
77 reg = <0x07f80000 0x00080000>;
78 label = "u-boot-nor";
79 read-only;
80 };
81 };
82
83 nand@2,0 {
84 #address-cells = <1>;
85 #size-cells = <1>;
86 compatible = "fsl,mpc8536-fcm-nand",
87 "fsl,elbc-fcm-nand";
88 reg = <0x2 0x0 0x40000>;
89
90 partition@0 {
91 reg = <0x0 0x02000000>;
92 label = "u-boot-nand";
93 read-only;
94 };
95
96 partition@2000000 {
97 reg = <0x02000000 0x10000000>;
98 label = "fs-nand";
99 };
100
101 partition@12000000 {
102 reg = <0x12000000 0x08000000>;
103 label = "ramdisk-nand";
104 };
105
106 partition@1a000000 {
107 reg = <0x1a000000 0x04000000>;
108 label = "kernel-nand";
109 };
110
111 partition@1e000000 {
112 reg = <0x1e000000 0x01000000>;
113 label = "dtb-nand";
114 };
115
116 partition@1f000000 {
117 reg = <0x1f000000 0x21000000>;
118 label = "empty-nand";
119 };
120 };
121
122 board-control@3,0 {
123 compatible = "fsl,mpc8536ds-fpga-pixis";
124 reg = <0x3 0x0 0x8000>;
125 };
126};
127
35&board_soc { 128&board_soc {
36 i2c@3100 { 129 i2c@3100 {
37 rtc@68 { 130 rtc@68 {
diff --git a/arch/powerpc/boot/dts/mpc8536ds_36b.dts b/arch/powerpc/boot/dts/mpc8536ds_36b.dts
index 8f4b929b1d1d..f8a3b3413176 100644
--- a/arch/powerpc/boot/dts/mpc8536ds_36b.dts
+++ b/arch/powerpc/boot/dts/mpc8536ds_36b.dts
@@ -1,7 +1,7 @@
1/* 1/*
2 * MPC8536DS Device Tree Source (36-bit address map) 2 * MPC8536DS Device Tree Source (36-bit address map)
3 * 3 *
4 * Copyright 2008-2009 Freescale Semiconductor, Inc. 4 * Copyright 2008-2009, 2011 Freescale Semiconductor, Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the 7 * under the terms of the GNU General Public License as published by the
@@ -33,7 +33,11 @@
33 }; 33 };
34 34
35 lbc: localbus@ffe05000 { 35 lbc: localbus@ffe05000 {
36 reg = <0 0xffe05000 0 0x1000>; 36 reg = <0xf 0xffe05000 0 0x1000>;
37
38 ranges = <0x0 0x0 0xf 0xe8000000 0x08000000
39 0x2 0x0 0xf 0xffa00000 0x00040000
40 0x3 0x0 0xf 0xffdf0000 0x00008000>;
37 }; 41 };
38 42
39 board_soc: soc: soc@fffe00000 { 43 board_soc: soc: soc@fffe00000 {
diff --git a/arch/powerpc/boot/dts/mpc8548cds.dts b/arch/powerpc/boot/dts/mpc8548cds.dts
deleted file mode 100644
index 07b8dae0f46e..000000000000
--- a/arch/powerpc/boot/dts/mpc8548cds.dts
+++ /dev/null
@@ -1,306 +0,0 @@
1/*
2 * MPC8548 CDS Device Tree Source
3 *
4 * Copyright 2006, 2008 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12/include/ "fsl/mpc8548si-pre.dtsi"
13
14/ {
15 model = "MPC8548CDS";
16 compatible = "MPC8548CDS", "MPC85xxCDS";
17
18 aliases {
19 ethernet0 = &enet0;
20 ethernet1 = &enet1;
21 ethernet2 = &enet2;
22 ethernet3 = &enet3;
23 serial0 = &serial0;
24 serial1 = &serial1;
25 pci0 = &pci0;
26 pci1 = &pci1;
27 pci2 = &pci2;
28 };
29
30 memory {
31 device_type = "memory";
32 reg = <0 0 0x0 0x8000000>; // 128M at 0x0
33 };
34
35 lbc: localbus@e0005000 {
36 reg = <0 0xe0005000 0 0x1000>;
37 };
38
39 soc: soc8548@e0000000 {
40 ranges = <0 0x0 0xe0000000 0x100000>;
41
42 i2c@3000 {
43 eeprom@50 {
44 compatible = "atmel,24c64";
45 reg = <0x50>;
46 };
47
48 eeprom@56 {
49 compatible = "atmel,24c64";
50 reg = <0x56>;
51 };
52
53 eeprom@57 {
54 compatible = "atmel,24c64";
55 reg = <0x57>;
56 };
57 };
58
59 i2c@3100 {
60 eeprom@50 {
61 compatible = "atmel,24c64";
62 reg = <0x50>;
63 };
64 };
65
66 enet0: ethernet@24000 {
67 tbi-handle = <&tbi0>;
68 phy-handle = <&phy0>;
69 };
70
71 mdio@24520 {
72 phy0: ethernet-phy@0 {
73 interrupts = <5 1 0 0>;
74 reg = <0x0>;
75 device_type = "ethernet-phy";
76 };
77 phy1: ethernet-phy@1 {
78 interrupts = <5 1 0 0>;
79 reg = <0x1>;
80 device_type = "ethernet-phy";
81 };
82 phy2: ethernet-phy@2 {
83 interrupts = <5 1 0 0>;
84 reg = <0x2>;
85 device_type = "ethernet-phy";
86 };
87 phy3: ethernet-phy@3 {
88 interrupts = <5 1 0 0>;
89 reg = <0x3>;
90 device_type = "ethernet-phy";
91 };
92 tbi0: tbi-phy@11 {
93 reg = <0x11>;
94 device_type = "tbi-phy";
95 };
96 };
97
98 enet1: ethernet@25000 {
99 tbi-handle = <&tbi1>;
100 phy-handle = <&phy1>;
101 };
102
103 mdio@25520 {
104 tbi1: tbi-phy@11 {
105 reg = <0x11>;
106 device_type = "tbi-phy";
107 };
108 };
109
110 enet2: ethernet@26000 {
111 tbi-handle = <&tbi2>;
112 phy-handle = <&phy2>;
113 };
114
115 mdio@26520 {
116 tbi2: tbi-phy@11 {
117 reg = <0x11>;
118 device_type = "tbi-phy";
119 };
120 };
121
122 enet3: ethernet@27000 {
123 tbi-handle = <&tbi3>;
124 phy-handle = <&phy3>;
125 };
126
127 mdio@27520 {
128 tbi3: tbi-phy@11 {
129 reg = <0x11>;
130 device_type = "tbi-phy";
131 };
132 };
133 };
134
135 pci0: pci@e0008000 {
136 reg = <0 0xe0008000 0 0x1000>;
137 ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x10000000
138 0x1000000 0x0 0x00000000 0 0xe2000000 0x0 0x800000>;
139 clock-frequency = <66666666>;
140 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
141 interrupt-map = <
142 /* IDSEL 0x4 (PCIX Slot 2) */
143 0x2000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
144 0x2000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
145 0x2000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
146 0x2000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
147
148 /* IDSEL 0x5 (PCIX Slot 3) */
149 0x2800 0x0 0x0 0x1 &mpic 0x1 0x1 0 0
150 0x2800 0x0 0x0 0x2 &mpic 0x2 0x1 0 0
151 0x2800 0x0 0x0 0x3 &mpic 0x3 0x1 0 0
152 0x2800 0x0 0x0 0x4 &mpic 0x0 0x1 0 0
153
154 /* IDSEL 0x6 (PCIX Slot 4) */
155 0x3000 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
156 0x3000 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
157 0x3000 0x0 0x0 0x3 &mpic 0x0 0x1 0 0
158 0x3000 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
159
160 /* IDSEL 0x8 (PCIX Slot 5) */
161 0x4000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
162 0x4000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
163 0x4000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
164 0x4000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
165
166 /* IDSEL 0xC (Tsi310 bridge) */
167 0x6000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
168 0x6000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
169 0x6000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
170 0x6000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
171
172 /* IDSEL 0x14 (Slot 2) */
173 0xa000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
174 0xa000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
175 0xa000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
176 0xa000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
177
178 /* IDSEL 0x15 (Slot 3) */
179 0xa800 0x0 0x0 0x1 &mpic 0x1 0x1 0 0
180 0xa800 0x0 0x0 0x2 &mpic 0x2 0x1 0 0
181 0xa800 0x0 0x0 0x3 &mpic 0x3 0x1 0 0
182 0xa800 0x0 0x0 0x4 &mpic 0x0 0x1 0 0
183
184 /* IDSEL 0x16 (Slot 4) */
185 0xb000 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
186 0xb000 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
187 0xb000 0x0 0x0 0x3 &mpic 0x0 0x1 0 0
188 0xb000 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
189
190 /* IDSEL 0x18 (Slot 5) */
191 0xc000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
192 0xc000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
193 0xc000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
194 0xc000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
195
196 /* IDSEL 0x1C (Tsi310 bridge PCI primary) */
197 0xe000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
198 0xe000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
199 0xe000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
200 0xe000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0>;
201
202 pci_bridge@1c {
203 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
204 interrupt-map = <
205
206 /* IDSEL 0x00 (PrPMC Site) */
207 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
208 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
209 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
210 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
211
212 /* IDSEL 0x04 (VIA chip) */
213 0x2000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
214 0x2000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
215 0x2000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
216 0x2000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
217
218 /* IDSEL 0x05 (8139) */
219 0x2800 0x0 0x0 0x1 &mpic 0x1 0x1 0 0
220
221 /* IDSEL 0x06 (Slot 6) */
222 0x3000 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
223 0x3000 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
224 0x3000 0x0 0x0 0x3 &mpic 0x0 0x1 0 0
225 0x3000 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
226
227 /* IDESL 0x07 (Slot 7) */
228 0x3800 0x0 0x0 0x1 &mpic 0x3 0x1 0 0
229 0x3800 0x0 0x0 0x2 &mpic 0x0 0x1 0 0
230 0x3800 0x0 0x0 0x3 &mpic 0x1 0x1 0 0
231 0x3800 0x0 0x0 0x4 &mpic 0x2 0x1 0 0>;
232
233 reg = <0xe000 0x0 0x0 0x0 0x0>;
234 #interrupt-cells = <1>;
235 #size-cells = <2>;
236 #address-cells = <3>;
237 ranges = <0x2000000 0x0 0x80000000
238 0x2000000 0x0 0x80000000
239 0x0 0x20000000
240 0x1000000 0x0 0x0
241 0x1000000 0x0 0x0
242 0x0 0x80000>;
243 clock-frequency = <33333333>;
244
245 isa@4 {
246 device_type = "isa";
247 #interrupt-cells = <2>;
248 #size-cells = <1>;
249 #address-cells = <2>;
250 reg = <0x2000 0x0 0x0 0x0 0x0>;
251 ranges = <0x1 0x0 0x1000000 0x0 0x0 0x1000>;
252 interrupt-parent = <&i8259>;
253
254 i8259: interrupt-controller@20 {
255 interrupt-controller;
256 device_type = "interrupt-controller";
257 reg = <0x1 0x20 0x2
258 0x1 0xa0 0x2
259 0x1 0x4d0 0x2>;
260 #address-cells = <0>;
261 #interrupt-cells = <2>;
262 compatible = "chrp,iic";
263 interrupts = <0 1 0 0>;
264 interrupt-parent = <&mpic>;
265 };
266
267 rtc@70 {
268 compatible = "pnpPNP,b00";
269 reg = <0x1 0x70 0x2>;
270 };
271 };
272 };
273 };
274
275 pci1: pci@e0009000 {
276 reg = <0 0xe0009000 0 0x1000>;
277 ranges = <0x2000000 0x0 0x90000000 0 0x90000000 0x0 0x10000000
278 0x1000000 0x0 0x00000000 0 0xe2800000 0x0 0x800000>;
279 clock-frequency = <66666666>;
280 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
281 interrupt-map = <
282
283 /* IDSEL 0x15 */
284 0xa800 0x0 0x0 0x1 &mpic 0xb 0x1 0 0
285 0xa800 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
286 0xa800 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
287 0xa800 0x0 0x0 0x4 &mpic 0x3 0x1 0 0>;
288 };
289
290 pci2: pcie@e000a000 {
291 reg = <0 0xe000a000 0 0x1000>;
292 ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
293 0x1000000 0x0 0x00000000 0 0xe3000000 0x0 0x100000>;
294 pcie@0 {
295 ranges = <0x2000000 0x0 0xa0000000
296 0x2000000 0x0 0xa0000000
297 0x0 0x20000000
298
299 0x1000000 0x0 0x0
300 0x1000000 0x0 0x0
301 0x0 0x100000>;
302 };
303 };
304};
305
306/include/ "fsl/mpc8548si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/mpc8548cds.dtsi b/arch/powerpc/boot/dts/mpc8548cds.dtsi
new file mode 100644
index 000000000000..c61f525e4740
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8548cds.dtsi
@@ -0,0 +1,306 @@
1/*
2 * MPC8548CDS Device Tree Source stub (no addresses or top-level ranges)
3 *
4 * Copyright 2012 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35&board_lbc {
36 nor@0,0 {
37 #address-cells = <1>;
38 #size-cells = <1>;
39 compatible = "cfi-flash";
40 reg = <0x0 0x0 0x01000000>;
41 bank-width = <2>;
42 device-width = <2>;
43
44 partition@0 {
45 reg = <0x0 0x0b00000>;
46 label = "ramdisk-nor";
47 };
48
49 partition@300000 {
50 reg = <0x0b00000 0x0400000>;
51 label = "kernel-nor";
52 };
53
54 partition@700000 {
55 reg = <0x0f00000 0x060000>;
56 label = "dtb-nor";
57 };
58
59 partition@760000 {
60 reg = <0x0f60000 0x020000>;
61 label = "env-nor";
62 read-only;
63 };
64
65 partition@780000 {
66 reg = <0x0f80000 0x080000>;
67 label = "u-boot-nor";
68 read-only;
69 };
70 };
71
72 board-control@1,0 {
73 compatible = "fsl,mpc8548cds-fpga";
74 reg = <0x1 0x0 0x1000>;
75 };
76};
77
78&board_soc {
79 i2c@3000 {
80 eeprom@50 {
81 compatible = "atmel,24c64";
82 reg = <0x50>;
83 };
84
85 eeprom@56 {
86 compatible = "atmel,24c64";
87 reg = <0x56>;
88 };
89
90 eeprom@57 {
91 compatible = "atmel,24c64";
92 reg = <0x57>;
93 };
94 };
95
96 i2c@3100 {
97 eeprom@50 {
98 compatible = "atmel,24c64";
99 reg = <0x50>;
100 };
101 };
102
103 enet0: ethernet@24000 {
104 tbi-handle = <&tbi0>;
105 phy-handle = <&phy0>;
106 };
107
108 mdio@24520 {
109 phy0: ethernet-phy@0 {
110 interrupts = <5 1 0 0>;
111 reg = <0x0>;
112 device_type = "ethernet-phy";
113 };
114 phy1: ethernet-phy@1 {
115 interrupts = <5 1 0 0>;
116 reg = <0x1>;
117 device_type = "ethernet-phy";
118 };
119 phy2: ethernet-phy@2 {
120 interrupts = <5 1 0 0>;
121 reg = <0x2>;
122 device_type = "ethernet-phy";
123 };
124 phy3: ethernet-phy@3 {
125 interrupts = <5 1 0 0>;
126 reg = <0x3>;
127 device_type = "ethernet-phy";
128 };
129 tbi0: tbi-phy@11 {
130 reg = <0x11>;
131 device_type = "tbi-phy";
132 };
133 };
134
135 enet1: ethernet@25000 {
136 tbi-handle = <&tbi1>;
137 phy-handle = <&phy1>;
138 };
139
140 mdio@25520 {
141 tbi1: tbi-phy@11 {
142 reg = <0x11>;
143 device_type = "tbi-phy";
144 };
145 };
146
147 enet2: ethernet@26000 {
148 tbi-handle = <&tbi2>;
149 phy-handle = <&phy2>;
150 };
151
152 mdio@26520 {
153 tbi2: tbi-phy@11 {
154 reg = <0x11>;
155 device_type = "tbi-phy";
156 };
157 };
158
159 enet3: ethernet@27000 {
160 tbi-handle = <&tbi3>;
161 phy-handle = <&phy3>;
162 };
163
164 mdio@27520 {
165 tbi3: tbi-phy@11 {
166 reg = <0x11>;
167 device_type = "tbi-phy";
168 };
169 };
170};
171
172&board_pci0 {
173 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
174 interrupt-map = <
175 /* IDSEL 0x4 (PCIX Slot 2) */
176 0x2000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
177 0x2000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
178 0x2000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
179 0x2000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
180
181 /* IDSEL 0x5 (PCIX Slot 3) */
182 0x2800 0x0 0x0 0x1 &mpic 0x1 0x1 0 0
183 0x2800 0x0 0x0 0x2 &mpic 0x2 0x1 0 0
184 0x2800 0x0 0x0 0x3 &mpic 0x3 0x1 0 0
185 0x2800 0x0 0x0 0x4 &mpic 0x0 0x1 0 0
186
187 /* IDSEL 0x6 (PCIX Slot 4) */
188 0x3000 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
189 0x3000 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
190 0x3000 0x0 0x0 0x3 &mpic 0x0 0x1 0 0
191 0x3000 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
192
193 /* IDSEL 0x8 (PCIX Slot 5) */
194 0x4000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
195 0x4000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
196 0x4000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
197 0x4000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
198
199 /* IDSEL 0xC (Tsi310 bridge) */
200 0x6000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
201 0x6000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
202 0x6000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
203 0x6000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
204
205 /* IDSEL 0x14 (Slot 2) */
206 0xa000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
207 0xa000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
208 0xa000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
209 0xa000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
210
211 /* IDSEL 0x15 (Slot 3) */
212 0xa800 0x0 0x0 0x1 &mpic 0x1 0x1 0 0
213 0xa800 0x0 0x0 0x2 &mpic 0x2 0x1 0 0
214 0xa800 0x0 0x0 0x3 &mpic 0x3 0x1 0 0
215 0xa800 0x0 0x0 0x4 &mpic 0x0 0x1 0 0
216
217 /* IDSEL 0x16 (Slot 4) */
218 0xb000 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
219 0xb000 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
220 0xb000 0x0 0x0 0x3 &mpic 0x0 0x1 0 0
221 0xb000 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
222
223 /* IDSEL 0x18 (Slot 5) */
224 0xc000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
225 0xc000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
226 0xc000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
227 0xc000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
228
229 /* IDSEL 0x1C (Tsi310 bridge PCI primary) */
230 0xe000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
231 0xe000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
232 0xe000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
233 0xe000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0>;
234
235 pci_bridge@1c {
236 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
237 interrupt-map = <
238
239 /* IDSEL 0x00 (PrPMC Site) */
240 0000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
241 0000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
242 0000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
243 0000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
244
245 /* IDSEL 0x04 (VIA chip) */
246 0x2000 0x0 0x0 0x1 &mpic 0x0 0x1 0 0
247 0x2000 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
248 0x2000 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
249 0x2000 0x0 0x0 0x4 &mpic 0x3 0x1 0 0
250
251 /* IDSEL 0x05 (8139) */
252 0x2800 0x0 0x0 0x1 &mpic 0x1 0x1 0 0
253
254 /* IDSEL 0x06 (Slot 6) */
255 0x3000 0x0 0x0 0x1 &mpic 0x2 0x1 0 0
256 0x3000 0x0 0x0 0x2 &mpic 0x3 0x1 0 0
257 0x3000 0x0 0x0 0x3 &mpic 0x0 0x1 0 0
258 0x3000 0x0 0x0 0x4 &mpic 0x1 0x1 0 0
259
260 /* IDESL 0x07 (Slot 7) */
261 0x3800 0x0 0x0 0x1 &mpic 0x3 0x1 0 0
262 0x3800 0x0 0x0 0x2 &mpic 0x0 0x1 0 0
263 0x3800 0x0 0x0 0x3 &mpic 0x1 0x1 0 0
264 0x3800 0x0 0x0 0x4 &mpic 0x2 0x1 0 0>;
265
266 reg = <0xe000 0x0 0x0 0x0 0x0>;
267 #interrupt-cells = <1>;
268 #size-cells = <2>;
269 #address-cells = <3>;
270 ranges = <0x2000000 0x0 0x80000000
271 0x2000000 0x0 0x80000000
272 0x0 0x20000000
273 0x1000000 0x0 0x0
274 0x1000000 0x0 0x0
275 0x0 0x80000>;
276 clock-frequency = <33333333>;
277
278 isa@4 {
279 device_type = "isa";
280 #interrupt-cells = <2>;
281 #size-cells = <1>;
282 #address-cells = <2>;
283 reg = <0x2000 0x0 0x0 0x0 0x0>;
284 ranges = <0x1 0x0 0x1000000 0x0 0x0 0x1000>;
285 interrupt-parent = <&i8259>;
286
287 i8259: interrupt-controller@20 {
288 interrupt-controller;
289 device_type = "interrupt-controller";
290 reg = <0x1 0x20 0x2
291 0x1 0xa0 0x2
292 0x1 0x4d0 0x2>;
293 #address-cells = <0>;
294 #interrupt-cells = <2>;
295 compatible = "chrp,iic";
296 interrupts = <0 1 0 0>;
297 interrupt-parent = <&mpic>;
298 };
299
300 rtc@70 {
301 compatible = "pnpPNP,b00";
302 reg = <0x1 0x70 0x2>;
303 };
304 };
305 };
306};
diff --git a/arch/powerpc/boot/dts/mpc8548cds_32b.dts b/arch/powerpc/boot/dts/mpc8548cds_32b.dts
new file mode 100644
index 000000000000..6fd63163fc6b
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8548cds_32b.dts
@@ -0,0 +1,86 @@
1/*
2 * MPC8548 CDS Device Tree Source (32-bit address map)
3 *
4 * Copyright 2006, 2008, 2011-2012 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12/include/ "fsl/mpc8548si-pre.dtsi"
13
14/ {
15 model = "MPC8548CDS";
16 compatible = "MPC8548CDS", "MPC85xxCDS";
17
18 memory {
19 device_type = "memory";
20 reg = <0 0 0x0 0x8000000>; // 128M at 0x0
21 };
22
23 board_lbc: lbc: localbus@e0005000 {
24 reg = <0 0xe0005000 0 0x1000>;
25
26 ranges = <0x0 0x0 0x0 0xff000000 0x01000000
27 0x1 0x0 0x0 0xf8004000 0x00001000>;
28
29 };
30
31 board_soc: soc: soc8548@e0000000 {
32 ranges = <0 0x0 0xe0000000 0x100000>;
33 };
34
35 board_pci0: pci0: pci@e0008000 {
36 reg = <0 0xe0008000 0 0x1000>;
37 ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x10000000
38 0x1000000 0x0 0x00000000 0 0xe2000000 0x0 0x800000>;
39 clock-frequency = <66666666>;
40 };
41
42 pci1: pci@e0009000 {
43 reg = <0 0xe0009000 0 0x1000>;
44 ranges = <0x2000000 0x0 0x90000000 0 0x90000000 0x0 0x10000000
45 0x1000000 0x0 0x00000000 0 0xe2800000 0x0 0x800000>;
46 clock-frequency = <66666666>;
47 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
48 interrupt-map = <
49
50 /* IDSEL 0x15 */
51 0xa800 0x0 0x0 0x1 &mpic 0xb 0x1 0 0
52 0xa800 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
53 0xa800 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
54 0xa800 0x0 0x0 0x4 &mpic 0x3 0x1 0 0>;
55 };
56
57 pci2: pcie@e000a000 {
58 reg = <0 0xe000a000 0 0x1000>;
59 ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
60 0x1000000 0x0 0x00000000 0 0xe3000000 0x0 0x100000>;
61 pcie@0 {
62 ranges = <0x2000000 0x0 0xa0000000
63 0x2000000 0x0 0xa0000000
64 0x0 0x20000000
65
66 0x1000000 0x0 0x0
67 0x1000000 0x0 0x0
68 0x0 0x100000>;
69 };
70 };
71
72 rio: rapidio@e00c0000 {
73 reg = <0x0 0xe00c0000 0x0 0x20000>;
74 port1 {
75 ranges = <0x0 0x0 0x0 0xc0000000 0x0 0x20000000>;
76 };
77 };
78};
79
80/*
81 * mpc8548cds.dtsi must be last to ensure board_pci0 overrides pci0 settings
82 * for interrupt-map & interrupt-map-mask.
83 */
84
85/include/ "fsl/mpc8548si-post.dtsi"
86/include/ "mpc8548cds.dtsi"
diff --git a/arch/powerpc/boot/dts/mpc8548cds_36b.dts b/arch/powerpc/boot/dts/mpc8548cds_36b.dts
new file mode 100644
index 000000000000..10e551b11bd6
--- /dev/null
+++ b/arch/powerpc/boot/dts/mpc8548cds_36b.dts
@@ -0,0 +1,86 @@
1/*
2 * MPC8548 CDS Device Tree Source (36-bit address map)
3 *
4 * Copyright 2012 Freescale Semiconductor Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12/include/ "fsl/mpc8548si-pre.dtsi"
13
14/ {
15 model = "MPC8548CDS";
16 compatible = "MPC8548CDS", "MPC85xxCDS";
17
18 memory {
19 device_type = "memory";
20 reg = <0 0 0x0 0x8000000>; // 128M at 0x0
21 };
22
23 board_lbc: lbc: localbus@fe0005000 {
24 reg = <0xf 0xe0005000 0 0x1000>;
25
26 ranges = <0x0 0x0 0xf 0xff000000 0x01000000
27 0x1 0x0 0xf 0xf8004000 0x00001000>;
28
29 };
30
31 board_soc: soc: soc8548@fe0000000 {
32 ranges = <0 0xf 0xe0000000 0x100000>;
33 };
34
35 board_pci0: pci0: pci@fe0008000 {
36 reg = <0xf 0xe0008000 0 0x1000>;
37 ranges = <0x2000000 0x0 0xe0000000 0xc 0x00000000 0x0 0x10000000
38 0x1000000 0x0 0x00000000 0xf 0xe2000000 0x0 0x800000>;
39 clock-frequency = <66666666>;
40 };
41
42 pci1: pci@fe0009000 {
43 reg = <0xf 0xe0009000 0 0x1000>;
44 ranges = <0x2000000 0x0 0xe0000000 0xc 0x10000000 0x0 0x10000000
45 0x1000000 0x0 0x00000000 0xf 0xe2800000 0x0 0x800000>;
46 clock-frequency = <66666666>;
47 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
48 interrupt-map = <
49
50 /* IDSEL 0x15 */
51 0xa800 0x0 0x0 0x1 &mpic 0xb 0x1 0 0
52 0xa800 0x0 0x0 0x2 &mpic 0x1 0x1 0 0
53 0xa800 0x0 0x0 0x3 &mpic 0x2 0x1 0 0
54 0xa800 0x0 0x0 0x4 &mpic 0x3 0x1 0 0>;
55 };
56
57 pci2: pcie@fe000a000 {
58 reg = <0xf 0xe000a000 0 0x1000>;
59 ranges = <0x2000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
60 0x1000000 0x0 0x00000000 0xf 0xe3000000 0x0 0x100000>;
61 pcie@0 {
62 ranges = <0x2000000 0x0 0xa0000000
63 0x2000000 0x0 0xa0000000
64 0x0 0x20000000
65
66 0x1000000 0x0 0x0
67 0x1000000 0x0 0x0
68 0x0 0x100000>;
69 };
70 };
71
72 rio: rapidio@fe00c0000 {
73 reg = <0xf 0xe00c0000 0x0 0x20000>;
74 port1 {
75 ranges = <0x0 0x0 0xc 0x40000000 0x0 0x20000000>;
76 };
77 };
78};
79
80/*
81 * mpc8548cds.dtsi must be last to ensure board_pci0 overrides pci0 settings
82 * for interrupt-map & interrupt-map-mask.
83 */
84
85/include/ "fsl/mpc8548si-post.dtsi"
86/include/ "mpc8548cds.dtsi"
diff --git a/arch/powerpc/boot/dts/mpc8572ds.dtsi b/arch/powerpc/boot/dts/mpc8572ds.dtsi
index c3d4fac0532a..14178944e220 100644
--- a/arch/powerpc/boot/dts/mpc8572ds.dtsi
+++ b/arch/powerpc/boot/dts/mpc8572ds.dtsi
@@ -41,37 +41,47 @@
41 bank-width = <2>; 41 bank-width = <2>;
42 device-width = <1>; 42 device-width = <1>;
43 43
44 ramdisk@0 { 44 partition@0 {
45 reg = <0x0 0x03000000>; 45 reg = <0x0 0x03000000>;
46 read-only; 46 label = "ramdisk-nor";
47 }; 47 };
48 48
49 diagnostic@3000000 { 49 partition@3000000 {
50 reg = <0x03000000 0x00e00000>; 50 reg = <0x03000000 0x00e00000>;
51 label = "diagnostic-nor";
51 read-only; 52 read-only;
52 }; 53 };
53 54
54 dink@3e00000 { 55 partition@3e00000 {
55 reg = <0x03e00000 0x00200000>; 56 reg = <0x03e00000 0x00200000>;
57 label = "dink-nor";
56 read-only; 58 read-only;
57 }; 59 };
58 60
59 kernel@4000000 { 61 partition@4000000 {
60 reg = <0x04000000 0x00400000>; 62 reg = <0x04000000 0x00400000>;
61 read-only; 63 label = "kernel-nor";
62 }; 64 };
63 65
64 jffs2@4400000 { 66 partition@4400000 {
65 reg = <0x04400000 0x03b00000>; 67 reg = <0x04400000 0x03b00000>;
68 label = "fs-nor";
69 };
70
71 partition@7f00000 {
72 reg = <0x07f00000 0x00060000>;
73 label = "dtb-nor";
66 }; 74 };
67 75
68 dtb@7f00000 { 76 partition@7f60000 {
69 reg = <0x07f00000 0x00080000>; 77 reg = <0x07f60000 0x00020000>;
78 label = "env-nor";
70 read-only; 79 read-only;
71 }; 80 };
72 81
73 u-boot@7f80000 { 82 partition@7f80000 {
74 reg = <0x07f80000 0x00080000>; 83 reg = <0x07f80000 0x00080000>;
84 label = "u-boot-nor";
75 read-only; 85 read-only;
76 }; 86 };
77 }; 87 };
@@ -83,31 +93,35 @@
83 "fsl,elbc-fcm-nand"; 93 "fsl,elbc-fcm-nand";
84 reg = <0x2 0x0 0x40000>; 94 reg = <0x2 0x0 0x40000>;
85 95
86 u-boot@0 { 96 partition@0 {
87 reg = <0x0 0x02000000>; 97 reg = <0x0 0x02000000>;
98 label = "u-boot-nand";
88 read-only; 99 read-only;
89 }; 100 };
90 101
91 jffs2@2000000 { 102 partition@2000000 {
92 reg = <0x02000000 0x10000000>; 103 reg = <0x02000000 0x10000000>;
104 label = "fs-nand";
93 }; 105 };
94 106
95 ramdisk@12000000 { 107 partition@12000000 {
96 reg = <0x12000000 0x08000000>; 108 reg = <0x12000000 0x08000000>;
97 read-only; 109 label = "ramdisk-nand";
98 }; 110 };
99 111
100 kernel@1a000000 { 112 partition@1a000000 {
101 reg = <0x1a000000 0x04000000>; 113 reg = <0x1a000000 0x04000000>;
114 label = "kernel-nand";
102 }; 115 };
103 116
104 dtb@1e000000 { 117 partition@1e000000 {
105 reg = <0x1e000000 0x01000000>; 118 reg = <0x1e000000 0x01000000>;
106 read-only; 119 label = "dtb-nand";
107 }; 120 };
108 121
109 empty@1f000000 { 122 partition@1f000000 {
110 reg = <0x1f000000 0x21000000>; 123 reg = <0x1f000000 0x21000000>;
124 label = "empty-nand";
111 }; 125 };
112 }; 126 };
113 127
diff --git a/arch/powerpc/boot/dts/p1010rdb.dtsi b/arch/powerpc/boot/dts/p1010rdb.dtsi
index d4c4a7730285..49776143a1b8 100644
--- a/arch/powerpc/boot/dts/p1010rdb.dtsi
+++ b/arch/powerpc/boot/dts/p1010rdb.dtsi
@@ -138,7 +138,7 @@
138 #size-cells = <1>; 138 #size-cells = <1>;
139 compatible = "spansion,s25sl12801"; 139 compatible = "spansion,s25sl12801";
140 reg = <0>; 140 reg = <0>;
141 spi-max-frequency = <50000000>; 141 spi-max-frequency = <40000000>;
142 142
143 partition@0 { 143 partition@0 {
144 /* 1MB for u-boot Bootloader Image */ 144 /* 1MB for u-boot Bootloader Image */
@@ -196,7 +196,7 @@
196 }; 196 };
197 197
198 tbi-phy@3 { 198 tbi-phy@3 {
199 device-type = "tbi-phy"; 199 device_type = "tbi-phy";
200 reg = <0x3>; 200 reg = <0x3>;
201 }; 201 };
202 }; 202 };
diff --git a/arch/powerpc/boot/dts/p1020rdb-pc.dtsi b/arch/powerpc/boot/dts/p1020rdb-pc.dtsi
new file mode 100644
index 000000000000..c952cd37cf6d
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020rdb-pc.dtsi
@@ -0,0 +1,247 @@
1/*
2 * P1020 RDB-PC Device Tree Source stub (no addresses or top-level ranges)
3 *
4 * Copyright 2012 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35&lbc {
36 nor@0,0 {
37 #address-cells = <1>;
38 #size-cells = <1>;
39 compatible = "cfi-flash";
40 reg = <0x0 0x0 0x1000000>;
41 bank-width = <2>;
42 device-width = <1>;
43
44 partition@0 {
45 /* This location must not be altered */
46 /* 256KB for Vitesse 7385 Switch firmware */
47 reg = <0x0 0x00040000>;
48 label = "NOR Vitesse-7385 Firmware";
49 read-only;
50 };
51
52 partition@40000 {
53 /* 256KB for DTB Image */
54 reg = <0x00040000 0x00040000>;
55 label = "NOR DTB Image";
56 };
57
58 partition@80000 {
59 /* 3.5 MB for Linux Kernel Image */
60 reg = <0x00080000 0x00380000>;
61 label = "NOR Linux Kernel Image";
62 };
63
64 partition@400000 {
65 /* 11MB for JFFS2 based Root file System */
66 reg = <0x00400000 0x00b00000>;
67 label = "NOR JFFS2 Root File System";
68 };
69
70 partition@f00000 {
71 /* This location must not be altered */
72 /* 512KB for u-boot Bootloader Image */
73 /* 512KB for u-boot Environment Variables */
74 reg = <0x00f00000 0x00100000>;
75 label = "NOR U-Boot Image";
76 read-only;
77 };
78 };
79
80 nand@1,0 {
81 #address-cells = <1>;
82 #size-cells = <1>;
83 compatible = "fsl,p1020-fcm-nand",
84 "fsl,elbc-fcm-nand";
85 reg = <0x1 0x0 0x40000>;
86
87 partition@0 {
88 /* This location must not be altered */
89 /* 1MB for u-boot Bootloader Image */
90 reg = <0x0 0x00100000>;
91 label = "NAND U-Boot Image";
92 read-only;
93 };
94
95 partition@100000 {
96 /* 1MB for DTB Image */
97 reg = <0x00100000 0x00100000>;
98 label = "NAND DTB Image";
99 };
100
101 partition@200000 {
102 /* 4MB for Linux Kernel Image */
103 reg = <0x00200000 0x00400000>;
104 label = "NAND Linux Kernel Image";
105 };
106
107 partition@600000 {
108 /* 4MB for Compressed Root file System Image */
109 reg = <0x00600000 0x00400000>;
110 label = "NAND Compressed RFS Image";
111 };
112
113 partition@a00000 {
114 /* 7MB for JFFS2 based Root file System */
115 reg = <0x00a00000 0x00700000>;
116 label = "NAND JFFS2 Root File System";
117 };
118
119 partition@1100000 {
120 /* 15MB for JFFS2 based Root file System */
121 reg = <0x01100000 0x00f00000>;
122 label = "NAND Writable User area";
123 };
124 };
125
126 L2switch@2,0 {
127 #address-cells = <1>;
128 #size-cells = <1>;
129 compatible = "vitesse-7385";
130 reg = <0x2 0x0 0x20000>;
131 };
132
133 cpld@3,0 {
134 #address-cells = <1>;
135 #size-cells = <1>;
136 compatible = "cpld";
137 reg = <0x3 0x0 0x20000>;
138 read-only;
139 };
140};
141
142&soc {
143 i2c@3000 {
144 rtc@68 {
145 compatible = "pericom,pt7c4338";
146 reg = <0x68>;
147 };
148 };
149
150 spi@7000 {
151 flash@0 {
152 #address-cells = <1>;
153 #size-cells = <1>;
154 compatible = "spansion,s25sl12801";
155 reg = <0>;
156 spi-max-frequency = <40000000>; /* input clock */
157
158 partition@u-boot {
159 /* 512KB for u-boot Bootloader Image */
160 reg = <0x0 0x00080000>;
161 label = "u-boot";
162 read-only;
163 };
164
165 partition@dtb {
166 /* 512KB for DTB Image*/
167 reg = <0x00080000 0x00080000>;
168 label = "dtb";
169 };
170
171 partition@kernel {
172 /* 4MB for Linux Kernel Image */
173 reg = <0x00100000 0x00400000>;
174 label = "kernel";
175 };
176
177 partition@fs {
178 /* 4MB for Compressed RFS Image */
179 reg = <0x00500000 0x00400000>;
180 label = "file system";
181 };
182
183 partition@jffs-fs {
184 /* 7MB for JFFS2 based RFS */
185 reg = <0x00900000 0x00700000>;
186 label = "file system jffs2";
187 };
188 };
189 };
190
191 usb@22000 {
192 phy_type = "ulpi";
193 };
194
195 /* USB2 is shared with localbus, so it must be disabled
196 by default. We can't put 'status = "disabled";' here
197 since U-Boot doesn't clear the status property when
198 it enables USB2. OTOH, U-Boot does create a new node
199 when there isn't any. So, just comment it out.
200 usb@23000 {
201 phy_type = "ulpi";
202 };
203 */
204
205 mdio@24000 {
206 phy0: ethernet-phy@0 {
207 interrupt-parent = <&mpic>;
208 interrupts = <3 1>;
209 reg = <0x0>;
210 };
211
212 phy1: ethernet-phy@1 {
213 interrupt-parent = <&mpic>;
214 interrupts = <2 1>;
215 reg = <0x1>;
216 };
217
218 tbi0: tbi-phy@11 {
219 device_type = "tbi-phy";
220 reg = <0x11>;
221 };
222 };
223
224 mdio@25000 {
225 tbi1: tbi-phy@11 {
226 reg = <0x11>;
227 device_type = "tbi-phy";
228 };
229 };
230
231 enet0: ethernet@b0000 {
232 fixed-link = <1 1 1000 0 0>;
233 phy-connection-type = "rgmii-id";
234
235 };
236
237 enet1: ethernet@b1000 {
238 phy-handle = <&phy0>;
239 tbi-handle = <&tbi1>;
240 phy-connection-type = "sgmii";
241 };
242
243 enet2: ethernet@b2000 {
244 phy-handle = <&phy1>;
245 phy-connection-type = "rgmii-id";
246 };
247};
diff --git a/arch/powerpc/boot/dts/p1020rdb-pc_32b.dts b/arch/powerpc/boot/dts/p1020rdb-pc_32b.dts
new file mode 100644
index 000000000000..4de69b726dc5
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020rdb-pc_32b.dts
@@ -0,0 +1,90 @@
1/*
2 * P1020 RDB-PC Device Tree Source (32-bit address map)
3 *
4 * Copyright 2012 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/include/ "fsl/p1020si-pre.dtsi"
36/ {
37 model = "fsl,P1020RDB-PC";
38 compatible = "fsl,P1020RDB-PC";
39
40 memory {
41 device_type = "memory";
42 };
43
44 lbc: localbus@ffe05000 {
45 reg = <0 0xffe05000 0 0x1000>;
46
47 /* NOR, NAND Flashes and Vitesse 5 port L2 switch */
48 ranges = <0x0 0x0 0x0 0xef000000 0x01000000
49 0x1 0x0 0x0 0xff800000 0x00040000
50 0x2 0x0 0x0 0xffb00000 0x00020000
51 0x3 0x0 0x0 0xffa00000 0x00020000>;
52 };
53
54 soc: soc@ffe00000 {
55 ranges = <0x0 0x0 0xffe00000 0x100000>;
56 };
57
58 pci0: pcie@ffe09000 {
59 ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
60 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
61 reg = <0 0xffe09000 0 0x1000>;
62 pcie@0 {
63 ranges = <0x2000000 0x0 0xa0000000
64 0x2000000 0x0 0xa0000000
65 0x0 0x20000000
66
67 0x1000000 0x0 0x0
68 0x1000000 0x0 0x0
69 0x0 0x100000>;
70 };
71 };
72
73 pci1: pcie@ffe0a000 {
74 reg = <0 0xffe0a000 0 0x1000>;
75 ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
76 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
77 pcie@0 {
78 ranges = <0x2000000 0x0 0x80000000
79 0x2000000 0x0 0x80000000
80 0x0 0x20000000
81
82 0x1000000 0x0 0x0
83 0x1000000 0x0 0x0
84 0x0 0x100000>;
85 };
86 };
87};
88
89/include/ "p1020rdb-pc.dtsi"
90/include/ "fsl/p1020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1020rdb-pc_36b.dts b/arch/powerpc/boot/dts/p1020rdb-pc_36b.dts
new file mode 100644
index 000000000000..5237da7441bc
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020rdb-pc_36b.dts
@@ -0,0 +1,90 @@
1/*
2 * P1020 RDB-PC Device Tree Source (36-bit address map)
3 *
4 * Copyright 2012 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/include/ "fsl/p1020si-pre.dtsi"
36/ {
37 model = "fsl,P1020RDB-PC";
38 compatible = "fsl,P1020RDB-PC";
39
40 memory {
41 device_type = "memory";
42 };
43
44 lbc: localbus@fffe05000 {
45 reg = <0xf 0xffe05000 0 0x1000>;
46
47 /* NOR, NAND Flashes and Vitesse 5 port L2 switch */
48 ranges = <0x0 0x0 0xf 0xef000000 0x01000000
49 0x1 0x0 0xf 0xff800000 0x00040000
50 0x2 0x0 0xf 0xffb00000 0x00040000
51 0x3 0x0 0xf 0xffa00000 0x00020000>;
52 };
53
54 soc: soc@fffe00000 {
55 ranges = <0x0 0xf 0xffe00000 0x100000>;
56 };
57
58 pci0: pcie@fffe09000 {
59 reg = <0xf 0xffe09000 0 0x1000>;
60 ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000
61 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
62 pcie@0 {
63 ranges = <0x2000000 0x0 0xc0000000
64 0x2000000 0x0 0xc0000000
65 0x0 0x20000000
66
67 0x1000000 0x0 0x0
68 0x1000000 0x0 0x0
69 0x0 0x100000>;
70 };
71 };
72
73 pci1: pcie@fffe0a000 {
74 reg = <0xf 0xffe0a000 0 0x1000>;
75 ranges = <0x2000000 0x0 0x80000000 0xc 0x00000000 0x0 0x20000000
76 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
77 pcie@0 {
78 ranges = <0x2000000 0x0 0x80000000
79 0x2000000 0x0 0x80000000
80 0x0 0x20000000
81
82 0x1000000 0x0 0x0
83 0x1000000 0x0 0x0
84 0x0 0x100000>;
85 };
86 };
87};
88
89/include/ "p1020rdb-pc.dtsi"
90/include/ "fsl/p1020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1020rdb-pc_camp_core0.dts b/arch/powerpc/boot/dts/p1020rdb-pc_camp_core0.dts
new file mode 100644
index 000000000000..f411515937ec
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020rdb-pc_camp_core0.dts
@@ -0,0 +1,64 @@
1/*
2 * P1020 RDB-PC Core0 Device Tree Source in CAMP mode.
3 *
4 * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache
5 * can be shared, all the other devices must be assigned to one core only.
6 * This dts file allows core0 to have memory, l2, i2c, spi, gpio, tdm, dma, usb,
7 * eth1, eth2, sdhc, crypto, global-util, message, pci0, pci1, msi.
8 *
9 * Please note to add "-b 0" for core0's dts compiling.
10 *
11 * Copyright 2012 Freescale Semiconductor Inc.
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 */
18
19/include/ "p1020rdb-pc_32b.dts"
20
21/ {
22 model = "fsl,P1020RDB-PC";
23 compatible = "fsl,P1020RDB-PC";
24
25 aliases {
26 ethernet1 = &enet1;
27 ethernet2 = &enet2;
28 serial0 = &serial0;
29 pci0 = &pci0;
30 pci1 = &pci1;
31 };
32
33 cpus {
34 PowerPC,P1020@1 {
35 status = "disabled";
36 };
37 };
38
39 memory {
40 device_type = "memory";
41 };
42
43 localbus@ffe05000 {
44 status = "disabled";
45 };
46
47 soc@ffe00000 {
48 serial1: serial@4600 {
49 status = "disabled";
50 };
51
52 enet0: ethernet@b0000 {
53 status = "disabled";
54 };
55
56 mpic: pic@40000 {
57 protected-sources = <
58 42 29 30 34 /* serial1, enet0-queue-group0 */
59 17 18 24 45 /* enet0-queue-group1, crypto */
60 >;
61 pic-no-reset;
62 };
63 };
64};
diff --git a/arch/powerpc/boot/dts/p1020rdb-pc_camp_core1.dts b/arch/powerpc/boot/dts/p1020rdb-pc_camp_core1.dts
new file mode 100644
index 000000000000..a91335ad82c2
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1020rdb-pc_camp_core1.dts
@@ -0,0 +1,142 @@
1/*
2 * P1020 RDB-PC Core1 Device Tree Source in CAMP mode.
3 *
4 * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache
5 * can be shared, all the other devices must be assigned to one core only.
6 * This dts allows core1 to have l2, eth0, crypto.
7 *
8 * Please note to add "-b 1" for core1's dts compiling.
9 *
10 * Copyright 2012 Freescale Semiconductor Inc.
11 *
12 * This program is free software; you can redistribute it and/or modify it
13 * under the terms of the GNU General Public License as published by the
14 * Free Software Foundation; either version 2 of the License, or (at your
15 * option) any later version.
16 */
17
18/include/ "p1020rdb-pc_32b.dts"
19
20/ {
21 model = "fsl,P1020RDB-PC";
22 compatible = "fsl,P1020RDB-PC";
23
24 aliases {
25 ethernet0 = &enet0;
26 serial0 = &serial1;
27 };
28
29 cpus {
30 PowerPC,P1020@0 {
31 status = "disabled";
32 };
33 };
34
35 memory {
36 device_type = "memory";
37 };
38
39 localbus@ffe05000 {
40 status = "disabled";
41 };
42
43 soc@ffe00000 {
44 ecm-law@0 {
45 status = "disabled";
46 };
47
48 ecm@1000 {
49 status = "disabled";
50 };
51
52 memory-controller@2000 {
53 status = "disabled";
54 };
55
56 i2c@3000 {
57 status = "disabled";
58 };
59
60 i2c@3100 {
61 status = "disabled";
62 };
63
64 serial0: serial@4500 {
65 status = "disabled";
66 };
67
68 spi@7000 {
69 status = "disabled";
70 };
71
72 gpio: gpio-controller@f000 {
73 status = "disabled";
74 };
75
76 dma@21300 {
77 status = "disabled";
78 };
79
80 mdio@24000 {
81 status = "disabled";
82 };
83
84 mdio@25000 {
85 status = "disabled";
86 };
87
88 enet1: ethernet@b1000 {
89 status = "disabled";
90 };
91
92 enet2: ethernet@b2000 {
93 status = "disabled";
94 };
95
96 usb@22000 {
97 status = "disabled";
98 };
99
100 sdhci@2e000 {
101 status = "disabled";
102 };
103
104 mpic: pic@40000 {
105 protected-sources = <
106 16 /* ecm, mem, L2, pci0, pci1 */
107 43 42 59 /* i2c, serial0, spi */
108 47 63 62 /* gpio, tdm */
109 20 21 22 23 /* dma */
110 03 02 /* mdio */
111 35 36 40 /* enet1-queue-group0 */
112 51 52 67 /* enet1-queue-group1 */
113 31 32 33 /* enet2-queue-group0 */
114 25 26 27 /* enet2-queue-group1 */
115 28 72 58 /* usb, sdhci, crypto */
116 0xb0 0xb1 0xb2 /* message */
117 0xb3 0xb4 0xb5
118 0xb6 0xb7
119 0xe0 0xe1 0xe2 /* msi */
120 0xe3 0xe4 0xe5
121 0xe6 0xe7 /* sdhci, crypto , pci */
122 >;
123 pic-no-reset;
124 };
125
126 msi@41600 {
127 status = "disabled";
128 };
129
130 global-utilities@e0000 { //global utilities block
131 status = "disabled";
132 };
133 };
134
135 pci0: pcie@ffe09000 {
136 status = "disabled";
137 };
138
139 pci1: pcie@ffe0a000 {
140 status = "disabled";
141 };
142};
diff --git a/arch/powerpc/boot/dts/p1020rdb.dtsi b/arch/powerpc/boot/dts/p1020rdb.dtsi
index b5bd86f4baf2..1fb7e0e0940f 100644
--- a/arch/powerpc/boot/dts/p1020rdb.dtsi
+++ b/arch/powerpc/boot/dts/p1020rdb.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * P1020 RDB Device Tree Source stub (no addresses or top-level ranges) 2 * P1020 RDB Device Tree Source stub (no addresses or top-level ranges)
3 * 3 *
4 * Copyright 2011 Freescale Semiconductor Inc. 4 * Copyright 2011-2012 Freescale Semiconductor Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -190,17 +190,16 @@
190 190
191 usb@22000 { 191 usb@22000 {
192 phy_type = "ulpi"; 192 phy_type = "ulpi";
193 dr_mode = "host";
193 }; 194 };
194 195
195 /* USB2 is shared with localbus, so it must be disabled 196 /* USB2 is shared with localbus. It is used
196 by default. We can't put 'status = "disabled";' here 197 only in case of SPI and SD boot after
197 since U-Boot doesn't clear the status property when 198 appropriate device-tree fixup done by uboot */
198 it enables USB2. OTOH, U-Boot does create a new node
199 when there isn't any. So, just comment it out.
200 usb@23000 { 199 usb@23000 {
201 phy_type = "ulpi"; 200 phy_type = "ulpi";
201 dr_mode = "host";
202 }; 202 };
203 */
204 203
205 mdio@24000 { 204 mdio@24000 {
206 phy0: ethernet-phy@0 { 205 phy0: ethernet-phy@0 {
diff --git a/arch/powerpc/boot/dts/p1021mds.dts b/arch/powerpc/boot/dts/p1021mds.dts
index d9540791e434..97116f198a37 100644
--- a/arch/powerpc/boot/dts/p1021mds.dts
+++ b/arch/powerpc/boot/dts/p1021mds.dts
@@ -1,7 +1,7 @@
1/* 1/*
2 * P1021 MDS Device Tree Source 2 * P1021 MDS Device Tree Source
3 * 3 *
4 * Copyright 2010 Freescale Semiconductor Inc. 4 * Copyright 2010,2012 Freescale Semiconductor Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the 7 * under the terms of the GNU General Public License as published by the
@@ -151,6 +151,7 @@
151 151
152 usb@22000 { 152 usb@22000 {
153 phy_type = "ulpi"; 153 phy_type = "ulpi";
154 dr_mode = "host";
154 }; 155 };
155 156
156 mdio@24000 { 157 mdio@24000 {
diff --git a/arch/powerpc/boot/dts/p1021rdb.dts b/arch/powerpc/boot/dts/p1021rdb.dts
new file mode 100644
index 000000000000..90b6b4caa273
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1021rdb.dts
@@ -0,0 +1,96 @@
1/*
2 * P1021 RDB Device Tree Source
3 *
4 * Copyright 2011 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/include/ "fsl/p1021si-pre.dtsi"
36/ {
37 model = "fsl,P1021RDB";
38 compatible = "fsl,P1021RDB-PC";
39
40 memory {
41 device_type = "memory";
42 };
43
44 lbc: localbus@ffe05000 {
45 reg = <0 0xffe05000 0 0x1000>;
46
47 /* NOR, NAND Flashes and Vitesse 5 port L2 switch */
48 ranges = <0x0 0x0 0x0 0xef000000 0x01000000
49 0x1 0x0 0x0 0xff800000 0x00040000
50 0x2 0x0 0x0 0xffb00000 0x00020000>;
51 };
52
53 soc: soc@ffe00000 {
54 ranges = <0x0 0x0 0xffe00000 0x100000>;
55 };
56
57 pci0: pcie@ffe09000 {
58 ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000
59 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
60 reg = <0 0xffe09000 0 0x1000>;
61 pcie@0 {
62 ranges = <0x2000000 0x0 0xa0000000
63 0x2000000 0x0 0xa0000000
64 0x0 0x20000000
65
66 0x1000000 0x0 0x0
67 0x1000000 0x0 0x0
68 0x0 0x100000>;
69 };
70 };
71
72 pci1: pcie@ffe0a000 {
73 reg = <0 0xffe0a000 0 0x1000>;
74 ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000
75 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
76 pcie@0 {
77 ranges = <0x2000000 0x0 0x80000000
78 0x2000000 0x0 0x80000000
79 0x0 0x20000000
80
81 0x1000000 0x0 0x0
82 0x1000000 0x0 0x0
83 0x0 0x100000>;
84 };
85 };
86
87 qe: qe@ffe80000 {
88 ranges = <0x0 0x0 0xffe80000 0x40000>;
89 reg = <0 0xffe80000 0 0x480>;
90 brg-frequency = <0>;
91 bus-frequency = <0>;
92 };
93};
94
95/include/ "p1021rdb.dtsi"
96/include/ "fsl/p1021si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1021rdb.dtsi b/arch/powerpc/boot/dts/p1021rdb.dtsi
new file mode 100644
index 000000000000..b973461ab751
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1021rdb.dtsi
@@ -0,0 +1,236 @@
1/*
2 * P1021 RDB Device Tree Source stub (no addresses or top-level ranges)
3 *
4 * Copyright 2011 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35&lbc {
36 nor@0,0 {
37 #address-cells = <1>;
38 #size-cells = <1>;
39 compatible = "cfi-flash";
40 reg = <0x0 0x0 0x1000000>;
41 bank-width = <2>;
42 device-width = <1>;
43
44 partition@0 {
45 /* This location must not be altered */
46 /* 256KB for Vitesse 7385 Switch firmware */
47 reg = <0x0 0x00040000>;
48 label = "NOR Vitesse-7385 Firmware";
49 read-only;
50 };
51
52 partition@40000 {
53 /* 256KB for DTB Image */
54 reg = <0x00040000 0x00040000>;
55 label = "NOR DTB Image";
56 };
57
58 partition@80000 {
59 /* 3.5 MB for Linux Kernel Image */
60 reg = <0x00080000 0x00380000>;
61 label = "NOR Linux Kernel Image";
62 };
63
64 partition@400000 {
65 /* 11MB for JFFS2 based Root file System */
66 reg = <0x00400000 0x00b00000>;
67 label = "NOR JFFS2 Root File System";
68 };
69
70 partition@f00000 {
71 /* This location must not be altered */
72 /* 512KB for u-boot Bootloader Image */
73 /* 512KB for u-boot Environment Variables */
74 reg = <0x00f00000 0x00100000>;
75 label = "NOR U-Boot Image";
76 };
77 };
78
79 nand@1,0 {
80 #address-cells = <1>;
81 #size-cells = <1>;
82 compatible = "fsl,p1021-fcm-nand",
83 "fsl,elbc-fcm-nand";
84 reg = <0x1 0x0 0x40000>;
85
86 partition@0 {
87 /* This location must not be altered */
88 /* 1MB for u-boot Bootloader Image */
89 reg = <0x0 0x00100000>;
90 label = "NAND U-Boot Image";
91 read-only;
92 };
93
94 partition@100000 {
95 /* 1MB for DTB Image */
96 reg = <0x00100000 0x00100000>;
97 label = "NAND DTB Image";
98 };
99
100 partition@200000 {
101 /* 4MB for Linux Kernel Image */
102 reg = <0x00200000 0x00400000>;
103 label = "NAND Linux Kernel Image";
104 };
105
106 partition@600000 {
107 /* 4MB for Compressed Root file System Image */
108 reg = <0x00600000 0x00400000>;
109 label = "NAND Compressed RFS Image";
110 };
111
112 partition@a00000 {
113 /* 7MB for JFFS2 based Root file System */
114 reg = <0x00a00000 0x00700000>;
115 label = "NAND JFFS2 Root File System";
116 };
117
118 partition@1100000 {
119 /* 15MB for User Writable Area */
120 reg = <0x01100000 0x00f00000>;
121 label = "NAND Writable User area";
122 };
123 };
124
125 L2switch@2,0 {
126 #address-cells = <1>;
127 #size-cells = <1>;
128 compatible = "vitesse-7385";
129 reg = <0x2 0x0 0x20000>;
130 };
131};
132
133&soc {
134 i2c@3000 {
135 rtc@68 {
136 compatible = "pericom,pt7c4338";
137 reg = <0x68>;
138 };
139 };
140
141 spi@7000 {
142 flash@0 {
143 #address-cells = <1>;
144 #size-cells = <1>;
145 compatible = "spansion,s25sl12801";
146 reg = <0>;
147 spi-max-frequency = <40000000>; /* input clock */
148
149 partition@u-boot {
150 /* 512KB for u-boot Bootloader Image */
151 reg = <0x0 0x00080000>;
152 label = "SPI Flash U-Boot Image";
153 read-only;
154 };
155
156 partition@dtb {
157 /* 512KB for DTB Image */
158 reg = <0x00080000 0x00080000>;
159 label = "SPI Flash DTB Image";
160 };
161
162 partition@kernel {
163 /* 4MB for Linux Kernel Image */
164 reg = <0x00100000 0x00400000>;
165 label = "SPI Flash Linux Kernel Image";
166 };
167
168 partition@fs {
169 /* 4MB for Compressed RFS Image */
170 reg = <0x00500000 0x00400000>;
171 label = "SPI Flash Compressed RFSImage";
172 };
173
174 partition@jffs-fs {
175 /* 7MB for JFFS2 based RFS */
176 reg = <0x00900000 0x00700000>;
177 label = "SPI Flash JFFS2 RFS";
178 };
179 };
180 };
181
182 usb@22000 {
183 phy_type = "ulpi";
184 };
185
186 mdio@24000 {
187 phy0: ethernet-phy@0 {
188 interrupt-parent = <&mpic>;
189 interrupts = <3 1 0 0>;
190 reg = <0x0>;
191 };
192
193 phy1: ethernet-phy@1 {
194 interrupt-parent = <&mpic>;
195 interrupts = <2 1 0 0>;
196 reg = <0x1>;
197 };
198
199 tbi0: tbi-phy@11 {
200 reg = <0x11>;
201 device_type = "tbi-phy";
202 };
203 };
204
205 mdio@25000 {
206 tbi1: tbi-phy@11 {
207 reg = <0x11>;
208 device_type = "tbi-phy";
209 };
210 };
211
212 mdio@26000 {
213 tbi2: tbi-phy@11 {
214 reg = <0x11>;
215 device_type = "tbi-phy";
216 };
217 };
218
219 enet0: ethernet@b0000 {
220 fixed-link = <1 1 1000 0 0>;
221 phy-connection-type = "rgmii-id";
222
223 };
224
225 enet1: ethernet@b1000 {
226 phy-handle = <&phy0>;
227 tbi-handle = <&tbi1>;
228 phy-connection-type = "sgmii";
229 };
230
231 enet2: ethernet@b2000 {
232 phy-handle = <&phy1>;
233 tbi-handle = <&tbi2>;
234 phy-connection-type = "rgmii-id";
235 };
236};
diff --git a/arch/powerpc/boot/dts/p1021rdb_36b.dts b/arch/powerpc/boot/dts/p1021rdb_36b.dts
new file mode 100644
index 000000000000..ea6d8b5fa10b
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1021rdb_36b.dts
@@ -0,0 +1,96 @@
1/*
2 * P1021 RDB Device Tree Source (36-bit address map)
3 *
4 * Copyright 2011 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/include/ "fsl/p1021si-pre.dtsi"
36/ {
37 model = "fsl,P1021RDB";
38 compatible = "fsl,P1021RDB-PC";
39
40 memory {
41 device_type = "memory";
42 };
43
44 lbc: localbus@fffe05000 {
45 reg = <0xf 0xffe05000 0 0x1000>;
46
47 /* NOR, NAND Flashes and Vitesse 5 port L2 switch */
48 ranges = <0x0 0x0 0xf 0xef000000 0x01000000
49 0x1 0x0 0xf 0xff800000 0x00040000
50 0x2 0x0 0xf 0xffb00000 0x00020000>;
51 };
52
53 soc: soc@fffe00000 {
54 ranges = <0x0 0xf 0xffe00000 0x100000>;
55 };
56
57 pci0: pcie@fffe09000 {
58 ranges = <0x2000000 0x0 0xc0000000 0xc 0x20000000 0x0 0x20000000
59 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
60 reg = <0xf 0xffe09000 0 0x1000>;
61 pcie@0 {
62 ranges = <0x2000000 0x0 0xa0000000
63 0x2000000 0x0 0xa0000000
64 0x0 0x20000000
65
66 0x1000000 0x0 0x0
67 0x1000000 0x0 0x0
68 0x0 0x100000>;
69 };
70 };
71
72 pci1: pcie@fffe0a000 {
73 reg = <0xf 0xffe0a000 0 0x1000>;
74 ranges = <0x2000000 0x0 0x80000000 0xc 0x00000000 0x0 0x20000000
75 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
76 pcie@0 {
77 ranges = <0x2000000 0x0 0xc0000000
78 0x2000000 0x0 0xc0000000
79 0x0 0x20000000
80
81 0x1000000 0x0 0x0
82 0x1000000 0x0 0x0
83 0x0 0x100000>;
84 };
85 };
86
87 qe: qe@fffe80000 {
88 ranges = <0x0 0xf 0xffe80000 0x40000>;
89 reg = <0xf 0xffe80000 0 0x480>;
90 brg-frequency = <0>;
91 bus-frequency = <0>;
92 };
93};
94
95/include/ "p1021rdb.dtsi"
96/include/ "fsl/p1021si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1022ds.dts b/arch/powerpc/boot/dts/p1022ds.dts
deleted file mode 100644
index ef95717db4bc..000000000000
--- a/arch/powerpc/boot/dts/p1022ds.dts
+++ /dev/null
@@ -1,274 +0,0 @@
1/*
2 * P1022 DS 36Bit Physical Address Map Device Tree Source
3 *
4 * Copyright 2010 Freescale Semiconductor, Inc.
5 *
6 * This file is licensed under the terms of the GNU General Public License
7 * version 2. This program is licensed "as is" without any warranty of any
8 * kind, whether express or implied.
9 */
10
11/include/ "fsl/p1022si-pre.dtsi"
12/ {
13 model = "fsl,P1022DS";
14 compatible = "fsl,P1022DS";
15
16 memory {
17 device_type = "memory";
18 };
19
20 lbc: localbus@fffe05000 {
21 reg = <0xf 0xffe05000 0 0x1000>;
22 ranges = <0x0 0x0 0xf 0xe8000000 0x08000000
23 0x1 0x0 0xf 0xe0000000 0x08000000
24 0x2 0x0 0xf 0xff800000 0x00040000
25 0x3 0x0 0xf 0xffdf0000 0x00008000>;
26
27 /*
28 * This node is used to access the pixis via "indirect" mode,
29 * which is done by writing the pixis register index to chip
30 * select 0 and the value to/from chip select 1. Indirect
31 * mode is the only way to access the pixis when DIU video
32 * is enabled. Note that this assumes that the first column
33 * of the 'ranges' property above is the chip select number.
34 */
35 board-control@0,0 {
36 compatible = "fsl,p1022ds-indirect-pixis";
37 reg = <0x0 0x0 1 /* CS0 */
38 0x1 0x0 1>; /* CS1 */
39 };
40
41 nor@0,0 {
42 #address-cells = <1>;
43 #size-cells = <1>;
44 compatible = "cfi-flash";
45 reg = <0x0 0x0 0x8000000>;
46 bank-width = <2>;
47 device-width = <1>;
48
49 partition@0 {
50 reg = <0x0 0x03000000>;
51 label = "ramdisk-nor";
52 read-only;
53 };
54
55 partition@3000000 {
56 reg = <0x03000000 0x00e00000>;
57 label = "diagnostic-nor";
58 read-only;
59 };
60
61 partition@3e00000 {
62 reg = <0x03e00000 0x00200000>;
63 label = "dink-nor";
64 read-only;
65 };
66
67 partition@4000000 {
68 reg = <0x04000000 0x00400000>;
69 label = "kernel-nor";
70 read-only;
71 };
72
73 partition@4400000 {
74 reg = <0x04400000 0x03b00000>;
75 label = "jffs2-nor";
76 };
77
78 partition@7f00000 {
79 reg = <0x07f00000 0x00080000>;
80 label = "dtb-nor";
81 read-only;
82 };
83
84 partition@7f80000 {
85 reg = <0x07f80000 0x00080000>;
86 label = "u-boot-nor";
87 read-only;
88 };
89 };
90
91 nand@2,0 {
92 #address-cells = <1>;
93 #size-cells = <1>;
94 compatible = "fsl,elbc-fcm-nand";
95 reg = <0x2 0x0 0x40000>;
96
97 partition@0 {
98 reg = <0x0 0x02000000>;
99 label = "u-boot-nand";
100 read-only;
101 };
102
103 partition@2000000 {
104 reg = <0x02000000 0x10000000>;
105 label = "jffs2-nand";
106 };
107
108 partition@12000000 {
109 reg = <0x12000000 0x10000000>;
110 label = "ramdisk-nand";
111 read-only;
112 };
113
114 partition@22000000 {
115 reg = <0x22000000 0x04000000>;
116 label = "kernel-nand";
117 };
118
119 partition@26000000 {
120 reg = <0x26000000 0x01000000>;
121 label = "dtb-nand";
122 read-only;
123 };
124
125 partition@27000000 {
126 reg = <0x27000000 0x19000000>;
127 label = "reserved-nand";
128 };
129 };
130
131 board-control@3,0 {
132 compatible = "fsl,p1022ds-fpga", "fsl,fpga-ngpixis";
133 reg = <3 0 0x30>;
134 interrupt-parent = <&mpic>;
135 /*
136 * IRQ8 is generated if the "EVENT" switch is pressed
137 * and PX_CTL[EVESEL] is set to 00.
138 */
139 interrupts = <8 8 0 0>;
140 };
141 };
142
143 soc: soc@fffe00000 {
144 ranges = <0x0 0xf 0xffe00000 0x100000>;
145
146 i2c@3100 {
147 wm8776:codec@1a {
148 compatible = "wlf,wm8776";
149 reg = <0x1a>;
150 /*
151 * clock-frequency will be set by U-Boot if
152 * the clock is enabled.
153 */
154 };
155 };
156
157 spi@7000 {
158 flash@0 {
159 #address-cells = <1>;
160 #size-cells = <1>;
161 compatible = "spansion,s25sl12801";
162 reg = <0>;
163 spi-max-frequency = <40000000>; /* input clock */
164
165 partition@0 {
166 label = "u-boot-spi";
167 reg = <0x00000000 0x00100000>;
168 read-only;
169 };
170 partition@100000 {
171 label = "kernel-spi";
172 reg = <0x00100000 0x00500000>;
173 read-only;
174 };
175 partition@600000 {
176 label = "dtb-spi";
177 reg = <0x00600000 0x00100000>;
178 read-only;
179 };
180 partition@700000 {
181 label = "file system-spi";
182 reg = <0x00700000 0x00900000>;
183 };
184 };
185 };
186
187 ssi@15000 {
188 fsl,mode = "i2s-slave";
189 codec-handle = <&wm8776>;
190 fsl,ssi-asynchronous;
191 };
192
193 usb@22000 {
194 phy_type = "ulpi";
195 };
196
197 usb@23000 {
198 status = "disabled";
199 };
200
201 mdio@24000 {
202 phy0: ethernet-phy@0 {
203 interrupts = <3 1 0 0>;
204 reg = <0x1>;
205 };
206 phy1: ethernet-phy@1 {
207 interrupts = <9 1 0 0>;
208 reg = <0x2>;
209 };
210 tbi-phy@2 {
211 device_type = "tbi-phy";
212 reg = <0x2>;
213 };
214 };
215
216 ethernet@b0000 {
217 phy-handle = <&phy0>;
218 phy-connection-type = "rgmii-id";
219 };
220
221 ethernet@b1000 {
222 phy-handle = <&phy1>;
223 phy-connection-type = "rgmii-id";
224 };
225 };
226
227 pci0: pcie@fffe09000 {
228 reg = <0xf 0xffe09000 0 0x1000>;
229 ranges = <0x2000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
230 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
231 pcie@0 {
232 ranges = <0x2000000 0x0 0xe0000000
233 0x2000000 0x0 0xe0000000
234 0x0 0x20000000
235
236 0x1000000 0x0 0x0
237 0x1000000 0x0 0x0
238 0x0 0x100000>;
239 };
240 };
241
242 pci1: pcie@fffe0a000 {
243 reg = <0xf 0xffe0a000 0 0x1000>;
244 ranges = <0x2000000 0x0 0xe0000000 0xc 0x40000000 0x0 0x20000000
245 0x1000000 0x0 0x00000000 0xf 0xffc20000 0x0 0x10000>;
246 pcie@0 {
247 reg = <0x0 0x0 0x0 0x0 0x0>;
248 ranges = <0x2000000 0x0 0xe0000000
249 0x2000000 0x0 0xe0000000
250 0x0 0x20000000
251
252 0x1000000 0x0 0x0
253 0x1000000 0x0 0x0
254 0x0 0x100000>;
255 };
256 };
257
258 pci2: pcie@fffe0b000 {
259 reg = <0xf 0xffe0b000 0 0x1000>;
260 ranges = <0x2000000 0x0 0xe0000000 0xc 0x00000000 0x0 0x20000000
261 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
262 pcie@0 {
263 ranges = <0x2000000 0x0 0xe0000000
264 0x2000000 0x0 0xe0000000
265 0x0 0x20000000
266
267 0x1000000 0x0 0x0
268 0x1000000 0x0 0x0
269 0x0 0x100000>;
270 };
271 };
272};
273
274/include/ "fsl/p1022si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1022ds.dtsi b/arch/powerpc/boot/dts/p1022ds.dtsi
new file mode 100644
index 000000000000..7cdb505036bb
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1022ds.dtsi
@@ -0,0 +1,234 @@
1/*
2 * P1022 DS Device Tree Source stub (no addresses or top-level ranges)
3 *
4 * Copyright 2012 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35&board_lbc {
36 /*
37 * This node is used to access the pixis via "indirect" mode,
38 * which is done by writing the pixis register index to chip
39 * select 0 and the value to/from chip select 1. Indirect
40 * mode is the only way to access the pixis when DIU video
41 * is enabled. Note that this assumes that the first column
42 * of the 'ranges' property above is the chip select number.
43 */
44 board-control@0,0 {
45 compatible = "fsl,p1022ds-indirect-pixis";
46 reg = <0x0 0x0 1 /* CS0 */
47 0x1 0x0 1>; /* CS1 */
48 interrupt-parent = <&mpic>;
49 interrupts = <8 0 0 0>;
50 };
51
52 nor@0,0 {
53 #address-cells = <1>;
54 #size-cells = <1>;
55 compatible = "cfi-flash";
56 reg = <0x0 0x0 0x8000000>;
57 bank-width = <2>;
58 device-width = <1>;
59
60 partition@0 {
61 reg = <0x0 0x03000000>;
62 label = "ramdisk-nor";
63 read-only;
64 };
65
66 partition@3000000 {
67 reg = <0x03000000 0x00e00000>;
68 label = "diagnostic-nor";
69 read-only;
70 };
71
72 partition@3e00000 {
73 reg = <0x03e00000 0x00200000>;
74 label = "dink-nor";
75 read-only;
76 };
77
78 partition@4000000 {
79 reg = <0x04000000 0x00400000>;
80 label = "kernel-nor";
81 read-only;
82 };
83
84 partition@4400000 {
85 reg = <0x04400000 0x03b00000>;
86 label = "jffs2-nor";
87 };
88
89 partition@7f00000 {
90 reg = <0x07f00000 0x00080000>;
91 label = "dtb-nor";
92 read-only;
93 };
94
95 partition@7f80000 {
96 reg = <0x07f80000 0x00080000>;
97 label = "u-boot-nor";
98 read-only;
99 };
100 };
101
102 nand@2,0 {
103 #address-cells = <1>;
104 #size-cells = <1>;
105 compatible = "fsl,elbc-fcm-nand";
106 reg = <0x2 0x0 0x40000>;
107
108 partition@0 {
109 reg = <0x0 0x02000000>;
110 label = "u-boot-nand";
111 read-only;
112 };
113
114 partition@2000000 {
115 reg = <0x02000000 0x10000000>;
116 label = "jffs2-nand";
117 };
118
119 partition@12000000 {
120 reg = <0x12000000 0x10000000>;
121 label = "ramdisk-nand";
122 read-only;
123 };
124
125 partition@22000000 {
126 reg = <0x22000000 0x04000000>;
127 label = "kernel-nand";
128 };
129
130 partition@26000000 {
131 reg = <0x26000000 0x01000000>;
132 label = "dtb-nand";
133 read-only;
134 };
135
136 partition@27000000 {
137 reg = <0x27000000 0x19000000>;
138 label = "reserved-nand";
139 };
140 };
141
142 board-control@3,0 {
143 compatible = "fsl,p1022ds-fpga", "fsl,fpga-ngpixis";
144 reg = <3 0 0x30>;
145 interrupt-parent = <&mpic>;
146 /*
147 * IRQ8 is generated if the "EVENT" switch is pressed
148 * and PX_CTL[EVESEL] is set to 00.
149 */
150 interrupts = <8 0 0 0>;
151 };
152};
153
154&board_soc {
155 i2c@3100 {
156 wm8776:codec@1a {
157 compatible = "wlf,wm8776";
158 reg = <0x1a>;
159 /*
160 * clock-frequency will be set by U-Boot if
161 * the clock is enabled.
162 */
163 };
164 };
165
166 spi@7000 {
167 flash@0 {
168 #address-cells = <1>;
169 #size-cells = <1>;
170 compatible = "spansion,s25sl12801";
171 reg = <0>;
172 spi-max-frequency = <40000000>; /* input clock */
173
174 partition@0 {
175 label = "u-boot-spi";
176 reg = <0x00000000 0x00100000>;
177 read-only;
178 };
179 partition@100000 {
180 label = "kernel-spi";
181 reg = <0x00100000 0x00500000>;
182 read-only;
183 };
184 partition@600000 {
185 label = "dtb-spi";
186 reg = <0x00600000 0x00100000>;
187 read-only;
188 };
189 partition@700000 {
190 label = "file system-spi";
191 reg = <0x00700000 0x00900000>;
192 };
193 };
194 };
195
196 ssi@15000 {
197 fsl,mode = "i2s-slave";
198 codec-handle = <&wm8776>;
199 fsl,ssi-asynchronous;
200 };
201
202 usb@22000 {
203 phy_type = "ulpi";
204 };
205
206 usb@23000 {
207 status = "disabled";
208 };
209
210 mdio@24000 {
211 phy0: ethernet-phy@0 {
212 interrupts = <3 1 0 0>;
213 reg = <0x1>;
214 };
215 phy1: ethernet-phy@1 {
216 interrupts = <9 1 0 0>;
217 reg = <0x2>;
218 };
219 tbi-phy@2 {
220 device_type = "tbi-phy";
221 reg = <0x2>;
222 };
223 };
224
225 ethernet@b0000 {
226 phy-handle = <&phy0>;
227 phy-connection-type = "rgmii-id";
228 };
229
230 ethernet@b1000 {
231 phy-handle = <&phy1>;
232 phy-connection-type = "rgmii-id";
233 };
234};
diff --git a/arch/powerpc/boot/dts/p1022ds_32b.dts b/arch/powerpc/boot/dts/p1022ds_32b.dts
new file mode 100644
index 000000000000..d96cae00a9e3
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1022ds_32b.dts
@@ -0,0 +1,103 @@
1/*
2 * P1022 DS 32-bit Physical Address Map Device Tree Source
3 *
4 * Copyright 2012 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/include/ "fsl/p1022si-pre.dtsi"
36/ {
37 model = "fsl,P1022DS";
38 compatible = "fsl,P1022DS";
39
40 memory {
41 device_type = "memory";
42 };
43
44 board_lbc: lbc: localbus@ffe05000 {
45 ranges = <0x0 0x0 0x0 0xe8000000 0x08000000
46 0x1 0x0 0x0 0xe0000000 0x08000000
47 0x2 0x0 0x0 0xff800000 0x00040000
48 0x3 0x0 0x0 0xffdf0000 0x00008000>;
49 reg = <0x0 0xffe05000 0 0x1000>;
50 };
51
52 board_soc: soc: soc@ffe00000 {
53 ranges = <0x0 0x0 0xffe00000 0x100000>;
54 };
55
56 pci0: pcie@ffe09000 {
57 ranges = <0x2000000 0x0 0xe0000000 0 0xa0000000 0x0 0x20000000
58 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
59 reg = <0x0 0xffe09000 0 0x1000>;
60 pcie@0 {
61 ranges = <0x2000000 0x0 0xe0000000
62 0x2000000 0x0 0xe0000000
63 0x0 0x20000000
64
65 0x1000000 0x0 0x0
66 0x1000000 0x0 0x0
67 0x0 0x100000>;
68 };
69 };
70
71 pci1: pcie@ffe0a000 {
72 ranges = <0x2000000 0x0 0xe0000000 0 0xc0000000 0x0 0x20000000
73 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>;
74 reg = <0 0xffe0a000 0 0x1000>;
75 pcie@0 {
76 ranges = <0x2000000 0x0 0xe0000000
77 0x2000000 0x0 0xe0000000
78 0x0 0x20000000
79
80 0x1000000 0x0 0x0
81 0x1000000 0x0 0x0
82 0x0 0x100000>;
83 };
84 };
85
86 pci2: pcie@ffe0b000 {
87 ranges = <0x2000000 0x0 0xe0000000 0 0x80000000 0x0 0x20000000
88 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
89 reg = <0 0xffe0b000 0 0x1000>;
90 pcie@0 {
91 ranges = <0x2000000 0x0 0xe0000000
92 0x2000000 0x0 0xe0000000
93 0x0 0x20000000
94
95 0x1000000 0x0 0x0
96 0x1000000 0x0 0x0
97 0x0 0x100000>;
98 };
99 };
100};
101
102/include/ "fsl/p1022si-post.dtsi"
103/include/ "p1022ds.dtsi"
diff --git a/arch/powerpc/boot/dts/p1022ds_36b.dts b/arch/powerpc/boot/dts/p1022ds_36b.dts
new file mode 100644
index 000000000000..f7aacce40bf6
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1022ds_36b.dts
@@ -0,0 +1,103 @@
1/*
2 * P1022 DS 36-bit Physical Address Map Device Tree Source
3 *
4 * Copyright 2012 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor "AS IS" AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/include/ "fsl/p1022si-pre.dtsi"
36/ {
37 model = "fsl,P1022DS";
38 compatible = "fsl,P1022DS";
39
40 memory {
41 device_type = "memory";
42 };
43
44 board_lbc: lbc: localbus@fffe05000 {
45 ranges = <0x0 0x0 0xf 0xe8000000 0x08000000
46 0x1 0x0 0xf 0xe0000000 0x08000000
47 0x2 0x0 0xf 0xff800000 0x00040000
48 0x3 0x0 0xf 0xffdf0000 0x00008000>;
49 reg = <0xf 0xffe05000 0 0x1000>;
50 };
51
52 board_soc: soc: soc@fffe00000 {
53 ranges = <0x0 0xf 0xffe00000 0x100000>;
54 };
55
56 pci0: pcie@fffe09000 {
57 ranges = <0x2000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
58 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
59 reg = <0xf 0xffe09000 0 0x1000>;
60 pcie@0 {
61 ranges = <0x2000000 0x0 0xe0000000
62 0x2000000 0x0 0xe0000000
63 0x0 0x20000000
64
65 0x1000000 0x0 0x0
66 0x1000000 0x0 0x0
67 0x0 0x100000>;
68 };
69 };
70
71 pci1: pcie@fffe0a000 {
72 ranges = <0x2000000 0x0 0xe0000000 0xc 0x40000000 0x0 0x20000000
73 0x1000000 0x0 0x00000000 0xf 0xffc20000 0x0 0x10000>;
74 reg = <0xf 0xffe0a000 0 0x1000>;
75 pcie@0 {
76 ranges = <0x2000000 0x0 0xe0000000
77 0x2000000 0x0 0xe0000000
78 0x0 0x20000000
79
80 0x1000000 0x0 0x0
81 0x1000000 0x0 0x0
82 0x0 0x100000>;
83 };
84 };
85
86 pci2: pcie@fffe0b000 {
87 ranges = <0x2000000 0x0 0xe0000000 0xc 0x00000000 0x0 0x20000000
88 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
89 reg = <0xf 0xffe0b000 0 0x1000>;
90 pcie@0 {
91 ranges = <0x2000000 0x0 0xe0000000
92 0x2000000 0x0 0xe0000000
93 0x0 0x20000000
94
95 0x1000000 0x0 0x0
96 0x1000000 0x0 0x0
97 0x0 0x100000>;
98 };
99 };
100};
101
102/include/ "fsl/p1022si-post.dtsi"
103/include/ "p1022ds.dtsi"
diff --git a/arch/powerpc/boot/dts/p1025rdb.dtsi b/arch/powerpc/boot/dts/p1025rdb.dtsi
new file mode 100644
index 000000000000..cf3676fc714b
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1025rdb.dtsi
@@ -0,0 +1,286 @@
1/*
2 * P1025 RDB Device Tree Source stub (no addresses or top-level ranges)
3 *
4 * Copyright 2011 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35&lbc {
36 nor@0,0 {
37 #address-cells = <1>;
38 #size-cells = <1>;
39 compatible = "cfi-flash";
40 reg = <0x0 0x0 0x1000000>;
41 bank-width = <2>;
42 device-width = <1>;
43
44 partition@0 {
45 /* This location must not be altered */
46 /* 256KB for Vitesse 7385 Switch firmware */
47 reg = <0x0 0x00040000>;
48 label = "NOR Vitesse-7385 Firmware";
49 read-only;
50 };
51
52 partition@40000 {
53 /* 256KB for DTB Image */
54 reg = <0x00040000 0x00040000>;
55 label = "NOR DTB Image";
56 };
57
58 partition@80000 {
59 /* 3.5 MB for Linux Kernel Image */
60 reg = <0x00080000 0x00380000>;
61 label = "NOR Linux Kernel Image";
62 };
63
64 partition@400000 {
65 /* 11MB for JFFS2 based Root file System */
66 reg = <0x00400000 0x00b00000>;
67 label = "NOR JFFS2 Root File System";
68 };
69
70 partition@f00000 {
71 /* This location must not be altered */
72 /* 512KB for u-boot Bootloader Image */
73 /* 512KB for u-boot Environment Variables */
74 reg = <0x00f00000 0x00100000>;
75 label = "NOR U-Boot Image";
76 read-only;
77 };
78 };
79
80 nand@1,0 {
81 #address-cells = <1>;
82 #size-cells = <1>;
83 compatible = "fsl,p1025-fcm-nand",
84 "fsl,elbc-fcm-nand";
85 reg = <0x1 0x0 0x40000>;
86
87 partition@0 {
88 /* This location must not be altered */
89 /* 1MB for u-boot Bootloader Image */
90 reg = <0x0 0x00100000>;
91 label = "NAND U-Boot Image";
92 read-only;
93 };
94
95 partition@100000 {
96 /* 1MB for DTB Image */
97 reg = <0x00100000 0x00100000>;
98 label = "NAND DTB Image";
99 };
100
101 partition@200000 {
102 /* 4MB for Linux Kernel Image */
103 reg = <0x00200000 0x00400000>;
104 label = "NAND Linux Kernel Image";
105 };
106
107 partition@600000 {
108 /* 4MB for Compressed Root file System Image */
109 reg = <0x00600000 0x00400000>;
110 label = "NAND Compressed RFS Image";
111 };
112
113 partition@a00000 {
114 /* 7MB for JFFS2 based Root file System */
115 reg = <0x00a00000 0x00700000>;
116 label = "NAND JFFS2 Root File System";
117 };
118
119 partition@1100000 {
120 /* 15MB for JFFS2 based Root file System */
121 reg = <0x01100000 0x00f00000>;
122 label = "NAND Writable User area";
123 };
124 };
125
126};
127
128&soc {
129 i2c@3000 {
130 rtc@68 {
131 compatible = "dallas,ds1339";
132 reg = <0x68>;
133 };
134 };
135
136 spi@7000 {
137 flash@0 {
138 #address-cells = <1>;
139 #size-cells = <1>;
140 compatible = "spansion,s25sl12801";
141 reg = <0>;
142 spi-max-frequency = <40000000>; /* input clock */
143
144 partition@u-boot {
145 /* 512KB for u-boot Bootloader Image */
146 reg = <0x0 0x00080000>;
147 label = "u-boot";
148 read-only;
149 };
150
151 partition@dtb {
152 /* 512KB for DTB Image */
153 reg = <0x00080000 0x00080000>;
154 label = "dtb";
155 };
156
157 partition@kernel {
158 /* 4MB for Linux Kernel Image */
159 reg = <0x00100000 0x00400000>;
160 label = "kernel";
161 };
162
163 partition@fs {
164 /* 4MB for Compressed RFS Image */
165 reg = <0x00500000 0x00400000>;
166 label = "file system";
167 };
168
169 partition@jffs-fs {
170 /* 7MB for JFFS2 based RFS */
171 reg = <0x00900000 0x00700000>;
172 label = "file system jffs2";
173 };
174 };
175 };
176
177 usb@22000 {
178 phy_type = "ulpi";
179 };
180
181 /* USB2 is shared with localbus, so it must be disabled
182 by default. We can't put 'status = "disabled";' here
183 since U-Boot doesn't clear the status property when
184 it enables USB2. OTOH, U-Boot does create a new node
185 when there isn't any. So, just comment it out.
186 usb@23000 {
187 phy_type = "ulpi";
188 };
189 */
190
191 mdio@24000 {
192 phy0: ethernet-phy@0 {
193 interrupt-parent = <&mpic>;
194 interrupts = <3 1>;
195 reg = <0x0>;
196 };
197
198 phy1: ethernet-phy@1 {
199 interrupt-parent = <&mpic>;
200 interrupts = <2 1>;
201 reg = <0x1>;
202 };
203
204 tbi0: tbi-phy@11 {
205 reg = <0x11>;
206 device_type = "tbi-phy";
207 };
208 };
209
210 mdio@25000 {
211 tbi1: tbi-phy@11 {
212 reg = <0x11>;
213 device_type = "tbi-phy";
214 };
215 };
216
217 mdio@26000 {
218 tbi2: tbi-phy@11 {
219 reg = <0x11>;
220 device_type = "tbi-phy";
221 };
222 };
223
224 enet0: ethernet@b0000 {
225 fixed-link = <1 1 1000 0 0>;
226 phy-connection-type = "rgmii-id";
227
228 };
229
230 enet1: ethernet@b1000 {
231 phy-handle = <&phy0>;
232 tbi-handle = <&tbi1>;
233 phy-connection-type = "sgmii";
234 };
235
236 enet2: ethernet@b2000 {
237 phy-handle = <&phy1>;
238 phy-connection-type = "rgmii-id";
239 };
240
241 par_io@e0100 {
242 #address-cells = <1>;
243 #size-cells = <1>;
244 reg = <0xe0100 0x60>;
245 ranges = <0x0 0xe0100 0x60>;
246 device_type = "par_io";
247 num-ports = <3>;
248 pio1: ucc_pin@01 {
249 pio-map = <
250 /* port pin dir open_drain assignment has_irq */
251 0x1 0x13 0x1 0x0 0x1 0x0 /* QE_MUX_MDC */
252 0x1 0x14 0x3 0x0 0x1 0x0 /* QE_MUX_MDIO */
253 0x0 0x17 0x2 0x0 0x2 0x0 /* CLK12 */
254 0x0 0x18 0x2 0x0 0x1 0x0 /* CLK9 */
255 0x0 0x7 0x1 0x0 0x2 0x0 /* ENET1_TXD0_SER1_TXD0 */
256 0x0 0x9 0x1 0x0 0x2 0x0 /* ENET1_TXD1_SER1_TXD1 */
257 0x0 0xb 0x1 0x0 0x2 0x0 /* ENET1_TXD2_SER1_TXD2 */
258 0x0 0xc 0x1 0x0 0x2 0x0 /* ENET1_TXD3_SER1_TXD3 */
259 0x0 0x6 0x2 0x0 0x2 0x0 /* ENET1_RXD0_SER1_RXD0 */
260 0x0 0xa 0x2 0x0 0x2 0x0 /* ENET1_RXD1_SER1_RXD1 */
261 0x0 0xe 0x2 0x0 0x2 0x0 /* ENET1_RXD2_SER1_RXD2 */
262 0x0 0xf 0x2 0x0 0x2 0x0 /* ENET1_RXD3_SER1_RXD3 */
263 0x0 0x5 0x1 0x0 0x2 0x0 /* ENET1_TX_EN_SER1_RTS_B */
264 0x0 0xd 0x1 0x0 0x2 0x0 /* ENET1_TX_ER */
265 0x0 0x4 0x2 0x0 0x2 0x0 /* ENET1_RX_DV_SER1_CTS_B */
266 0x0 0x8 0x2 0x0 0x2 0x0 /* ENET1_RX_ER_SER1_CD_B */
267 0x0 0x11 0x2 0x0 0x2 0x0 /* ENET1_CRS */
268 0x0 0x10 0x2 0x0 0x2 0x0>; /* ENET1_COL */
269 };
270
271 pio2: ucc_pin@02 {
272 pio-map = <
273 /* port pin dir open_drain assignment has_irq */
274 0x1 0x13 0x1 0x0 0x1 0x0 /* QE_MUX_MDC */
275 0x1 0x14 0x3 0x0 0x1 0x0 /* QE_MUX_MDIO */
276 0x1 0xb 0x2 0x0 0x1 0x0 /* CLK13 */
277 0x1 0x7 0x1 0x0 0x2 0x0 /* ENET5_TXD0_SER5_TXD0 */
278 0x1 0xa 0x1 0x0 0x2 0x0 /* ENET5_TXD1_SER5_TXD1 */
279 0x1 0x6 0x2 0x0 0x2 0x0 /* ENET5_RXD0_SER5_RXD0 */
280 0x1 0x9 0x2 0x0 0x2 0x0 /* ENET5_RXD1_SER5_RXD1 */
281 0x1 0x5 0x1 0x0 0x2 0x0 /* ENET5_TX_EN_SER5_RTS_B */
282 0x1 0x4 0x2 0x0 0x2 0x0 /* ENET5_RX_DV_SER5_CTS_B */
283 0x1 0x8 0x2 0x0 0x2 0x0>; /* ENET5_RX_ER_SER5_CD_B */
284 };
285 };
286};
diff --git a/arch/powerpc/boot/dts/p1025rdb_32b.dts b/arch/powerpc/boot/dts/p1025rdb_32b.dts
new file mode 100644
index 000000000000..ac5729c14eda
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1025rdb_32b.dts
@@ -0,0 +1,135 @@
1/*
2 * P1025 RDB Device Tree Source (32-bit address map)
3 *
4 * Copyright 2011 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/include/ "fsl/p1021si-pre.dtsi"
36/ {
37 model = "fsl,P1025RDB";
38 compatible = "fsl,P1025RDB";
39
40 memory {
41 device_type = "memory";
42 };
43
44 lbc: localbus@ffe05000 {
45 reg = <0 0xffe05000 0 0x1000>;
46
47 /* NOR, NAND Flashes */
48 ranges = <0x0 0x0 0x0 0xef000000 0x01000000
49 0x1 0x0 0x0 0xff800000 0x00040000>;
50 };
51
52 soc: soc@ffe00000 {
53 ranges = <0x0 0x0 0xffe00000 0x100000>;
54 };
55
56 pci0: pcie@ffe09000 {
57 ranges = <0x2000000 0x0 0xe0000000 0 0xe0000000 0x0 0x20000000
58 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
59 reg = <0 0xffe09000 0 0x1000>;
60 pcie@0 {
61 ranges = <0x2000000 0x0 0xe0000000
62 0x2000000 0x0 0xe0000000
63 0x0 0x20000000
64
65 0x1000000 0x0 0x0
66 0x1000000 0x0 0x0
67 0x0 0x100000>;
68 };
69 };
70
71 pci1: pcie@ffe0a000 {
72 reg = <0 0xffe0a000 0 0x1000>;
73 ranges = <0x2000000 0x0 0xe0000000 0 0xe0000000 0x0 0x20000000
74 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
75 pcie@0 {
76 ranges = <0x2000000 0x0 0xe0000000
77 0x2000000 0x0 0xe0000000
78 0x0 0x20000000
79
80 0x1000000 0x0 0x0
81 0x1000000 0x0 0x0
82 0x0 0x100000>;
83 };
84 };
85
86 qe: qe@ffe80000 {
87 ranges = <0x0 0x0 0xffe80000 0x40000>;
88 reg = <0 0xffe80000 0 0x480>;
89 brg-frequency = <0>;
90 bus-frequency = <0>;
91 status = "disabled"; /* no firmware loaded */
92
93 enet3: ucc@2000 {
94 device_type = "network";
95 compatible = "ucc_geth";
96 rx-clock-name = "clk12";
97 tx-clock-name = "clk9";
98 pio-handle = <&pio1>;
99 phy-handle = <&qe_phy0>;
100 phy-connection-type = "mii";
101 };
102
103 mdio@2120 {
104 qe_phy0: ethernet-phy@0 {
105 interrupt-parent = <&mpic>;
106 interrupts = <4 1 0 0>;
107 reg = <0x6>;
108 device_type = "ethernet-phy";
109 };
110 qe_phy1: ethernet-phy@03 {
111 interrupt-parent = <&mpic>;
112 interrupts = <5 1 0 0>;
113 reg = <0x3>;
114 device_type = "ethernet-phy";
115 };
116 tbi-phy@11 {
117 reg = <0x11>;
118 device_type = "tbi-phy";
119 };
120 };
121
122 enet4: ucc@2400 {
123 device_type = "network";
124 compatible = "ucc_geth";
125 rx-clock-name = "none";
126 tx-clock-name = "clk13";
127 pio-handle = <&pio2>;
128 phy-handle = <&qe_phy1>;
129 phy-connection-type = "rmii";
130 };
131 };
132};
133
134/include/ "p1025rdb.dtsi"
135/include/ "fsl/p1021si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p1025rdb_36b.dts b/arch/powerpc/boot/dts/p1025rdb_36b.dts
new file mode 100644
index 000000000000..4ce4bfa0eda4
--- /dev/null
+++ b/arch/powerpc/boot/dts/p1025rdb_36b.dts
@@ -0,0 +1,88 @@
1/*
2 * P1025 RDB Device Tree Source (36-bit address map)
3 *
4 * Copyright 2011 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/include/ "fsl/p1021si-pre.dtsi"
36/ {
37 model = "fsl,P1025RDB";
38 compatible = "fsl,P1025RDB";
39
40 memory {
41 device_type = "memory";
42 };
43
44 lbc: localbus@fffe05000 {
45 reg = <0xf 0xffe05000 0 0x1000>;
46
47 /* NOR, NAND Flashes */
48 ranges = <0x0 0x0 0xf 0xef000000 0x01000000
49 0x1 0x0 0xf 0xff800000 0x00040000>;
50 };
51
52 soc: soc@fffe00000 {
53 ranges = <0x0 0xf 0xffe00000 0x100000>;
54 };
55
56 pci0: pcie@fffe09000 {
57 reg = <0xf 0xffe09000 0 0x1000>;
58 ranges = <0x2000000 0x0 0xe0000000 0xe 0x20000000 0x0 0x20000000
59 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
60 pcie@0 {
61 ranges = <0x2000000 0x0 0xe0000000
62 0x2000000 0x0 0xe0000000
63 0x0 0x20000000
64
65 0x1000000 0x0 0x0
66 0x1000000 0x0 0x0
67 0x0 0x100000>;
68 };
69 };
70
71 pci1: pcie@fffe0a000 {
72 reg = <0xf 0xffe0a000 0 0x1000>;
73 ranges = <0x2000000 0x0 0xe0000000 0xc 0x00000000 0x0 0x20000000
74 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
75 pcie@0 {
76 ranges = <0x2000000 0x0 0xe0000000
77 0x2000000 0x0 0xe0000000
78 0x0 0x20000000
79
80 0x1000000 0x0 0x0
81 0x1000000 0x0 0x0
82 0x0 0x100000>;
83 };
84 };
85};
86
87/include/ "p1025rdb.dtsi"
88/include/ "fsl/p1021si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p2020ds.dtsi b/arch/powerpc/boot/dts/p2020ds.dtsi
index c1cf6cef4dd6..d3b939c573b0 100644
--- a/arch/powerpc/boot/dts/p2020ds.dtsi
+++ b/arch/powerpc/boot/dts/p2020ds.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * P2020DS Device Tree Source stub (no addresses or top-level ranges) 2 * P2020DS Device Tree Source stub (no addresses or top-level ranges)
3 * 3 *
4 * Copyright 2011 Freescale Semiconductor Inc. 4 * Copyright 2011-2012 Freescale Semiconductor Inc.
5 * 5 *
6 * Redistribution and use in source and binary forms, with or without 6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met: 7 * modification, are permitted provided that the following conditions are met:
@@ -134,6 +134,7 @@
134&board_soc { 134&board_soc {
135 usb@22000 { 135 usb@22000 {
136 phy_type = "ulpi"; 136 phy_type = "ulpi";
137 dr_mode = "host";
137 }; 138 };
138 139
139 mdio@24520 { 140 mdio@24520 {
diff --git a/arch/powerpc/boot/dts/p2020rdb-pc.dtsi b/arch/powerpc/boot/dts/p2020rdb-pc.dtsi
new file mode 100644
index 000000000000..c21d1c7d16cd
--- /dev/null
+++ b/arch/powerpc/boot/dts/p2020rdb-pc.dtsi
@@ -0,0 +1,241 @@
1/*
2 * P2020 RDB-PC Device Tree Source stub (no addresses or top-level ranges)
3 *
4 * Copyright 2011 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35&lbc {
36 nor@0,0 {
37 #address-cells = <1>;
38 #size-cells = <1>;
39 compatible = "cfi-flash";
40 reg = <0x0 0x0 0x1000000>;
41 bank-width = <2>;
42 device-width = <1>;
43
44 partition@0 {
45 /* This location must not be altered */
46 /* 256KB for Vitesse 7385 Switch firmware */
47 reg = <0x0 0x00040000>;
48 label = "NOR Vitesse-7385 Firmware";
49 read-only;
50 };
51
52 partition@40000 {
53 /* 256KB for DTB Image */
54 reg = <0x00040000 0x00040000>;
55 label = "NOR DTB Image";
56 };
57
58 partition@80000 {
59 /* 3.5 MB for Linux Kernel Image */
60 reg = <0x00080000 0x00380000>;
61 label = "NOR Linux Kernel Image";
62 };
63
64 partition@400000 {
65 /* 11MB for JFFS2 based Root file System */
66 reg = <0x00400000 0x00b00000>;
67 label = "NOR JFFS2 Root File System";
68 };
69
70 partition@f00000 {
71 /* This location must not be altered */
72 /* 512KB for u-boot Bootloader Image */
73 /* 512KB for u-boot Environment Variables */
74 reg = <0x00f00000 0x00100000>;
75 label = "NOR U-Boot Image";
76 read-only;
77 };
78 };
79
80 nand@1,0 {
81 #address-cells = <1>;
82 #size-cells = <1>;
83 compatible = "fsl,p2020-fcm-nand",
84 "fsl,elbc-fcm-nand";
85 reg = <0x1 0x0 0x40000>;
86
87 partition@0 {
88 /* This location must not be altered */
89 /* 1MB for u-boot Bootloader Image */
90 reg = <0x0 0x00100000>;
91 label = "NAND U-Boot Image";
92 read-only;
93 };
94
95 partition@100000 {
96 /* 1MB for DTB Image */
97 reg = <0x00100000 0x00100000>;
98 label = "NAND DTB Image";
99 };
100
101 partition@200000 {
102 /* 4MB for Linux Kernel Image */
103 reg = <0x00200000 0x00400000>;
104 label = "NAND Linux Kernel Image";
105 };
106
107 partition@600000 {
108 /* 4MB for Compressed Root file System Image */
109 reg = <0x00600000 0x00400000>;
110 label = "NAND Compressed RFS Image";
111 };
112
113 partition@a00000 {
114 /* 7MB for JFFS2 based Root file System */
115 reg = <0x00a00000 0x00700000>;
116 label = "NAND JFFS2 Root File System";
117 };
118
119 partition@1100000 {
120 /* 15MB for JFFS2 based Root file System */
121 reg = <0x01100000 0x00f00000>;
122 label = "NAND Writable User area";
123 };
124 };
125
126 L2switch@2,0 {
127 #address-cells = <1>;
128 #size-cells = <1>;
129 compatible = "vitesse-7385";
130 reg = <0x2 0x0 0x20000>;
131 };
132
133 cpld@3,0 {
134 #address-cells = <1>;
135 #size-cells = <1>;
136 compatible = "cpld";
137 reg = <0x3 0x0 0x20000>;
138 read-only;
139 };
140};
141
142&soc {
143 i2c@3000 {
144 rtc@68 {
145 compatible = "pericom,pt7c4338";
146 reg = <0x68>;
147 };
148 };
149
150 spi@7000 {
151 flash@0 {
152 #address-cells = <1>;
153 #size-cells = <1>;
154 compatible = "spansion,m25p80";
155 reg = <0>;
156 spi-max-frequency = <40000000>;
157
158 partition@0 {
159 /* 512KB for u-boot Bootloader Image */
160 reg = <0x0 0x00080000>;
161 label = "SPI U-Boot Image";
162 read-only;
163 };
164
165 partition@80000 {
166 /* 512KB for DTB Image */
167 reg = <0x00080000 0x00080000>;
168 label = "SPI DTB Image";
169 };
170
171 partition@100000 {
172 /* 4MB for Linux Kernel Image */
173 reg = <0x00100000 0x00400000>;
174 label = "SPI Linux Kernel Image";
175 };
176
177 partition@500000 {
178 /* 4MB for Compressed RFS Image */
179 reg = <0x00500000 0x00400000>;
180 label = "SPI Compressed RFS Image";
181 };
182
183 partition@900000 {
184 /* 7MB for JFFS2 based RFS */
185 reg = <0x00900000 0x00700000>;
186 label = "SPI JFFS2 RFS";
187 };
188 };
189 };
190
191 usb@22000 {
192 phy_type = "ulpi";
193 };
194
195 mdio@24520 {
196 phy0: ethernet-phy@0 {
197 interrupts = <3 1 0 0>;
198 reg = <0x0>;
199 };
200 phy1: ethernet-phy@1 {
201 interrupts = <2 1 0 0>;
202 reg = <0x1>;
203 };
204 };
205
206 mdio@25520 {
207 tbi0: tbi-phy@11 {
208 reg = <0x11>;
209 device_type = "tbi-phy";
210 };
211 };
212
213 mdio@26520 {
214 status = "disabled";
215 };
216
217 ptp_clock@24e00 {
218 fsl,tclk-period = <5>;
219 fsl,tmr-prsc = <200>;
220 fsl,tmr-add = <0xCCCCCCCD>;
221 fsl,tmr-fiper1 = <0x3B9AC9FB>;
222 fsl,tmr-fiper2 = <0x0001869B>;
223 fsl,max-adj = <249999999>;
224 };
225
226 enet0: ethernet@24000 {
227 fixed-link = <1 1 1000 0 0>;
228 phy-connection-type = "rgmii-id";
229 };
230
231 enet1: ethernet@25000 {
232 tbi-handle = <&tbi0>;
233 phy-handle = <&phy0>;
234 phy-connection-type = "sgmii";
235 };
236
237 enet2: ethernet@26000 {
238 phy-handle = <&phy1>;
239 phy-connection-type = "rgmii-id";
240 };
241};
diff --git a/arch/powerpc/boot/dts/p2020rdb-pc_32b.dts b/arch/powerpc/boot/dts/p2020rdb-pc_32b.dts
new file mode 100644
index 000000000000..852e5b27485d
--- /dev/null
+++ b/arch/powerpc/boot/dts/p2020rdb-pc_32b.dts
@@ -0,0 +1,96 @@
1/*
2 * P2020 RDB-PC 32Bit Physical Address Map Device Tree Source
3 *
4 * Copyright 2011 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/include/ "fsl/p2020si-pre.dtsi"
36
37/ {
38 model = "fsl,P2020RDB";
39 compatible = "fsl,P2020RDB-PC";
40
41 memory {
42 device_type = "memory";
43 };
44
45 lbc: localbus@ffe05000 {
46 reg = <0 0xffe05000 0 0x1000>;
47
48 /* NOR and NAND Flashes */
49 ranges = <0x0 0x0 0x0 0xef000000 0x01000000
50 0x1 0x0 0x0 0xff800000 0x00040000
51 0x2 0x0 0x0 0xffb00000 0x00020000
52 0x3 0x0 0x0 0xffa00000 0x00020000>;
53 };
54
55 soc: soc@ffe00000 {
56 ranges = <0x0 0x0 0xffe00000 0x100000>;
57 };
58
59 pci0: pcie@ffe08000 {
60 reg = <0 0xffe08000 0 0x1000>;
61 status = "disabled";
62 };
63
64 pci1: pcie@ffe09000 {
65 reg = <0 0xffe09000 0 0x1000>;
66 ranges = <0x2000000 0x0 0xe0000000 0 0xa0000000 0x0 0x20000000
67 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>;
68 pcie@0 {
69 ranges = <0x2000000 0x0 0xe0000000
70 0x2000000 0x0 0xe0000000
71 0x0 0x20000000
72
73 0x1000000 0x0 0x0
74 0x1000000 0x0 0x0
75 0x0 0x100000>;
76 };
77 };
78
79 pci2: pcie@ffe0a000 {
80 reg = <0 0xffe0a000 0 0x1000>;
81 ranges = <0x2000000 0x0 0xe0000000 0 0x80000000 0x0 0x20000000
82 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>;
83 pcie@0 {
84 ranges = <0x2000000 0x0 0xe0000000
85 0x2000000 0x0 0xe0000000
86 0x0 0x20000000
87
88 0x1000000 0x0 0x0
89 0x1000000 0x0 0x0
90 0x0 0x100000>;
91 };
92 };
93};
94
95/include/ "p2020rdb-pc.dtsi"
96/include/ "fsl/p2020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p2020rdb-pc_36b.dts b/arch/powerpc/boot/dts/p2020rdb-pc_36b.dts
new file mode 100644
index 000000000000..b5a56ca51cf7
--- /dev/null
+++ b/arch/powerpc/boot/dts/p2020rdb-pc_36b.dts
@@ -0,0 +1,96 @@
1/*
2 * P2020 RDB-PC 36Bit Physical Address Map Device Tree Source
3 *
4 * Copyright 2011 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35/include/ "fsl/p2020si-pre.dtsi"
36
37/ {
38 model = "fsl,P2020RDB";
39 compatible = "fsl,P2020RDB-PC";
40
41 memory {
42 device_type = "memory";
43 };
44
45 lbc: localbus@fffe05000 {
46 reg = <0xf 0xffe05000 0 0x1000>;
47
48 /* NOR and NAND Flashes */
49 ranges = <0x0 0x0 0xf 0xef000000 0x01000000
50 0x1 0x0 0xf 0xff800000 0x00040000
51 0x2 0x0 0xf 0xffb00000 0x00020000
52 0x3 0x0 0xf 0xffa00000 0x00020000>;
53 };
54
55 soc: soc@fffe00000 {
56 ranges = <0x0 0xf 0xffe00000 0x100000>;
57 };
58
59 pci0: pcie@fffe08000 {
60 reg = <0xf 0xffe08000 0 0x1000>;
61 status = "disabled";
62 };
63
64 pci1: pcie@fffe09000 {
65 reg = <0xf 0xffe09000 0 0x1000>;
66 ranges = <0x2000000 0x0 0xe0000000 0xc 0x20000000 0x0 0x20000000
67 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>;
68 pcie@0 {
69 ranges = <0x2000000 0x0 0xe0000000
70 0x2000000 0x0 0xe0000000
71 0x0 0x20000000
72
73 0x1000000 0x0 0x0
74 0x1000000 0x0 0x0
75 0x0 0x100000>;
76 };
77 };
78
79 pci2: pcie@fffe0a000 {
80 reg = <0xf 0xffe0a000 0 0x1000>;
81 ranges = <0x2000000 0x0 0xe0000000 0xc 0x00000000 0x0 0x20000000
82 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>;
83 pcie@0 {
84 ranges = <0x2000000 0x0 0xe0000000
85 0x2000000 0x0 0xe0000000
86 0x0 0x20000000
87
88 0x1000000 0x0 0x0
89 0x1000000 0x0 0x0
90 0x0 0x100000>;
91 };
92 };
93};
94
95/include/ "p2020rdb-pc.dtsi"
96/include/ "fsl/p2020si-post.dtsi"
diff --git a/arch/powerpc/boot/dts/p2020rdb.dts b/arch/powerpc/boot/dts/p2020rdb.dts
index 26759a591712..153bc76bb48e 100644
--- a/arch/powerpc/boot/dts/p2020rdb.dts
+++ b/arch/powerpc/boot/dts/p2020rdb.dts
@@ -1,7 +1,7 @@
1/* 1/*
2 * P2020 RDB Device Tree Source 2 * P2020 RDB Device Tree Source
3 * 3 *
4 * Copyright 2009-2011 Freescale Semiconductor Inc. 4 * Copyright 2009-2012 Freescale Semiconductor Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the 7 * under the terms of the GNU General Public License as published by the
@@ -34,7 +34,7 @@
34 34
35 /* NOR and NAND Flashes */ 35 /* NOR and NAND Flashes */
36 ranges = <0x0 0x0 0x0 0xef000000 0x01000000 36 ranges = <0x0 0x0 0x0 0xef000000 0x01000000
37 0x1 0x0 0x0 0xffa00000 0x00040000 37 0x1 0x0 0x0 0xff800000 0x00040000
38 0x2 0x0 0x0 0xffb00000 0x00020000>; 38 0x2 0x0 0x0 0xffb00000 0x00020000>;
39 39
40 nor@0,0 { 40 nor@0,0 {
@@ -157,7 +157,7 @@
157 #size-cells = <1>; 157 #size-cells = <1>;
158 compatible = "spansion,s25sl12801"; 158 compatible = "spansion,s25sl12801";
159 reg = <0>; 159 reg = <0>;
160 spi-max-frequency = <50000000>; 160 spi-max-frequency = <40000000>;
161 161
162 partition@0 { 162 partition@0 {
163 /* 512KB for u-boot Bootloader Image */ 163 /* 512KB for u-boot Bootloader Image */
@@ -197,6 +197,7 @@
197 197
198 usb@22000 { 198 usb@22000 {
199 phy_type = "ulpi"; 199 phy_type = "ulpi";
200 dr_mode = "host";
200 }; 201 };
201 202
202 mdio@24520 { 203 mdio@24520 {
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper
index f090e6d2907e..6761c746048d 100755
--- a/arch/powerpc/boot/wrapper
+++ b/arch/powerpc/boot/wrapper
@@ -144,6 +144,7 @@ tmp=$tmpdir/zImage.$$.o
144ksection=.kernel:vmlinux.strip 144ksection=.kernel:vmlinux.strip
145isection=.kernel:initrd 145isection=.kernel:initrd
146link_address='0x400000' 146link_address='0x400000'
147make_space=y
147 148
148case "$platform" in 149case "$platform" in
149pseries) 150pseries)
@@ -210,6 +211,7 @@ ps3)
210 ksection=.kernel:vmlinux.bin 211 ksection=.kernel:vmlinux.bin
211 isection=.kernel:initrd 212 isection=.kernel:initrd
212 link_address='' 213 link_address=''
214 make_space=n
213 pie= 215 pie=
214 ;; 216 ;;
215ep88xc|ep405|ep8248e) 217ep88xc|ep405|ep8248e)
@@ -278,17 +280,19 @@ else
278 rm -f $vmz.$$ 280 rm -f $vmz.$$
279fi 281fi
280 282
281# Round the size to next higher MB limit 283if [ "$make_space" = "y" ]; then
282round_size=$(((strip_size + 0xfffff) & 0xfff00000)) 284 # Round the size to next higher MB limit
285 round_size=$(((strip_size + 0xfffff) & 0xfff00000))
283 286
284round_size=0x$(printf "%x" $round_size) 287 round_size=0x$(printf "%x" $round_size)
285link_addr=$(printf "%d" $link_address) 288 link_addr=$(printf "%d" $link_address)
286 289
287if [ $link_addr -lt $strip_size ]; then 290 if [ $link_addr -lt $strip_size ]; then
288 echo "INFO: Uncompressed kernel (size 0x$(printf "%x\n" $strip_size))" \ 291 echo "INFO: Uncompressed kernel (size 0x$(printf "%x\n" $strip_size))" \
289 "overlaps the address of the wrapper($link_address)" 292 "overlaps the address of the wrapper($link_address)"
290 echo "INFO: Fixing the link_address of wrapper to ($round_size)" 293 echo "INFO: Fixing the link_address of wrapper to ($round_size)"
291 link_address=$round_size 294 link_address=$round_size
295 fi
292fi 296fi
293 297
294vmz="$vmz$gzip" 298vmz="$vmz$gzip"
diff --git a/arch/powerpc/configs/85xx/ge_imp3a_defconfig b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
new file mode 100644
index 000000000000..f8c51a4ab995
--- /dev/null
+++ b/arch/powerpc/configs/85xx/ge_imp3a_defconfig
@@ -0,0 +1,257 @@
1CONFIG_PPC_85xx=y
2CONFIG_SMP=y
3CONFIG_NR_CPUS=2
4CONFIG_EXPERIMENTAL=y
5CONFIG_SYSVIPC=y
6CONFIG_POSIX_MQUEUE=y
7CONFIG_BSD_PROCESS_ACCT=y
8CONFIG_BSD_PROCESS_ACCT_V3=y
9CONFIG_SPARSE_IRQ=y
10CONFIG_IKCONFIG=y
11CONFIG_IKCONFIG_PROC=y
12# CONFIG_UTS_NS is not set
13# CONFIG_IPC_NS is not set
14# CONFIG_USER_NS is not set
15# CONFIG_PID_NS is not set
16# CONFIG_NET_NS is not set
17CONFIG_SYSFS_DEPRECATED=y
18CONFIG_SYSFS_DEPRECATED_V2=y
19CONFIG_RELAY=y
20CONFIG_BLK_DEV_INITRD=y
21CONFIG_PERF_EVENTS=y
22CONFIG_SLAB=y
23CONFIG_MODULES=y
24CONFIG_MODULE_UNLOAD=y
25# CONFIG_BLK_DEV_BSG is not set
26CONFIG_GE_IMP3A=y
27CONFIG_QUICC_ENGINE=y
28CONFIG_QE_GPIO=y
29CONFIG_CPM2=y
30CONFIG_HIGHMEM=y
31CONFIG_HIGH_RES_TIMERS=y
32CONFIG_HZ_1000=y
33CONFIG_PREEMPT=y
34# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
35CONFIG_BINFMT_MISC=m
36CONFIG_MATH_EMULATION=y
37CONFIG_IRQ_ALL_CPUS=y
38CONFIG_FORCE_MAX_ZONEORDER=17
39CONFIG_PCI=y
40CONFIG_PCIEPORTBUS=y
41CONFIG_PCI_MSI=y
42CONFIG_PCCARD=y
43# CONFIG_PCMCIA_LOAD_CIS is not set
44CONFIG_YENTA=y
45CONFIG_NET=y
46CONFIG_PACKET=y
47CONFIG_UNIX=y
48CONFIG_XFRM_USER=m
49CONFIG_NET_KEY=y
50CONFIG_INET=y
51CONFIG_IP_MULTICAST=y
52CONFIG_IP_ADVANCED_ROUTER=y
53CONFIG_IP_MULTIPLE_TABLES=y
54CONFIG_IP_ROUTE_MULTIPATH=y
55CONFIG_IP_ROUTE_VERBOSE=y
56CONFIG_IP_PNP=y
57CONFIG_IP_PNP_DHCP=y
58CONFIG_IP_PNP_BOOTP=y
59CONFIG_IP_PNP_RARP=y
60CONFIG_NET_IPIP=m
61CONFIG_IP_MROUTE=y
62CONFIG_IP_PIMSM_V1=y
63CONFIG_IP_PIMSM_V2=y
64CONFIG_SYN_COOKIES=y
65CONFIG_INET_AH=m
66CONFIG_INET_ESP=m
67CONFIG_INET_IPCOMP=m
68# CONFIG_INET_XFRM_MODE_BEET is not set
69CONFIG_INET6_AH=m
70CONFIG_INET6_IPCOMP=m
71CONFIG_IPV6_TUNNEL=m
72CONFIG_NET_PKTGEN=m
73CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
74CONFIG_MTD=y
75CONFIG_MTD_OF_PARTS=y
76CONFIG_MTD_CHAR=y
77CONFIG_MTD_BLOCK=y
78CONFIG_MTD_CFI=y
79CONFIG_MTD_JEDECPROBE=y
80CONFIG_MTD_CFI_INTELEXT=y
81CONFIG_MTD_CFI_AMDSTD=y
82CONFIG_MTD_PHYSMAP_OF=y
83CONFIG_MTD_NAND=y
84CONFIG_MTD_NAND_FSL_ELBC=y
85CONFIG_PROC_DEVICETREE=y
86CONFIG_BLK_DEV_LOOP=m
87CONFIG_BLK_DEV_CRYPTOLOOP=m
88CONFIG_BLK_DEV_NBD=m
89CONFIG_BLK_DEV_RAM=y
90CONFIG_BLK_DEV_RAM_SIZE=131072
91CONFIG_MISC_DEVICES=y
92CONFIG_DS1682=y
93CONFIG_BLK_DEV_SD=y
94CONFIG_CHR_DEV_ST=y
95CONFIG_BLK_DEV_SR=y
96CONFIG_ATA=y
97CONFIG_SATA_AHCI=y
98CONFIG_SATA_SIL24=y
99# CONFIG_ATA_SFF is not set
100CONFIG_NETDEVICES=y
101CONFIG_BONDING=m
102CONFIG_DUMMY=m
103CONFIG_NETCONSOLE=y
104CONFIG_NETPOLL_TRAP=y
105CONFIG_TUN=m
106# CONFIG_NET_VENDOR_3COM is not set
107CONFIG_FS_ENET=y
108CONFIG_UCC_GETH=y
109CONFIG_GIANFAR=y
110CONFIG_PPP=m
111CONFIG_PPP_BSDCOMP=m
112CONFIG_PPP_DEFLATE=m
113CONFIG_PPP_FILTER=y
114CONFIG_PPP_MULTILINK=y
115CONFIG_PPPOE=m
116CONFIG_PPP_ASYNC=m
117CONFIG_PPP_SYNC_TTY=m
118CONFIG_SLIP=m
119CONFIG_SLIP_COMPRESSED=y
120CONFIG_SLIP_SMART=y
121CONFIG_SLIP_MODE_SLIP6=y
122# CONFIG_INPUT_KEYBOARD is not set
123# CONFIG_INPUT_MOUSE is not set
124# CONFIG_SERIO is not set
125# CONFIG_LEGACY_PTYS is not set
126CONFIG_SERIAL_8250=y
127CONFIG_SERIAL_8250_CONSOLE=y
128CONFIG_SERIAL_8250_NR_UARTS=2
129CONFIG_SERIAL_8250_RUNTIME_UARTS=2
130CONFIG_SERIAL_8250_EXTENDED=y
131CONFIG_SERIAL_8250_MANY_PORTS=y
132CONFIG_SERIAL_8250_DETECT_IRQ=y
133CONFIG_SERIAL_8250_RSA=y
134CONFIG_SERIAL_QE=m
135CONFIG_NVRAM=y
136CONFIG_I2C=y
137CONFIG_I2C_CHARDEV=y
138CONFIG_I2C_CPM=m
139CONFIG_I2C_MPC=y
140CONFIG_GPIO_SYSFS=y
141CONFIG_GPIO_GE_FPGA=y
142CONFIG_SENSORS_LM90=y
143CONFIG_SENSORS_LM92=y
144CONFIG_WATCHDOG=y
145CONFIG_GEF_WDT=y
146CONFIG_VIDEO_OUTPUT_CONTROL=m
147CONFIG_HID_DRAGONRISE=y
148CONFIG_HID_GYRATION=y
149CONFIG_HID_TWINHAN=y
150CONFIG_HID_ORTEK=y
151CONFIG_HID_PANTHERLORD=y
152CONFIG_HID_PETALYNX=y
153CONFIG_HID_SAMSUNG=y
154CONFIG_HID_SONY=y
155CONFIG_HID_SUNPLUS=y
156CONFIG_HID_GREENASIA=y
157CONFIG_HID_SMARTJOYPLUS=y
158CONFIG_HID_TOPSEED=y
159CONFIG_HID_THRUSTMASTER=y
160CONFIG_HID_ZEROPLUS=y
161CONFIG_USB=y
162CONFIG_USB_DEVICEFS=y
163CONFIG_USB_EHCI_HCD=y
164# CONFIG_USB_EHCI_TT_NEWSCHED is not set
165CONFIG_USB_EHCI_FSL=y
166CONFIG_USB_OHCI_HCD=y
167CONFIG_USB_OHCI_HCD_PPC_OF_BE=y
168CONFIG_USB_OHCI_HCD_PPC_OF_LE=y
169CONFIG_USB_STORAGE=y
170CONFIG_EDAC=y
171CONFIG_EDAC_MM_EDAC=y
172CONFIG_EDAC_MPC85XX=y
173CONFIG_RTC_CLASS=y
174# CONFIG_RTC_INTF_PROC is not set
175CONFIG_RTC_DRV_RX8581=y
176CONFIG_DMADEVICES=y
177CONFIG_FSL_DMA=y
178# CONFIG_NET_DMA is not set
179CONFIG_EXT2_FS=y
180CONFIG_EXT2_FS_XATTR=y
181CONFIG_EXT2_FS_POSIX_ACL=y
182CONFIG_EXT3_FS=y
183# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
184CONFIG_EXT3_FS_POSIX_ACL=y
185CONFIG_EXT4_FS=y
186CONFIG_FUSE_FS=y
187CONFIG_ISO9660_FS=y
188CONFIG_JOLIET=y
189CONFIG_ZISOFS=y
190CONFIG_UDF_FS=y
191CONFIG_MSDOS_FS=y
192CONFIG_VFAT_FS=y
193CONFIG_FAT_DEFAULT_CODEPAGE=850
194CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
195CONFIG_NTFS_FS=y
196CONFIG_PROC_KCORE=y
197CONFIG_TMPFS=y
198CONFIG_JFFS2_FS=y
199CONFIG_NFS_FS=y
200CONFIG_NFS_V3=y
201CONFIG_NFS_V4=y
202CONFIG_ROOT_NFS=y
203CONFIG_NFSD=y
204CONFIG_NFSD_V4=y
205CONFIG_CIFS=m
206CONFIG_CIFS_XATTR=y
207CONFIG_CIFS_POSIX=y
208CONFIG_NLS_CODEPAGE_437=y
209CONFIG_NLS_CODEPAGE_737=m
210CONFIG_NLS_CODEPAGE_775=m
211CONFIG_NLS_CODEPAGE_850=y
212CONFIG_NLS_CODEPAGE_852=m
213CONFIG_NLS_CODEPAGE_855=m
214CONFIG_NLS_CODEPAGE_857=m
215CONFIG_NLS_CODEPAGE_860=m
216CONFIG_NLS_CODEPAGE_861=m
217CONFIG_NLS_CODEPAGE_862=m
218CONFIG_NLS_CODEPAGE_863=m
219CONFIG_NLS_CODEPAGE_864=m
220CONFIG_NLS_CODEPAGE_865=m
221CONFIG_NLS_CODEPAGE_866=m
222CONFIG_NLS_CODEPAGE_869=m
223CONFIG_NLS_CODEPAGE_936=m
224CONFIG_NLS_CODEPAGE_950=m
225CONFIG_NLS_CODEPAGE_932=m
226CONFIG_NLS_CODEPAGE_949=m
227CONFIG_NLS_CODEPAGE_874=m
228CONFIG_NLS_ISO8859_8=m
229CONFIG_NLS_CODEPAGE_1250=m
230CONFIG_NLS_CODEPAGE_1251=m
231CONFIG_NLS_ASCII=y
232CONFIG_NLS_ISO8859_1=y
233CONFIG_NLS_ISO8859_2=m
234CONFIG_NLS_ISO8859_3=m
235CONFIG_NLS_ISO8859_4=m
236CONFIG_NLS_ISO8859_5=m
237CONFIG_NLS_ISO8859_6=m
238CONFIG_NLS_ISO8859_7=m
239CONFIG_NLS_ISO8859_9=m
240CONFIG_NLS_ISO8859_13=m
241CONFIG_NLS_ISO8859_14=m
242CONFIG_NLS_ISO8859_15=y
243CONFIG_NLS_KOI8_R=m
244CONFIG_NLS_KOI8_U=m
245CONFIG_NLS_UTF8=y
246CONFIG_CRC_CCITT=y
247CONFIG_CRC_T10DIF=y
248CONFIG_LIBCRC32C=y
249CONFIG_MAGIC_SYSRQ=y
250CONFIG_SYSCTL_SYSCALL_CHECK=y
251CONFIG_CRYPTO_CBC=y
252CONFIG_CRYPTO_MD5=y
253CONFIG_CRYPTO_SHA256=m
254CONFIG_CRYPTO_SHA512=m
255CONFIG_CRYPTO_DES=y
256# CONFIG_CRYPTO_ANSI_CPRNG is not set
257CONFIG_CRYPTO_DEV_TALITOS=y
diff --git a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
index d41857a5152d..da731c2fe984 100644
--- a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
+++ b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig
@@ -131,6 +131,7 @@ CONFIG_I2C=y
131CONFIG_I2C_CHARDEV=y 131CONFIG_I2C_CHARDEV=y
132CONFIG_I2C_MPC=y 132CONFIG_I2C_MPC=y
133CONFIG_GPIO_SYSFS=y 133CONFIG_GPIO_SYSFS=y
134CONFIG_GPIO_GE_FPGA=y
134CONFIG_SENSORS_LM90=y 135CONFIG_SENSORS_LM90=y
135CONFIG_SENSORS_LM92=y 136CONFIG_SENSORS_LM92=y
136CONFIG_WATCHDOG=y 137CONFIG_WATCHDOG=y
diff --git a/arch/powerpc/configs/86xx/gef_sbc310_defconfig b/arch/powerpc/configs/86xx/gef_sbc310_defconfig
index 38303ec11bcd..2149360a1e62 100644
--- a/arch/powerpc/configs/86xx/gef_sbc310_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc310_defconfig
@@ -132,6 +132,7 @@ CONFIG_I2C=y
132CONFIG_I2C_CHARDEV=y 132CONFIG_I2C_CHARDEV=y
133CONFIG_I2C_MPC=y 133CONFIG_I2C_MPC=y
134CONFIG_GPIO_SYSFS=y 134CONFIG_GPIO_SYSFS=y
135CONFIG_GPIO_GE_FPGA=y
135CONFIG_SENSORS_LM90=y 136CONFIG_SENSORS_LM90=y
136CONFIG_SENSORS_LM92=y 137CONFIG_SENSORS_LM92=y
137CONFIG_WATCHDOG=y 138CONFIG_WATCHDOG=y
diff --git a/arch/powerpc/configs/86xx/gef_sbc610_defconfig b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
index 98533973d20f..af2e8e1edba6 100644
--- a/arch/powerpc/configs/86xx/gef_sbc610_defconfig
+++ b/arch/powerpc/configs/86xx/gef_sbc610_defconfig
@@ -183,6 +183,8 @@ CONFIG_NVRAM=y
183CONFIG_I2C=y 183CONFIG_I2C=y
184CONFIG_I2C_CHARDEV=y 184CONFIG_I2C_CHARDEV=y
185CONFIG_I2C_MPC=y 185CONFIG_I2C_MPC=y
186CONFIG_GPIO_SYSFS=y
187CONFIG_GPIO_GE_FPGA=y
186CONFIG_SENSORS_LM90=y 188CONFIG_SENSORS_LM90=y
187CONFIG_SENSORS_LM92=y 189CONFIG_SENSORS_LM92=y
188CONFIG_WATCHDOG=y 190CONFIG_WATCHDOG=y
diff --git a/arch/powerpc/configs/iseries_defconfig b/arch/powerpc/configs/iseries_defconfig
deleted file mode 100644
index 27c46d679968..000000000000
--- a/arch/powerpc/configs/iseries_defconfig
+++ /dev/null
@@ -1,236 +0,0 @@
1CONFIG_PPC64=y
2CONFIG_SMP=y
3CONFIG_EXPERIMENTAL=y
4CONFIG_SYSVIPC=y
5CONFIG_POSIX_MQUEUE=y
6CONFIG_AUDIT=y
7CONFIG_AUDITSYSCALL=y
8CONFIG_IKCONFIG=y
9CONFIG_IKCONFIG_PROC=y
10CONFIG_BLK_DEV_INITRD=y
11# CONFIG_COMPAT_BRK is not set
12CONFIG_MODULES=y
13CONFIG_MODULE_UNLOAD=y
14CONFIG_MODVERSIONS=y
15CONFIG_MODULE_SRCVERSION_ALL=y
16# CONFIG_PPC_PSERIES is not set
17CONFIG_LPARCFG=y
18CONFIG_PPC_ISERIES=y
19CONFIG_VIODASD=y
20CONFIG_VIOCD=m
21CONFIG_VIOTAPE=m
22# CONFIG_PPC_PMAC is not set
23CONFIG_NO_HZ=y
24CONFIG_HIGH_RES_TIMERS=y
25CONFIG_IRQ_ALL_CPUS=y
26# CONFIG_MIGRATION is not set
27CONFIG_PACKET=y
28CONFIG_UNIX=y
29CONFIG_XFRM_USER=m
30CONFIG_XFRM_SUB_POLICY=y
31CONFIG_NET_KEY=m
32CONFIG_INET=y
33CONFIG_IP_MULTICAST=y
34CONFIG_NET_IPIP=y
35CONFIG_SYN_COOKIES=y
36CONFIG_INET_AH=m
37CONFIG_INET_ESP=m
38CONFIG_INET_IPCOMP=m
39CONFIG_INET_XFRM_MODE_BEET=m
40# CONFIG_INET_LRO is not set
41# CONFIG_IPV6 is not set
42CONFIG_NETFILTER=y
43CONFIG_NETFILTER_NETLINK_QUEUE=m
44CONFIG_NETFILTER_NETLINK_LOG=m
45CONFIG_NF_CONNTRACK=m
46CONFIG_NF_CONNTRACK_EVENTS=y
47# CONFIG_NF_CT_PROTO_SCTP is not set
48CONFIG_NF_CONNTRACK_FTP=m
49CONFIG_NF_CONNTRACK_IRC=m
50CONFIG_NF_CONNTRACK_TFTP=m
51CONFIG_NF_CT_NETLINK=m
52CONFIG_NETFILTER_TPROXY=m
53CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
54CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
55CONFIG_NETFILTER_XT_TARGET_DSCP=m
56CONFIG_NETFILTER_XT_TARGET_MARK=m
57CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
58CONFIG_NETFILTER_XT_TARGET_TPROXY=m
59CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=m
60CONFIG_NETFILTER_XT_MATCH_COMMENT=m
61CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
62CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
63CONFIG_NETFILTER_XT_MATCH_DSCP=m
64CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
65CONFIG_NETFILTER_XT_MATCH_LENGTH=m
66CONFIG_NETFILTER_XT_MATCH_LIMIT=m
67CONFIG_NETFILTER_XT_MATCH_MAC=m
68CONFIG_NETFILTER_XT_MATCH_MARK=m
69CONFIG_NETFILTER_XT_MATCH_OWNER=m
70CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
71CONFIG_NETFILTER_XT_MATCH_RATEEST=m
72CONFIG_NETFILTER_XT_MATCH_REALM=m
73CONFIG_NETFILTER_XT_MATCH_RECENT=m
74CONFIG_NETFILTER_XT_MATCH_STRING=m
75CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
76CONFIG_NETFILTER_XT_MATCH_TIME=m
77CONFIG_NF_CONNTRACK_IPV4=m
78CONFIG_IP_NF_QUEUE=m
79CONFIG_IP_NF_IPTABLES=m
80CONFIG_IP_NF_MATCH_ADDRTYPE=m
81CONFIG_IP_NF_MATCH_ECN=m
82CONFIG_IP_NF_MATCH_TTL=m
83CONFIG_IP_NF_FILTER=m
84CONFIG_IP_NF_TARGET_REJECT=m
85CONFIG_IP_NF_TARGET_LOG=m
86CONFIG_IP_NF_TARGET_ULOG=m
87CONFIG_NF_NAT=m
88CONFIG_IP_NF_TARGET_MASQUERADE=m
89CONFIG_IP_NF_TARGET_NETMAP=m
90CONFIG_IP_NF_TARGET_REDIRECT=m
91CONFIG_IP_NF_MANGLE=m
92CONFIG_IP_NF_TARGET_CLUSTERIP=m
93CONFIG_IP_NF_TARGET_ECN=m
94CONFIG_IP_NF_TARGET_TTL=m
95CONFIG_IP_NF_RAW=m
96CONFIG_IP_NF_ARPTABLES=m
97CONFIG_IP_NF_ARPFILTER=m
98CONFIG_IP_NF_ARP_MANGLE=m
99CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
100CONFIG_PROC_DEVICETREE=y
101CONFIG_BLK_DEV_LOOP=y
102CONFIG_BLK_DEV_NBD=m
103CONFIG_BLK_DEV_RAM=y
104CONFIG_BLK_DEV_RAM_SIZE=65536
105CONFIG_SCSI=y
106CONFIG_BLK_DEV_SD=y
107CONFIG_CHR_DEV_ST=y
108CONFIG_BLK_DEV_SR=y
109CONFIG_BLK_DEV_SR_VENDOR=y
110CONFIG_CHR_DEV_SG=y
111CONFIG_SCSI_MULTI_LUN=y
112CONFIG_SCSI_CONSTANTS=y
113CONFIG_SCSI_SPI_ATTRS=y
114CONFIG_SCSI_FC_ATTRS=y
115CONFIG_SCSI_SAS_LIBSAS=m
116CONFIG_SCSI_IBMVSCSI=m
117CONFIG_MD=y
118CONFIG_BLK_DEV_MD=y
119CONFIG_MD_LINEAR=y
120CONFIG_MD_RAID0=y
121CONFIG_MD_RAID1=y
122CONFIG_MD_RAID10=m
123CONFIG_MD_MULTIPATH=m
124CONFIG_MD_FAULTY=m
125CONFIG_BLK_DEV_DM=y
126CONFIG_DM_CRYPT=m
127CONFIG_DM_SNAPSHOT=m
128CONFIG_DM_MIRROR=m
129CONFIG_DM_ZERO=m
130CONFIG_NETDEVICES=y
131CONFIG_DUMMY=m
132CONFIG_BONDING=m
133CONFIG_TUN=m
134CONFIG_NET_ETHERNET=y
135CONFIG_NET_PCI=y
136CONFIG_PCNET32=y
137CONFIG_E100=y
138CONFIG_ACENIC=m
139CONFIG_E1000=m
140CONFIG_ISERIES_VETH=y
141CONFIG_PPP=m
142CONFIG_PPP_ASYNC=m
143CONFIG_PPP_SYNC_TTY=m
144CONFIG_PPP_DEFLATE=m
145CONFIG_PPP_BSDCOMP=m
146CONFIG_PPPOE=m
147CONFIG_NETCONSOLE=y
148CONFIG_NETPOLL_TRAP=y
149# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
150# CONFIG_INPUT_KEYBOARD is not set
151# CONFIG_INPUT_MOUSE is not set
152# CONFIG_SERIO is not set
153CONFIG_SERIAL_ICOM=m
154# CONFIG_HW_RANDOM is not set
155CONFIG_GEN_RTC=y
156CONFIG_RAW_DRIVER=y
157# CONFIG_HWMON is not set
158# CONFIG_HID_SUPPORT is not set
159# CONFIG_USB_SUPPORT is not set
160CONFIG_EXT2_FS=y
161CONFIG_EXT2_FS_XATTR=y
162CONFIG_EXT2_FS_POSIX_ACL=y
163CONFIG_EXT2_FS_SECURITY=y
164CONFIG_EXT2_FS_XIP=y
165CONFIG_EXT3_FS=y
166CONFIG_EXT3_FS_POSIX_ACL=y
167CONFIG_EXT3_FS_SECURITY=y
168CONFIG_EXT4_FS=y
169CONFIG_REISERFS_FS=y
170CONFIG_REISERFS_FS_XATTR=y
171CONFIG_REISERFS_FS_POSIX_ACL=y
172CONFIG_REISERFS_FS_SECURITY=y
173CONFIG_JFS_FS=m
174CONFIG_JFS_POSIX_ACL=y
175CONFIG_JFS_SECURITY=y
176CONFIG_XFS_FS=m
177CONFIG_XFS_POSIX_ACL=y
178CONFIG_GFS2_FS=m
179CONFIG_AUTOFS_FS=m
180CONFIG_ISO9660_FS=y
181CONFIG_JOLIET=y
182CONFIG_ZISOFS=y
183CONFIG_UDF_FS=m
184CONFIG_MSDOS_FS=y
185CONFIG_VFAT_FS=y
186CONFIG_PROC_KCORE=y
187CONFIG_TMPFS=y
188CONFIG_TMPFS_POSIX_ACL=y
189CONFIG_CRAMFS=y
190CONFIG_NFS_FS=y
191CONFIG_NFS_V3=y
192CONFIG_NFS_V3_ACL=y
193CONFIG_NFS_V4=y
194CONFIG_NFSD=m
195CONFIG_NFSD_V3_ACL=y
196CONFIG_NFSD_V4=y
197CONFIG_RPCSEC_GSS_SPKM3=m
198CONFIG_CIFS=m
199CONFIG_CIFS_XATTR=y
200CONFIG_CIFS_POSIX=y
201CONFIG_NLS_CODEPAGE_437=y
202CONFIG_NLS_ASCII=y
203CONFIG_NLS_ISO8859_1=y
204CONFIG_DLM=m
205CONFIG_CRC_T10DIF=y
206CONFIG_MAGIC_SYSRQ=y
207CONFIG_DEBUG_FS=y
208CONFIG_DEBUG_KERNEL=y
209# CONFIG_RCU_CPU_STALL_DETECTOR is not set
210CONFIG_LATENCYTOP=y
211CONFIG_SYSCTL_SYSCALL_CHECK=y
212CONFIG_DEBUG_STACKOVERFLOW=y
213CONFIG_DEBUG_STACK_USAGE=y
214CONFIG_CRYPTO_NULL=m
215CONFIG_CRYPTO_TEST=m
216CONFIG_CRYPTO_ECB=m
217CONFIG_CRYPTO_PCBC=m
218CONFIG_CRYPTO_HMAC=y
219CONFIG_CRYPTO_MD4=m
220CONFIG_CRYPTO_MICHAEL_MIC=m
221CONFIG_CRYPTO_SHA256=m
222CONFIG_CRYPTO_SHA512=m
223CONFIG_CRYPTO_TGR192=m
224CONFIG_CRYPTO_WP512=m
225CONFIG_CRYPTO_AES=m
226CONFIG_CRYPTO_ANUBIS=m
227CONFIG_CRYPTO_ARC4=m
228CONFIG_CRYPTO_BLOWFISH=m
229CONFIG_CRYPTO_CAST6=m
230CONFIG_CRYPTO_KHAZAD=m
231CONFIG_CRYPTO_SEED=m
232CONFIG_CRYPTO_SERPENT=m
233CONFIG_CRYPTO_TEA=m
234CONFIG_CRYPTO_TWOFISH=m
235# CONFIG_CRYPTO_ANSI_CPRNG is not set
236# CONFIG_CRYPTO_HW is not set
diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig
index 2a1320fb2723..6640a35bebb7 100644
--- a/arch/powerpc/configs/mpc5200_defconfig
+++ b/arch/powerpc/configs/mpc5200_defconfig
@@ -1,8 +1,8 @@
1CONFIG_EXPERIMENTAL=y 1CONFIG_EXPERIMENTAL=y
2CONFIG_SYSVIPC=y 2CONFIG_SYSVIPC=y
3CONFIG_SPARSE_IRQ=y
3CONFIG_LOG_BUF_SHIFT=14 4CONFIG_LOG_BUF_SHIFT=14
4CONFIG_BLK_DEV_INITRD=y 5CONFIG_BLK_DEV_INITRD=y
5# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
6CONFIG_MODULES=y 6CONFIG_MODULES=y
7CONFIG_MODULE_UNLOAD=y 7CONFIG_MODULE_UNLOAD=y
8# CONFIG_BLK_DEV_BSG is not set 8# CONFIG_BLK_DEV_BSG is not set
@@ -13,15 +13,12 @@ CONFIG_PPC_EFIKA=y
13CONFIG_PPC_LITE5200=y 13CONFIG_PPC_LITE5200=y
14CONFIG_PPC_MEDIA5200=y 14CONFIG_PPC_MEDIA5200=y
15CONFIG_PPC_MPC5200_BUGFIX=y 15CONFIG_PPC_MPC5200_BUGFIX=y
16CONFIG_PPC_MPC5200_GPIO=y
17CONFIG_PPC_MPC5200_LPBFIFO=m 16CONFIG_PPC_MPC5200_LPBFIFO=m
18# CONFIG_PPC_PMAC is not set 17# CONFIG_PPC_PMAC is not set
19CONFIG_PPC_BESTCOMM=y 18CONFIG_PPC_BESTCOMM=y
20CONFIG_SIMPLE_GPIO=y 19CONFIG_SIMPLE_GPIO=y
21CONFIG_NO_HZ=y 20CONFIG_NO_HZ=y
22CONFIG_HIGH_RES_TIMERS=y 21CONFIG_HIGH_RES_TIMERS=y
23CONFIG_SPARSE_IRQ=y
24CONFIG_PM=y
25CONFIG_NET=y 22CONFIG_NET=y
26CONFIG_PACKET=y 23CONFIG_PACKET=y
27CONFIG_UNIX=y 24CONFIG_UNIX=y
@@ -36,23 +33,20 @@ CONFIG_SYN_COOKIES=y
36# CONFIG_IPV6 is not set 33# CONFIG_IPV6 is not set
37CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 34CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
38CONFIG_MTD=y 35CONFIG_MTD=y
39CONFIG_MTD_CONCAT=y
40CONFIG_MTD_PARTITIONS=y
41CONFIG_MTD_CMDLINE_PARTS=y 36CONFIG_MTD_CMDLINE_PARTS=y
42CONFIG_MTD_OF_PARTS=y 37CONFIG_MTD_OF_PARTS=y
43CONFIG_MTD_CHAR=y 38CONFIG_MTD_CHAR=y
44CONFIG_MTD_BLOCK=y 39CONFIG_MTD_BLOCK=y
45CONFIG_MTD_CFI=y 40CONFIG_MTD_CFI=y
46CONFIG_MTD_CFI_AMDSTD=y 41CONFIG_MTD_CFI_AMDSTD=y
47CONFIG_MTD_RAM=y
48CONFIG_MTD_ROM=y 42CONFIG_MTD_ROM=y
49CONFIG_MTD_PHYSMAP_OF=y 43CONFIG_MTD_PHYSMAP_OF=y
44CONFIG_MTD_PLATRAM=y
50CONFIG_MTD_UBI=m 45CONFIG_MTD_UBI=m
51CONFIG_PROC_DEVICETREE=y 46CONFIG_PROC_DEVICETREE=y
52CONFIG_BLK_DEV_LOOP=y 47CONFIG_BLK_DEV_LOOP=y
53CONFIG_BLK_DEV_RAM=y 48CONFIG_BLK_DEV_RAM=y
54CONFIG_BLK_DEV_RAM_SIZE=32768 49CONFIG_BLK_DEV_RAM_SIZE=32768
55CONFIG_MISC_DEVICES=y
56CONFIG_EEPROM_AT24=y 50CONFIG_EEPROM_AT24=y
57CONFIG_SCSI_TGT=y 51CONFIG_SCSI_TGT=y
58CONFIG_BLK_DEV_SD=y 52CONFIG_BLK_DEV_SD=y
@@ -61,11 +55,10 @@ CONFIG_ATA=y
61CONFIG_PATA_MPC52xx=y 55CONFIG_PATA_MPC52xx=y
62CONFIG_PATA_PLATFORM=y 56CONFIG_PATA_PLATFORM=y
63CONFIG_NETDEVICES=y 57CONFIG_NETDEVICES=y
64CONFIG_LXT_PHY=y
65CONFIG_NET_ETHERNET=y
66CONFIG_FEC_MPC52xx=y 58CONFIG_FEC_MPC52xx=y
67# CONFIG_NETDEV_1000 is not set 59CONFIG_AMD_PHY=y
68# CONFIG_NETDEV_10000 is not set 60CONFIG_LXT_PHY=y
61CONFIG_FIXED_PHY=y
69# CONFIG_INPUT_KEYBOARD is not set 62# CONFIG_INPUT_KEYBOARD is not set
70# CONFIG_INPUT_MOUSE is not set 63# CONFIG_INPUT_MOUSE is not set
71# CONFIG_SERIO is not set 64# CONFIG_SERIO is not set
@@ -80,11 +73,17 @@ CONFIG_SPI_GPIO=m
80CONFIG_SPI_MPC52xx=m 73CONFIG_SPI_MPC52xx=m
81CONFIG_SPI_MPC52xx_PSC=m 74CONFIG_SPI_MPC52xx_PSC=m
82CONFIG_SPI_SPIDEV=m 75CONFIG_SPI_SPIDEV=m
76CONFIG_GPIO_SYSFS=y
77CONFIG_SENSORS_LM80=y
78CONFIG_SENSORS_LM87=m
83CONFIG_WATCHDOG=y 79CONFIG_WATCHDOG=y
80CONFIG_MFD_SM501=m
84CONFIG_DRM=y 81CONFIG_DRM=y
85CONFIG_VIDEO_OUTPUT_CONTROL=y 82CONFIG_VIDEO_OUTPUT_CONTROL=y
86CONFIG_FB=y 83CONFIG_FB=y
84CONFIG_FB_FOREIGN_ENDIAN=y
87CONFIG_FB_RADEON=y 85CONFIG_FB_RADEON=y
86CONFIG_FB_SM501=m
88# CONFIG_VGA_CONSOLE is not set 87# CONFIG_VGA_CONSOLE is not set
89CONFIG_FRAMEBUFFER_CONSOLE=y 88CONFIG_FRAMEBUFFER_CONSOLE=y
90CONFIG_LOGO=y 89CONFIG_LOGO=y
@@ -124,10 +123,11 @@ CONFIG_USB_STORAGE=y
124CONFIG_NEW_LEDS=y 123CONFIG_NEW_LEDS=y
125CONFIG_RTC_CLASS=y 124CONFIG_RTC_CLASS=y
126CONFIG_RTC_DRV_DS1307=y 125CONFIG_RTC_DRV_DS1307=y
126CONFIG_RTC_DRV_DS1374=y
127CONFIG_RTC_DRV_PCF8563=m
127CONFIG_EXT2_FS=y 128CONFIG_EXT2_FS=y
128CONFIG_EXT3_FS=y 129CONFIG_EXT3_FS=y
129# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 130# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
130CONFIG_INOTIFY=y
131CONFIG_MSDOS_FS=y 131CONFIG_MSDOS_FS=y
132CONFIG_VFAT_FS=y 132CONFIG_VFAT_FS=y
133CONFIG_PROC_KCORE=y 133CONFIG_PROC_KCORE=y
@@ -145,5 +145,4 @@ CONFIG_PRINTK_TIME=y
145CONFIG_DEBUG_KERNEL=y 145CONFIG_DEBUG_KERNEL=y
146CONFIG_DETECT_HUNG_TASK=y 146CONFIG_DETECT_HUNG_TASK=y
147CONFIG_DEBUG_INFO=y 147CONFIG_DEBUG_INFO=y
148# CONFIG_RCU_CPU_STALL_DETECTOR is not set
149# CONFIG_CRYPTO_ANSI_CPRNG is not set 148# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index f37a2ab48881..5fb0c8a94811 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -1,4 +1,5 @@
1CONFIG_PPC_85xx=y 1CONFIG_PPC_85xx=y
2CONFIG_PHYS_64BIT=y
2CONFIG_EXPERIMENTAL=y 3CONFIG_EXPERIMENTAL=y
3CONFIG_SYSVIPC=y 4CONFIG_SYSVIPC=y
4CONFIG_POSIX_MQUEUE=y 5CONFIG_POSIX_MQUEUE=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index abdcd317cda7..fb51bc90edd2 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -1,4 +1,5 @@
1CONFIG_PPC_85xx=y 1CONFIG_PPC_85xx=y
2CONFIG_PHYS_64BIT=y
2CONFIG_SMP=y 3CONFIG_SMP=y
3CONFIG_NR_CPUS=8 4CONFIG_NR_CPUS=8
4CONFIG_EXPERIMENTAL=y 5CONFIG_EXPERIMENTAL=y
diff --git a/arch/powerpc/configs/ppc64_defconfig b/arch/powerpc/configs/ppc64_defconfig
index 2156e077859b..1acf65026773 100644
--- a/arch/powerpc/configs/ppc64_defconfig
+++ b/arch/powerpc/configs/ppc64_defconfig
@@ -24,10 +24,6 @@ CONFIG_PPC_SPLPAR=y
24CONFIG_SCANLOG=m 24CONFIG_SCANLOG=m
25CONFIG_PPC_SMLPAR=y 25CONFIG_PPC_SMLPAR=y
26CONFIG_DTL=y 26CONFIG_DTL=y
27CONFIG_PPC_ISERIES=y
28CONFIG_VIODASD=y
29CONFIG_VIOCD=m
30CONFIG_VIOTAPE=m
31CONFIG_PPC_MAPLE=y 27CONFIG_PPC_MAPLE=y
32CONFIG_PPC_PASEMI=y 28CONFIG_PPC_PASEMI=y
33CONFIG_PPC_PASEMI_IOMMU=y 29CONFIG_PPC_PASEMI_IOMMU=y
@@ -259,7 +255,6 @@ CONFIG_PASEMI_MAC=y
259CONFIG_MLX4_EN=m 255CONFIG_MLX4_EN=m
260CONFIG_QLGE=m 256CONFIG_QLGE=m
261CONFIG_BE2NET=m 257CONFIG_BE2NET=m
262CONFIG_ISERIES_VETH=m
263CONFIG_PPP=m 258CONFIG_PPP=m
264CONFIG_PPP_ASYNC=m 259CONFIG_PPP_ASYNC=m
265CONFIG_PPP_SYNC_TTY=m 260CONFIG_PPP_SYNC_TTY=m
diff --git a/arch/powerpc/include/asm/abs_addr.h b/arch/powerpc/include/asm/abs_addr.h
index 5ab0b71531be..9d92ba04b033 100644
--- a/arch/powerpc/include/asm/abs_addr.h
+++ b/arch/powerpc/include/asm/abs_addr.h
@@ -17,7 +17,6 @@
17#include <asm/types.h> 17#include <asm/types.h>
18#include <asm/page.h> 18#include <asm/page.h>
19#include <asm/prom.h> 19#include <asm/prom.h>
20#include <asm/firmware.h>
21 20
22struct mschunks_map { 21struct mschunks_map {
23 unsigned long num_chunks; 22 unsigned long num_chunks;
@@ -46,30 +45,12 @@ static inline unsigned long addr_to_chunk(unsigned long addr)
46 45
47static inline unsigned long phys_to_abs(unsigned long pa) 46static inline unsigned long phys_to_abs(unsigned long pa)
48{ 47{
49 unsigned long chunk; 48 return pa;
50
51 /* This is a no-op on non-iSeries */
52 if (!firmware_has_feature(FW_FEATURE_ISERIES))
53 return pa;
54
55 chunk = addr_to_chunk(pa);
56
57 if (chunk < mschunks_map.num_chunks)
58 chunk = mschunks_map.mapping[chunk];
59
60 return chunk_to_addr(chunk) + (pa & MSCHUNKS_OFFSET_MASK);
61} 49}
62 50
63/* Convenience macros */ 51/* Convenience macros */
64#define virt_to_abs(va) phys_to_abs(__pa(va)) 52#define virt_to_abs(va) phys_to_abs(__pa(va))
65#define abs_to_virt(aa) __va(aa) 53#define abs_to_virt(aa) __va(aa)
66 54
67/*
68 * Converts Virtual Address to Real Address for
69 * Legacy iSeries Hypervisor calls
70 */
71#define iseries_hv_addr(virtaddr) \
72 (0x8000000000000000UL | virt_to_abs(virtaddr))
73
74#endif /* __KERNEL__ */ 55#endif /* __KERNEL__ */
75#endif /* _ASM_POWERPC_ABS_ADDR_H */ 56#endif /* _ASM_POWERPC_ABS_ADDR_H */
diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
index 02e41b53488d..14174e838ad9 100644
--- a/arch/powerpc/include/asm/atomic.h
+++ b/arch/powerpc/include/asm/atomic.h
@@ -212,6 +212,36 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
212 return t; 212 return t;
213} 213}
214 214
215/**
216 * atomic_inc_not_zero - increment unless the number is zero
217 * @v: pointer of type atomic_t
218 *
219 * Atomically increments @v by 1, so long as @v is non-zero.
220 * Returns non-zero if @v was non-zero, and zero otherwise.
221 */
222static __inline__ int atomic_inc_not_zero(atomic_t *v)
223{
224 int t1, t2;
225
226 __asm__ __volatile__ (
227 PPC_ATOMIC_ENTRY_BARRIER
228"1: lwarx %0,0,%2 # atomic_inc_not_zero\n\
229 cmpwi 0,%0,0\n\
230 beq- 2f\n\
231 addic %1,%0,1\n"
232 PPC405_ERR77(0,%2)
233" stwcx. %1,0,%2\n\
234 bne- 1b\n"
235 PPC_ATOMIC_EXIT_BARRIER
236 "\n\
2372:"
238 : "=&r" (t1), "=&r" (t2)
239 : "r" (&v->counter)
240 : "cc", "xer", "memory");
241
242 return t1;
243}
244#define atomic_inc_not_zero(v) atomic_inc_not_zero((v))
215 245
216#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0) 246#define atomic_sub_and_test(a, v) (atomic_sub_return((a), (v)) == 0)
217#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0) 247#define atomic_dec_and_test(v) (atomic_dec_return((v)) == 0)
@@ -467,7 +497,34 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
467 return t != u; 497 return t != u;
468} 498}
469 499
470#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) 500/**
501 * atomic_inc64_not_zero - increment unless the number is zero
502 * @v: pointer of type atomic64_t
503 *
504 * Atomically increments @v by 1, so long as @v is non-zero.
505 * Returns non-zero if @v was non-zero, and zero otherwise.
506 */
507static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
508{
509 long t1, t2;
510
511 __asm__ __volatile__ (
512 PPC_ATOMIC_ENTRY_BARRIER
513"1: ldarx %0,0,%2 # atomic64_inc_not_zero\n\
514 cmpdi 0,%0,0\n\
515 beq- 2f\n\
516 addic %1,%0,1\n\
517 stdcx. %1,0,%2\n\
518 bne- 1b\n"
519 PPC_ATOMIC_EXIT_BARRIER
520 "\n\
5212:"
522 : "=&r" (t1), "=&r" (t2)
523 : "r" (&v->counter)
524 : "cc", "xer", "memory");
525
526 return t1;
527}
471 528
472#endif /* __powerpc64__ */ 529#endif /* __powerpc64__ */
473 530
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index ad55a1ccb9fb..b9219e99bd2a 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -390,6 +390,10 @@ extern const char *powerpc_base_platform;
390 CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ 390 CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
391 CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ 391 CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
392 CPU_FTR_DEBUG_LVL_EXC) 392 CPU_FTR_DEBUG_LVL_EXC)
393#define CPU_FTRS_E6500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
394 CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
395 CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
396 CPU_FTR_DEBUG_LVL_EXC)
393#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) 397#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
394 398
395/* 64-bit CPUs */ 399/* 64-bit CPUs */
@@ -442,7 +446,7 @@ extern const char *powerpc_base_platform;
442 446
443#ifdef __powerpc64__ 447#ifdef __powerpc64__
444#ifdef CONFIG_PPC_BOOK3E 448#ifdef CONFIG_PPC_BOOK3E
445#define CPU_FTRS_POSSIBLE (CPU_FTRS_E5500 | CPU_FTRS_A2) 449#define CPU_FTRS_POSSIBLE (CPU_FTRS_E6500 | CPU_FTRS_E5500 | CPU_FTRS_A2)
446#else 450#else
447#define CPU_FTRS_POSSIBLE \ 451#define CPU_FTRS_POSSIBLE \
448 (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \ 452 (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \
@@ -483,7 +487,7 @@ enum {
483#endif 487#endif
484#ifdef CONFIG_E500 488#ifdef CONFIG_E500
485 CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC | 489 CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC |
486 CPU_FTRS_E5500 | 490 CPU_FTRS_E5500 | CPU_FTRS_E6500 |
487#endif 491#endif
488 0, 492 0,
489}; 493};
@@ -491,7 +495,7 @@ enum {
491 495
492#ifdef __powerpc64__ 496#ifdef __powerpc64__
493#ifdef CONFIG_PPC_BOOK3E 497#ifdef CONFIG_PPC_BOOK3E
494#define CPU_FTRS_ALWAYS (CPU_FTRS_E5500 & CPU_FTRS_A2) 498#define CPU_FTRS_ALWAYS (CPU_FTRS_E6500 & CPU_FTRS_E5500 & CPU_FTRS_A2)
495#else 499#else
496#define CPU_FTRS_ALWAYS \ 500#define CPU_FTRS_ALWAYS \
497 (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \ 501 (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \
@@ -528,7 +532,7 @@ enum {
528#endif 532#endif
529#ifdef CONFIG_E500 533#ifdef CONFIG_E500
530 CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC & 534 CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC &
531 CPU_FTRS_E5500 & 535 CPU_FTRS_E5500 & CPU_FTRS_E6500 &
532#endif 536#endif
533 CPU_FTRS_POSSIBLE, 537 CPU_FTRS_POSSIBLE,
534}; 538};
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h
index d57c08acedfc..63d5ca49cece 100644
--- a/arch/powerpc/include/asm/device.h
+++ b/arch/powerpc/include/asm/device.h
@@ -31,6 +31,9 @@ struct dev_archdata {
31#ifdef CONFIG_SWIOTLB 31#ifdef CONFIG_SWIOTLB
32 dma_addr_t max_direct_dma_addr; 32 dma_addr_t max_direct_dma_addr;
33#endif 33#endif
34#ifdef CONFIG_EEH
35 struct eeh_dev *edev;
36#endif
34}; 37};
35 38
36struct pdev_archdata { 39struct pdev_archdata {
diff --git a/arch/powerpc/include/asm/dma.h b/arch/powerpc/include/asm/dma.h
index a7e06e25c708..adadb9943610 100644
--- a/arch/powerpc/include/asm/dma.h
+++ b/arch/powerpc/include/asm/dma.h
@@ -34,8 +34,6 @@
34/* Doesn't really apply... */ 34/* Doesn't really apply... */
35#define MAX_DMA_ADDRESS (~0UL) 35#define MAX_DMA_ADDRESS (~0UL)
36 36
37#if !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI)
38
39#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER 37#ifdef HAVE_REALLY_SLOW_DMA_CONTROLLER
40#define dma_outb outb_p 38#define dma_outb outb_p
41#else 39#else
@@ -354,7 +352,5 @@ extern int isa_dma_bridge_buggy;
354#define isa_dma_bridge_buggy (0) 352#define isa_dma_bridge_buggy (0)
355#endif 353#endif
356 354
357#endif /* !defined(CONFIG_PPC_ISERIES) || defined(CONFIG_PCI) */
358
359#endif /* __KERNEL__ */ 355#endif /* __KERNEL__ */
360#endif /* _ASM_POWERPC_DMA_H */ 356#endif /* _ASM_POWERPC_DMA_H */
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 66ea9b8b95c5..d60f99814ffb 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * eeh.h
3 * Copyright (C) 2001 Dave Engebretsen & Todd Inglett IBM Corporation. 2 * Copyright (C) 2001 Dave Engebretsen & Todd Inglett IBM Corporation.
3 * Copyright 2001-2012 IBM Corporation.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -31,44 +31,105 @@ struct device_node;
31 31
32#ifdef CONFIG_EEH 32#ifdef CONFIG_EEH
33 33
34extern int eeh_subsystem_enabled; 34/*
35 * The struct is used to trace EEH state for the associated
36 * PCI device node or PCI device. In future, it might
37 * represent PE as well so that the EEH device to form
38 * another tree except the currently existing tree of PCI
39 * buses and PCI devices
40 */
41#define EEH_MODE_SUPPORTED (1<<0) /* EEH supported on the device */
42#define EEH_MODE_NOCHECK (1<<1) /* EEH check should be skipped */
43#define EEH_MODE_ISOLATED (1<<2) /* The device has been isolated */
44#define EEH_MODE_RECOVERING (1<<3) /* Recovering the device */
45#define EEH_MODE_IRQ_DISABLED (1<<4) /* Interrupt disabled */
46
47struct eeh_dev {
48 int mode; /* EEH mode */
49 int class_code; /* Class code of the device */
50 int config_addr; /* Config address */
51 int pe_config_addr; /* PE config address */
52 int check_count; /* Times of ignored error */
53 int freeze_count; /* Times of froze up */
54 int false_positives; /* Times of reported #ff's */
55 u32 config_space[16]; /* Saved PCI config space */
56 struct pci_controller *phb; /* Associated PHB */
57 struct device_node *dn; /* Associated device node */
58 struct pci_dev *pdev; /* Associated PCI device */
59};
60
61static inline struct device_node *eeh_dev_to_of_node(struct eeh_dev *edev)
62{
63 return edev->dn;
64}
65
66static inline struct pci_dev *eeh_dev_to_pci_dev(struct eeh_dev *edev)
67{
68 return edev->pdev;
69}
35 70
36/* Values for eeh_mode bits in device_node */ 71/*
37#define EEH_MODE_SUPPORTED (1<<0) 72 * The struct is used to trace the registered EEH operation
38#define EEH_MODE_NOCHECK (1<<1) 73 * callback functions. Actually, those operation callback
39#define EEH_MODE_ISOLATED (1<<2) 74 * functions are heavily platform dependent. That means the
40#define EEH_MODE_RECOVERING (1<<3) 75 * platform should register its own EEH operation callback
41#define EEH_MODE_IRQ_DISABLED (1<<4) 76 * functions before any EEH further operations.
77 */
78#define EEH_OPT_DISABLE 0 /* EEH disable */
79#define EEH_OPT_ENABLE 1 /* EEH enable */
80#define EEH_OPT_THAW_MMIO 2 /* MMIO enable */
81#define EEH_OPT_THAW_DMA 3 /* DMA enable */
82#define EEH_STATE_UNAVAILABLE (1 << 0) /* State unavailable */
83#define EEH_STATE_NOT_SUPPORT (1 << 1) /* EEH not supported */
84#define EEH_STATE_RESET_ACTIVE (1 << 2) /* Active reset */
85#define EEH_STATE_MMIO_ACTIVE (1 << 3) /* Active MMIO */
86#define EEH_STATE_DMA_ACTIVE (1 << 4) /* Active DMA */
87#define EEH_STATE_MMIO_ENABLED (1 << 5) /* MMIO enabled */
88#define EEH_STATE_DMA_ENABLED (1 << 6) /* DMA enabled */
89#define EEH_RESET_DEACTIVATE 0 /* Deactivate the PE reset */
90#define EEH_RESET_HOT 1 /* Hot reset */
91#define EEH_RESET_FUNDAMENTAL 3 /* Fundamental reset */
92#define EEH_LOG_TEMP 1 /* EEH temporary error log */
93#define EEH_LOG_PERM 2 /* EEH permanent error log */
94
95struct eeh_ops {
96 char *name;
97 int (*init)(void);
98 int (*set_option)(struct device_node *dn, int option);
99 int (*get_pe_addr)(struct device_node *dn);
100 int (*get_state)(struct device_node *dn, int *state);
101 int (*reset)(struct device_node *dn, int option);
102 int (*wait_state)(struct device_node *dn, int max_wait);
103 int (*get_log)(struct device_node *dn, int severity, char *drv_log, unsigned long len);
104 int (*configure_bridge)(struct device_node *dn);
105 int (*read_config)(struct device_node *dn, int where, int size, u32 *val);
106 int (*write_config)(struct device_node *dn, int where, int size, u32 val);
107};
108
109extern struct eeh_ops *eeh_ops;
110extern int eeh_subsystem_enabled;
42 111
43/* Max number of EEH freezes allowed before we consider the device 112/*
44 * to be permanently disabled. */ 113 * Max number of EEH freezes allowed before we consider the device
114 * to be permanently disabled.
115 */
45#define EEH_MAX_ALLOWED_FREEZES 5 116#define EEH_MAX_ALLOWED_FREEZES 5
46 117
118void * __devinit eeh_dev_init(struct device_node *dn, void *data);
119void __devinit eeh_dev_phb_init_dynamic(struct pci_controller *phb);
120void __init eeh_dev_phb_init(void);
47void __init eeh_init(void); 121void __init eeh_init(void);
122#ifdef CONFIG_PPC_PSERIES
123int __init eeh_pseries_init(void);
124#endif
125int __init eeh_ops_register(struct eeh_ops *ops);
126int __exit eeh_ops_unregister(const char *name);
48unsigned long eeh_check_failure(const volatile void __iomem *token, 127unsigned long eeh_check_failure(const volatile void __iomem *token,
49 unsigned long val); 128 unsigned long val);
50int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev); 129int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev);
51void __init pci_addr_cache_build(void); 130void __init pci_addr_cache_build(void);
52
53/**
54 * eeh_add_device_early
55 * eeh_add_device_late
56 *
57 * Perform eeh initialization for devices added after boot.
58 * Call eeh_add_device_early before doing any i/o to the
59 * device (including config space i/o). Call eeh_add_device_late
60 * to finish the eeh setup for this device.
61 */
62void eeh_add_device_tree_early(struct device_node *); 131void eeh_add_device_tree_early(struct device_node *);
63void eeh_add_device_tree_late(struct pci_bus *); 132void eeh_add_device_tree_late(struct pci_bus *);
64
65/**
66 * eeh_remove_device_recursive - undo EEH for device & children.
67 * @dev: pci device to be removed
68 *
69 * As above, this removes the device; it also removes child
70 * pci devices as well.
71 */
72void eeh_remove_bus_device(struct pci_dev *); 133void eeh_remove_bus_device(struct pci_dev *);
73 134
74/** 135/**
@@ -87,8 +148,25 @@ void eeh_remove_bus_device(struct pci_dev *);
87#define EEH_IO_ERROR_VALUE(size) (~0U >> ((4 - (size)) * 8)) 148#define EEH_IO_ERROR_VALUE(size) (~0U >> ((4 - (size)) * 8))
88 149
89#else /* !CONFIG_EEH */ 150#else /* !CONFIG_EEH */
151
152static inline void *eeh_dev_init(struct device_node *dn, void *data)
153{
154 return NULL;
155}
156
157static inline void eeh_dev_phb_init_dynamic(struct pci_controller *phb) { }
158
159static inline void eeh_dev_phb_init(void) { }
160
90static inline void eeh_init(void) { } 161static inline void eeh_init(void) { }
91 162
163#ifdef CONFIG_PPC_PSERIES
164static inline int eeh_pseries_init(void)
165{
166 return 0;
167}
168#endif /* CONFIG_PPC_PSERIES */
169
92static inline unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val) 170static inline unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned long val)
93{ 171{
94 return val; 172 return val;
diff --git a/arch/powerpc/include/asm/eeh_event.h b/arch/powerpc/include/asm/eeh_event.h
index cc3cb04539ac..c68b012b7797 100644
--- a/arch/powerpc/include/asm/eeh_event.h
+++ b/arch/powerpc/include/asm/eeh_event.h
@@ -1,6 +1,4 @@
1/* 1/*
2 * eeh_event.h
3 *
4 * This program is free software; you can redistribute it and/or modify 2 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 3 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or 4 * the Free Software Foundation; either version 2 of the License, or
@@ -22,32 +20,19 @@
22#define ASM_POWERPC_EEH_EVENT_H 20#define ASM_POWERPC_EEH_EVENT_H
23#ifdef __KERNEL__ 21#ifdef __KERNEL__
24 22
25/** EEH event -- structure holding pci controller data that describes 23/*
26 * a change in the isolation status of a PCI slot. A pointer 24 * structure holding pci controller data that describes a
27 * to this struct is passed as the data pointer in a notify callback. 25 * change in the isolation status of a PCI slot. A pointer
26 * to this struct is passed as the data pointer in a notify
27 * callback.
28 */ 28 */
29struct eeh_event { 29struct eeh_event {
30 struct list_head list; 30 struct list_head list; /* to form event queue */
31 struct device_node *dn; /* struct device node */ 31 struct eeh_dev *edev; /* EEH device */
32 struct pci_dev *dev; /* affected device */
33}; 32};
34 33
35/** 34int eeh_send_failure_event(struct eeh_dev *edev);
36 * eeh_send_failure_event - generate a PCI error event 35struct eeh_dev *handle_eeh_events(struct eeh_event *);
37 * @dev pci device
38 *
39 * This routine builds a PCI error event which will be delivered
40 * to all listeners on the eeh_notifier_chain.
41 *
42 * This routine can be called within an interrupt context;
43 * the actual event will be delivered in a normal context
44 * (from a workqueue).
45 */
46int eeh_send_failure_event (struct device_node *dn,
47 struct pci_dev *dev);
48
49/* Main recovery function */
50struct pci_dn * handle_eeh_events (struct eeh_event *);
51 36
52#endif /* __KERNEL__ */ 37#endif /* __KERNEL__ */
53#endif /* ASM_POWERPC_EEH_EVENT_H */ 38#endif /* ASM_POWERPC_EEH_EVENT_H */
diff --git a/arch/powerpc/include/asm/ehv_pic.h b/arch/powerpc/include/asm/ehv_pic.h
index a9e1f4f796f6..dc7d48e3ea90 100644
--- a/arch/powerpc/include/asm/ehv_pic.h
+++ b/arch/powerpc/include/asm/ehv_pic.h
@@ -25,7 +25,7 @@
25 25
26struct ehv_pic { 26struct ehv_pic {
27 /* The remapper for this EHV_PIC */ 27 /* The remapper for this EHV_PIC */
28 struct irq_host *irqhost; 28 struct irq_domain *irqhost;
29 29
30 /* The "linux" controller struct */ 30 /* The "linux" controller struct */
31 struct irq_chip hc_irq; 31 struct irq_chip hc_irq;
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h
index 8057f4f6980f..548da3aa0a30 100644
--- a/arch/powerpc/include/asm/exception-64s.h
+++ b/arch/powerpc/include/asm/exception-64s.h
@@ -232,23 +232,30 @@ label##_hv: \
232 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \ 232 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, \
233 EXC_HV, KVMTEST, vec) 233 EXC_HV, KVMTEST, vec)
234 234
235#define __SOFTEN_TEST(h) \ 235/* This associate vector numbers with bits in paca->irq_happened */
236#define SOFTEN_VALUE_0x500 PACA_IRQ_EE
237#define SOFTEN_VALUE_0x502 PACA_IRQ_EE
238#define SOFTEN_VALUE_0x900 PACA_IRQ_DEC
239#define SOFTEN_VALUE_0x982 PACA_IRQ_DEC
240
241#define __SOFTEN_TEST(h, vec) \
236 lbz r10,PACASOFTIRQEN(r13); \ 242 lbz r10,PACASOFTIRQEN(r13); \
237 cmpwi r10,0; \ 243 cmpwi r10,0; \
244 li r10,SOFTEN_VALUE_##vec; \
238 beq masked_##h##interrupt 245 beq masked_##h##interrupt
239#define _SOFTEN_TEST(h) __SOFTEN_TEST(h) 246#define _SOFTEN_TEST(h, vec) __SOFTEN_TEST(h, vec)
240 247
241#define SOFTEN_TEST_PR(vec) \ 248#define SOFTEN_TEST_PR(vec) \
242 KVMTEST_PR(vec); \ 249 KVMTEST_PR(vec); \
243 _SOFTEN_TEST(EXC_STD) 250 _SOFTEN_TEST(EXC_STD, vec)
244 251
245#define SOFTEN_TEST_HV(vec) \ 252#define SOFTEN_TEST_HV(vec) \
246 KVMTEST(vec); \ 253 KVMTEST(vec); \
247 _SOFTEN_TEST(EXC_HV) 254 _SOFTEN_TEST(EXC_HV, vec)
248 255
249#define SOFTEN_TEST_HV_201(vec) \ 256#define SOFTEN_TEST_HV_201(vec) \
250 KVMTEST(vec); \ 257 KVMTEST(vec); \
251 _SOFTEN_TEST(EXC_STD) 258 _SOFTEN_TEST(EXC_STD, vec)
252 259
253#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \ 260#define __MASKABLE_EXCEPTION_PSERIES(vec, label, h, extra) \
254 HMT_MEDIUM; \ 261 HMT_MEDIUM; \
@@ -272,73 +279,55 @@ label##_hv: \
272 _MASKABLE_EXCEPTION_PSERIES(vec, label, \ 279 _MASKABLE_EXCEPTION_PSERIES(vec, label, \
273 EXC_HV, SOFTEN_TEST_HV) 280 EXC_HV, SOFTEN_TEST_HV)
274 281
275#ifdef CONFIG_PPC_ISERIES 282/*
276#define DISABLE_INTS \ 283 * Our exception common code can be passed various "additions"
277 li r11,0; \ 284 * to specify the behaviour of interrupts, whether to kick the
278 stb r11,PACASOFTIRQEN(r13); \ 285 * runlatch, etc...
279BEGIN_FW_FTR_SECTION; \ 286 */
280 stb r11,PACAHARDIRQEN(r13); \ 287
281END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES); \ 288/* Exception addition: Hard disable interrupts */
282 TRACE_DISABLE_INTS; \ 289#define DISABLE_INTS SOFT_DISABLE_INTS(r10,r11)
283BEGIN_FW_FTR_SECTION; \
284 mfmsr r10; \
285 ori r10,r10,MSR_EE; \
286 mtmsrd r10,1; \
287END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
288#else
289#define DISABLE_INTS \
290 li r11,0; \
291 stb r11,PACASOFTIRQEN(r13); \
292 stb r11,PACAHARDIRQEN(r13); \
293 TRACE_DISABLE_INTS
294#endif /* CONFIG_PPC_ISERIES */
295 290
291/* Exception addition: Keep interrupt state */
296#define ENABLE_INTS \ 292#define ENABLE_INTS \
293 ld r11,PACAKMSR(r13); \
297 ld r12,_MSR(r1); \ 294 ld r12,_MSR(r1); \
298 mfmsr r11; \
299 rlwimi r11,r12,0,MSR_EE; \ 295 rlwimi r11,r12,0,MSR_EE; \
300 mtmsrd r11,1 296 mtmsrd r11,1
301 297
302#define STD_EXCEPTION_COMMON(trap, label, hdlr) \ 298#define ADD_NVGPRS \
303 .align 7; \ 299 bl .save_nvgprs
304 .globl label##_common; \ 300
305label##_common: \ 301#define RUNLATCH_ON \
306 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \ 302BEGIN_FTR_SECTION \
307 DISABLE_INTS; \ 303 clrrdi r3,r1,THREAD_SHIFT; \
308 bl .save_nvgprs; \ 304 ld r4,TI_LOCAL_FLAGS(r3); \
309 addi r3,r1,STACK_FRAME_OVERHEAD; \ 305 andi. r0,r4,_TLF_RUNLATCH; \
310 bl hdlr; \ 306 beql ppc64_runlatch_on_trampoline; \
311 b .ret_from_except 307END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
308
309#define EXCEPTION_COMMON(trap, label, hdlr, ret, additions) \
310 .align 7; \
311 .globl label##_common; \
312label##_common: \
313 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
314 additions; \
315 addi r3,r1,STACK_FRAME_OVERHEAD; \
316 bl hdlr; \
317 b ret
318
319#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
320 EXCEPTION_COMMON(trap, label, hdlr, ret_from_except, \
321 ADD_NVGPRS;DISABLE_INTS)
312 322
313/* 323/*
314 * Like STD_EXCEPTION_COMMON, but for exceptions that can occur 324 * Like STD_EXCEPTION_COMMON, but for exceptions that can occur
315 * in the idle task and therefore need the special idle handling. 325 * in the idle task and therefore need the special idle handling
326 * (finish nap and runlatch)
316 */ 327 */
317#define STD_EXCEPTION_COMMON_IDLE(trap, label, hdlr) \ 328#define STD_EXCEPTION_COMMON_ASYNC(trap, label, hdlr) \
318 .align 7; \ 329 EXCEPTION_COMMON(trap, label, hdlr, ret_from_except_lite, \
319 .globl label##_common; \ 330 FINISH_NAP;RUNLATCH_ON;DISABLE_INTS)
320label##_common: \
321 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
322 FINISH_NAP; \
323 DISABLE_INTS; \
324 bl .save_nvgprs; \
325 addi r3,r1,STACK_FRAME_OVERHEAD; \
326 bl hdlr; \
327 b .ret_from_except
328
329#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
330 .align 7; \
331 .globl label##_common; \
332label##_common: \
333 EXCEPTION_PROLOG_COMMON(trap, PACA_EXGEN); \
334 FINISH_NAP; \
335 DISABLE_INTS; \
336BEGIN_FTR_SECTION \
337 bl .ppc64_runlatch_on; \
338END_FTR_SECTION_IFSET(CPU_FTR_CTRL) \
339 addi r3,r1,STACK_FRAME_OVERHEAD; \
340 bl hdlr; \
341 b .ret_from_except_lite
342 331
343/* 332/*
344 * When the idle code in power4_idle puts the CPU into NAP mode, 333 * When the idle code in power4_idle puts the CPU into NAP mode,
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
new file mode 100644
index 000000000000..88dbf9659185
--- /dev/null
+++ b/arch/powerpc/include/asm/fadump.h
@@ -0,0 +1,218 @@
1/*
2 * Firmware Assisted dump header file.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
17 *
18 * Copyright 2011 IBM Corporation
19 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
20 */
21
22#ifndef __PPC64_FA_DUMP_H__
23#define __PPC64_FA_DUMP_H__
24
25#ifdef CONFIG_FA_DUMP
26
27/*
28 * The RMA region will be saved for later dumping when kernel crashes.
29 * RMA is Real Mode Area, the first block of logical memory address owned
30 * by logical partition, containing the storage that may be accessed with
31 * translate off.
32 */
33#define RMA_START 0x0
34#define RMA_END (ppc64_rma_size)
35
36/*
37 * On some Power systems where RMO is 128MB, it still requires minimum of
38 * 256MB for kernel to boot successfully. When kdump infrastructure is
39 * configured to save vmcore over network, we run into OOM issue while
40 * loading modules related to network setup. Hence we need aditional 64M
41 * of memory to avoid OOM issue.
42 */
43#define MIN_BOOT_MEM (((RMA_END < (0x1UL << 28)) ? (0x1UL << 28) : RMA_END) \
44 + (0x1UL << 26))
45
46#define memblock_num_regions(memblock_type) (memblock.memblock_type.cnt)
47
48#ifndef ELF_CORE_EFLAGS
49#define ELF_CORE_EFLAGS 0
50#endif
51
52/* Firmware provided dump sections */
53#define FADUMP_CPU_STATE_DATA 0x0001
54#define FADUMP_HPTE_REGION 0x0002
55#define FADUMP_REAL_MODE_REGION 0x0011
56
57/* Dump request flag */
58#define FADUMP_REQUEST_FLAG 0x00000001
59
60/* FAD commands */
61#define FADUMP_REGISTER 1
62#define FADUMP_UNREGISTER 2
63#define FADUMP_INVALIDATE 3
64
65/* Dump status flag */
66#define FADUMP_ERROR_FLAG 0x2000
67
68#define FADUMP_CPU_ID_MASK ((1UL << 32) - 1)
69
70#define CPU_UNKNOWN (~((u32)0))
71
72/* Utility macros */
73#define SKIP_TO_NEXT_CPU(reg_entry) \
74({ \
75 while (reg_entry->reg_id != REG_ID("CPUEND")) \
76 reg_entry++; \
77 reg_entry++; \
78})
79
80/* Kernel Dump section info */
81struct fadump_section {
82 u32 request_flag;
83 u16 source_data_type;
84 u16 error_flags;
85 u64 source_address;
86 u64 source_len;
87 u64 bytes_dumped;
88 u64 destination_address;
89};
90
91/* ibm,configure-kernel-dump header. */
92struct fadump_section_header {
93 u32 dump_format_version;
94 u16 dump_num_sections;
95 u16 dump_status_flag;
96 u32 offset_first_dump_section;
97
98 /* Fields for disk dump option. */
99 u32 dd_block_size;
100 u64 dd_block_offset;
101 u64 dd_num_blocks;
102 u32 dd_offset_disk_path;
103
104 /* Maximum time allowed to prevent an automatic dump-reboot. */
105 u32 max_time_auto;
106};
107
108/*
109 * Firmware Assisted dump memory structure. This structure is required for
110 * registering future kernel dump with power firmware through rtas call.
111 *
112 * No disk dump option. Hence disk dump path string section is not included.
113 */
114struct fadump_mem_struct {
115 struct fadump_section_header header;
116
117 /* Kernel dump sections */
118 struct fadump_section cpu_state_data;
119 struct fadump_section hpte_region;
120 struct fadump_section rmr_region;
121};
122
123/* Firmware-assisted dump configuration details. */
124struct fw_dump {
125 unsigned long cpu_state_data_size;
126 unsigned long hpte_region_size;
127 unsigned long boot_memory_size;
128 unsigned long reserve_dump_area_start;
129 unsigned long reserve_dump_area_size;
130 /* cmd line option during boot */
131 unsigned long reserve_bootvar;
132
133 unsigned long fadumphdr_addr;
134 unsigned long cpu_notes_buf;
135 unsigned long cpu_notes_buf_size;
136
137 int ibm_configure_kernel_dump;
138
139 unsigned long fadump_enabled:1;
140 unsigned long fadump_supported:1;
141 unsigned long dump_active:1;
142 unsigned long dump_registered:1;
143};
144
145/*
146 * Copy the ascii values for first 8 characters from a string into u64
147 * variable at their respective indexes.
148 * e.g.
149 * The string "FADMPINF" will be converted into 0x4641444d50494e46
150 */
151static inline u64 str_to_u64(const char *str)
152{
153 u64 val = 0;
154 int i;
155
156 for (i = 0; i < sizeof(val); i++)
157 val = (*str) ? (val << 8) | *str++ : val << 8;
158 return val;
159}
160#define STR_TO_HEX(x) str_to_u64(x)
161#define REG_ID(x) str_to_u64(x)
162
163#define FADUMP_CRASH_INFO_MAGIC STR_TO_HEX("FADMPINF")
164#define REGSAVE_AREA_MAGIC STR_TO_HEX("REGSAVE")
165
166/* The firmware-assisted dump format.
167 *
168 * The register save area is an area in the partition's memory used to preserve
169 * the register contents (CPU state data) for the active CPUs during a firmware
170 * assisted dump. The dump format contains register save area header followed
171 * by register entries. Each list of registers for a CPU starts with
172 * "CPUSTRT" and ends with "CPUEND".
173 */
174
175/* Register save area header. */
176struct fadump_reg_save_area_header {
177 u64 magic_number;
178 u32 version;
179 u32 num_cpu_offset;
180};
181
182/* Register entry. */
183struct fadump_reg_entry {
184 u64 reg_id;
185 u64 reg_value;
186};
187
188/* fadump crash info structure */
189struct fadump_crash_info_header {
190 u64 magic_number;
191 u64 elfcorehdr_addr;
192 u32 crashing_cpu;
193 struct pt_regs regs;
194 struct cpumask cpu_online_mask;
195};
196
197/* Crash memory ranges */
198#define INIT_CRASHMEM_RANGES (INIT_MEMBLOCK_REGIONS + 2)
199
200struct fad_crash_memory_ranges {
201 unsigned long long base;
202 unsigned long long size;
203};
204
205extern int early_init_dt_scan_fw_dump(unsigned long node,
206 const char *uname, int depth, void *data);
207extern int fadump_reserve_mem(void);
208extern int setup_fadump(void);
209extern int is_fadump_active(void);
210extern void crash_fadump(struct pt_regs *, const char *);
211extern void fadump_cleanup(void);
212
213extern void vmcore_cleanup(void);
214#else /* CONFIG_FA_DUMP */
215static inline int is_fadump_active(void) { return 0; }
216static inline void crash_fadump(struct pt_regs *regs, const char *str) { }
217#endif
218#endif
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h
index 14db29b18d0e..ad0b751b0d78 100644
--- a/arch/powerpc/include/asm/firmware.h
+++ b/arch/powerpc/include/asm/firmware.h
@@ -41,7 +41,6 @@
41#define FW_FEATURE_XDABR ASM_CONST(0x0000000000040000) 41#define FW_FEATURE_XDABR ASM_CONST(0x0000000000040000)
42#define FW_FEATURE_MULTITCE ASM_CONST(0x0000000000080000) 42#define FW_FEATURE_MULTITCE ASM_CONST(0x0000000000080000)
43#define FW_FEATURE_SPLPAR ASM_CONST(0x0000000000100000) 43#define FW_FEATURE_SPLPAR ASM_CONST(0x0000000000100000)
44#define FW_FEATURE_ISERIES ASM_CONST(0x0000000000200000)
45#define FW_FEATURE_LPAR ASM_CONST(0x0000000000400000) 44#define FW_FEATURE_LPAR ASM_CONST(0x0000000000400000)
46#define FW_FEATURE_PS3_LV1 ASM_CONST(0x0000000000800000) 45#define FW_FEATURE_PS3_LV1 ASM_CONST(0x0000000000800000)
47#define FW_FEATURE_BEAT ASM_CONST(0x0000000001000000) 46#define FW_FEATURE_BEAT ASM_CONST(0x0000000001000000)
@@ -65,8 +64,6 @@ enum {
65 FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR | 64 FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR |
66 FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO, 65 FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO,
67 FW_FEATURE_PSERIES_ALWAYS = 0, 66 FW_FEATURE_PSERIES_ALWAYS = 0,
68 FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
69 FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR,
70 FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2, 67 FW_FEATURE_POWERNV_POSSIBLE = FW_FEATURE_OPAL | FW_FEATURE_OPALv2,
71 FW_FEATURE_POWERNV_ALWAYS = 0, 68 FW_FEATURE_POWERNV_ALWAYS = 0,
72 FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1, 69 FW_FEATURE_PS3_POSSIBLE = FW_FEATURE_LPAR | FW_FEATURE_PS3_LV1,
@@ -79,9 +76,6 @@ enum {
79#ifdef CONFIG_PPC_PSERIES 76#ifdef CONFIG_PPC_PSERIES
80 FW_FEATURE_PSERIES_POSSIBLE | 77 FW_FEATURE_PSERIES_POSSIBLE |
81#endif 78#endif
82#ifdef CONFIG_PPC_ISERIES
83 FW_FEATURE_ISERIES_POSSIBLE |
84#endif
85#ifdef CONFIG_PPC_POWERNV 79#ifdef CONFIG_PPC_POWERNV
86 FW_FEATURE_POWERNV_POSSIBLE | 80 FW_FEATURE_POWERNV_POSSIBLE |
87#endif 81#endif
@@ -99,9 +93,6 @@ enum {
99#ifdef CONFIG_PPC_PSERIES 93#ifdef CONFIG_PPC_PSERIES
100 FW_FEATURE_PSERIES_ALWAYS & 94 FW_FEATURE_PSERIES_ALWAYS &
101#endif 95#endif
102#ifdef CONFIG_PPC_ISERIES
103 FW_FEATURE_ISERIES_ALWAYS &
104#endif
105#ifdef CONFIG_PPC_POWERNV 96#ifdef CONFIG_PPC_POWERNV
106 FW_FEATURE_POWERNV_ALWAYS & 97 FW_FEATURE_POWERNV_ALWAYS &
107#endif 98#endif
diff --git a/arch/powerpc/include/asm/fsl_guts.h b/arch/powerpc/include/asm/fsl_guts.h
index bebd12463ec9..ce04530d2000 100644
--- a/arch/powerpc/include/asm/fsl_guts.h
+++ b/arch/powerpc/include/asm/fsl_guts.h
@@ -4,7 +4,7 @@
4 * Authors: Jeff Brown 4 * Authors: Jeff Brown
5 * Timur Tabi <timur@freescale.com> 5 * Timur Tabi <timur@freescale.com>
6 * 6 *
7 * Copyright 2004,2007 Freescale Semiconductor, Inc 7 * Copyright 2004,2007,2012 Freescale Semiconductor, Inc
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify it 9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the 10 * under the terms of the GNU General Public License as published by the
@@ -114,6 +114,10 @@ struct ccsr_guts_86xx {
114 __be32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ 114 __be32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */
115} __attribute__ ((packed)); 115} __attribute__ ((packed));
116 116
117
118/* Alternate function signal multiplex control */
119#define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x))
120
117#ifdef CONFIG_PPC_86xx 121#ifdef CONFIG_PPC_86xx
118 122
119#define CCSR_GUTS_DMACR_DEV_SSI 0 /* DMA controller/channel set to SSI */ 123#define CCSR_GUTS_DMACR_DEV_SSI 0 /* DMA controller/channel set to SSI */
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h
index dbc264010d0b..caaf6e00630d 100644
--- a/arch/powerpc/include/asm/highmem.h
+++ b/arch/powerpc/include/asm/highmem.h
@@ -79,7 +79,7 @@ static inline void kunmap(struct page *page)
79 kunmap_high(page); 79 kunmap_high(page);
80} 80}
81 81
82static inline void *__kmap_atomic(struct page *page) 82static inline void *kmap_atomic(struct page *page)
83{ 83{
84 return kmap_atomic_prot(page, kmap_prot); 84 return kmap_atomic_prot(page, kmap_prot);
85} 85}
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index bb712c9488b3..51010bfc792e 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -11,6 +11,27 @@
11#include <asm/ptrace.h> 11#include <asm/ptrace.h>
12#include <asm/processor.h> 12#include <asm/processor.h>
13 13
14#ifdef CONFIG_PPC64
15
16/*
17 * PACA flags in paca->irq_happened.
18 *
19 * This bits are set when interrupts occur while soft-disabled
20 * and allow a proper replay. Additionally, PACA_IRQ_HARD_DIS
21 * is set whenever we manually hard disable.
22 */
23#define PACA_IRQ_HARD_DIS 0x01
24#define PACA_IRQ_DBELL 0x02
25#define PACA_IRQ_EE 0x04
26#define PACA_IRQ_DEC 0x08 /* Or FIT */
27#define PACA_IRQ_EE_EDGE 0x10 /* BookE only */
28
29#endif /* CONFIG_PPC64 */
30
31#ifndef __ASSEMBLY__
32
33extern void __replay_interrupt(unsigned int vector);
34
14extern void timer_interrupt(struct pt_regs *); 35extern void timer_interrupt(struct pt_regs *);
15 36
16#ifdef CONFIG_PPC64 37#ifdef CONFIG_PPC64
@@ -42,7 +63,6 @@ static inline unsigned long arch_local_irq_disable(void)
42} 63}
43 64
44extern void arch_local_irq_restore(unsigned long); 65extern void arch_local_irq_restore(unsigned long);
45extern void iseries_handle_interrupts(void);
46 66
47static inline void arch_local_irq_enable(void) 67static inline void arch_local_irq_enable(void)
48{ 68{
@@ -68,16 +88,33 @@ static inline bool arch_irqs_disabled(void)
68#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory"); 88#define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory");
69#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory"); 89#define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory");
70#else 90#else
71#define __hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1) 91#define __hard_irq_enable() __mtmsrd(local_paca->kernel_msr | MSR_EE, 1)
72#define __hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1) 92#define __hard_irq_disable() __mtmsrd(local_paca->kernel_msr, 1)
73#endif 93#endif
74 94
75#define hard_irq_disable() \ 95static inline void hard_irq_disable(void)
76 do { \ 96{
77 __hard_irq_disable(); \ 97 __hard_irq_disable();
78 get_paca()->soft_enabled = 0; \ 98 get_paca()->soft_enabled = 0;
79 get_paca()->hard_enabled = 0; \ 99 get_paca()->irq_happened |= PACA_IRQ_HARD_DIS;
80 } while(0) 100}
101
102/*
103 * This is called by asynchronous interrupts to conditionally
104 * re-enable hard interrupts when soft-disabled after having
105 * cleared the source of the interrupt
106 */
107static inline void may_hard_irq_enable(void)
108{
109 get_paca()->irq_happened &= ~PACA_IRQ_HARD_DIS;
110 if (!(get_paca()->irq_happened & PACA_IRQ_EE))
111 __hard_irq_enable();
112}
113
114static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
115{
116 return !regs->softe;
117}
81 118
82#else /* CONFIG_PPC64 */ 119#else /* CONFIG_PPC64 */
83 120
@@ -139,6 +176,13 @@ static inline bool arch_irqs_disabled(void)
139 176
140#define hard_irq_disable() arch_local_irq_disable() 177#define hard_irq_disable() arch_local_irq_disable()
141 178
179static inline bool arch_irq_disabled_regs(struct pt_regs *regs)
180{
181 return !(regs->msr & MSR_EE);
182}
183
184static inline void may_hard_irq_enable(void) { }
185
142#endif /* CONFIG_PPC64 */ 186#endif /* CONFIG_PPC64 */
143 187
144#define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST 188#define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST
@@ -149,5 +193,6 @@ static inline bool arch_irqs_disabled(void)
149 */ 193 */
150struct irq_chip; 194struct irq_chip;
151 195
196#endif /* __ASSEMBLY__ */
152#endif /* __KERNEL__ */ 197#endif /* __KERNEL__ */
153#endif /* _ASM_POWERPC_HW_IRQ_H */ 198#endif /* _ASM_POWERPC_HW_IRQ_H */
diff --git a/arch/powerpc/include/asm/i8259.h b/arch/powerpc/include/asm/i8259.h
index 105ade297aad..c3fdfbd5a673 100644
--- a/arch/powerpc/include/asm/i8259.h
+++ b/arch/powerpc/include/asm/i8259.h
@@ -6,7 +6,7 @@
6 6
7extern void i8259_init(struct device_node *node, unsigned long intack_addr); 7extern void i8259_init(struct device_node *node, unsigned long intack_addr);
8extern unsigned int i8259_irq(void); 8extern unsigned int i8259_irq(void);
9extern struct irq_host *i8259_get_host(void); 9extern struct irq_domain *i8259_get_host(void);
10 10
11#endif /* __KERNEL__ */ 11#endif /* __KERNEL__ */
12#endif /* _ASM_POWERPC_I8259_H */ 12#endif /* _ASM_POWERPC_I8259_H */
diff --git a/arch/powerpc/include/asm/iommu.h b/arch/powerpc/include/asm/iommu.h
index edfc9803ec91..957a83f43646 100644
--- a/arch/powerpc/include/asm/iommu.h
+++ b/arch/powerpc/include/asm/iommu.h
@@ -112,7 +112,6 @@ extern void iommu_unmap_page(struct iommu_table *tbl, dma_addr_t dma_handle,
112 struct dma_attrs *attrs); 112 struct dma_attrs *attrs);
113 113
114extern void iommu_init_early_pSeries(void); 114extern void iommu_init_early_pSeries(void);
115extern void iommu_init_early_iSeries(void);
116extern void iommu_init_early_dart(void); 115extern void iommu_init_early_dart(void);
117extern void iommu_init_early_pasemi(void); 116extern void iommu_init_early_pasemi(void);
118 117
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h
index c0e1bc319e35..cf417e510736 100644
--- a/arch/powerpc/include/asm/irq.h
+++ b/arch/powerpc/include/asm/irq.h
@@ -9,6 +9,7 @@
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11 11
12#include <linux/irqdomain.h>
12#include <linux/threads.h> 13#include <linux/threads.h>
13#include <linux/list.h> 14#include <linux/list.h>
14#include <linux/radix-tree.h> 15#include <linux/radix-tree.h>
@@ -26,267 +27,15 @@ extern atomic_t ppc_n_lost_interrupts;
26/* This number is used when no interrupt has been assigned */ 27/* This number is used when no interrupt has been assigned */
27#define NO_IRQ (0) 28#define NO_IRQ (0)
28 29
29/* This is a special irq number to return from get_irq() to tell that
30 * no interrupt happened _and_ ignore it (don't count it as bad). Some
31 * platforms like iSeries rely on that.
32 */
33#define NO_IRQ_IGNORE ((unsigned int)-1)
34
35/* Total number of virq in the platform */ 30/* Total number of virq in the platform */
36#define NR_IRQS CONFIG_NR_IRQS 31#define NR_IRQS CONFIG_NR_IRQS
37 32
38/* Number of irqs reserved for the legacy controller */
39#define NUM_ISA_INTERRUPTS 16
40
41/* Same thing, used by the generic IRQ code */ 33/* Same thing, used by the generic IRQ code */
42#define NR_IRQS_LEGACY NUM_ISA_INTERRUPTS 34#define NR_IRQS_LEGACY NUM_ISA_INTERRUPTS
43 35
44/* This type is the placeholder for a hardware interrupt number. It has to
45 * be big enough to enclose whatever representation is used by a given
46 * platform.
47 */
48typedef unsigned long irq_hw_number_t;
49
50/* Interrupt controller "host" data structure. This could be defined as a
51 * irq domain controller. That is, it handles the mapping between hardware
52 * and virtual interrupt numbers for a given interrupt domain. The host
53 * structure is generally created by the PIC code for a given PIC instance
54 * (though a host can cover more than one PIC if they have a flat number
55 * model). It's the host callbacks that are responsible for setting the
56 * irq_chip on a given irq_desc after it's been mapped.
57 *
58 * The host code and data structures are fairly agnostic to the fact that
59 * we use an open firmware device-tree. We do have references to struct
60 * device_node in two places: in irq_find_host() to find the host matching
61 * a given interrupt controller node, and of course as an argument to its
62 * counterpart host->ops->match() callback. However, those are treated as
63 * generic pointers by the core and the fact that it's actually a device-node
64 * pointer is purely a convention between callers and implementation. This
65 * code could thus be used on other architectures by replacing those two
66 * by some sort of arch-specific void * "token" used to identify interrupt
67 * controllers.
68 */
69struct irq_host;
70struct radix_tree_root;
71
72/* Functions below are provided by the host and called whenever a new mapping
73 * is created or an old mapping is disposed. The host can then proceed to
74 * whatever internal data structures management is required. It also needs
75 * to setup the irq_desc when returning from map().
76 */
77struct irq_host_ops {
78 /* Match an interrupt controller device node to a host, returns
79 * 1 on a match
80 */
81 int (*match)(struct irq_host *h, struct device_node *node);
82
83 /* Create or update a mapping between a virtual irq number and a hw
84 * irq number. This is called only once for a given mapping.
85 */
86 int (*map)(struct irq_host *h, unsigned int virq, irq_hw_number_t hw);
87
88 /* Dispose of such a mapping */
89 void (*unmap)(struct irq_host *h, unsigned int virq);
90
91 /* Translate device-tree interrupt specifier from raw format coming
92 * from the firmware to a irq_hw_number_t (interrupt line number) and
93 * type (sense) that can be passed to set_irq_type(). In the absence
94 * of this callback, irq_create_of_mapping() and irq_of_parse_and_map()
95 * will return the hw number in the first cell and IRQ_TYPE_NONE for
96 * the type (which amount to keeping whatever default value the
97 * interrupt controller has for that line)
98 */
99 int (*xlate)(struct irq_host *h, struct device_node *ctrler,
100 const u32 *intspec, unsigned int intsize,
101 irq_hw_number_t *out_hwirq, unsigned int *out_type);
102};
103
104struct irq_host {
105 struct list_head link;
106
107 /* type of reverse mapping technique */
108 unsigned int revmap_type;
109#define IRQ_HOST_MAP_LEGACY 0 /* legacy 8259, gets irqs 1..15 */
110#define IRQ_HOST_MAP_NOMAP 1 /* no fast reverse mapping */
111#define IRQ_HOST_MAP_LINEAR 2 /* linear map of interrupts */
112#define IRQ_HOST_MAP_TREE 3 /* radix tree */
113 union {
114 struct {
115 unsigned int size;
116 unsigned int *revmap;
117 } linear;
118 struct radix_tree_root tree;
119 } revmap_data;
120 struct irq_host_ops *ops;
121 void *host_data;
122 irq_hw_number_t inval_irq;
123
124 /* Optional device node pointer */
125 struct device_node *of_node;
126};
127
128struct irq_data; 36struct irq_data;
129extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d); 37extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d);
130extern irq_hw_number_t virq_to_hw(unsigned int virq); 38extern irq_hw_number_t virq_to_hw(unsigned int virq);
131extern bool virq_is_host(unsigned int virq, struct irq_host *host);
132
133/**
134 * irq_alloc_host - Allocate a new irq_host data structure
135 * @of_node: optional device-tree node of the interrupt controller
136 * @revmap_type: type of reverse mapping to use
137 * @revmap_arg: for IRQ_HOST_MAP_LINEAR linear only: size of the map
138 * @ops: map/unmap host callbacks
139 * @inval_irq: provide a hw number in that host space that is always invalid
140 *
141 * Allocates and initialize and irq_host structure. Note that in the case of
142 * IRQ_HOST_MAP_LEGACY, the map() callback will be called before this returns
143 * for all legacy interrupts except 0 (which is always the invalid irq for
144 * a legacy controller). For a IRQ_HOST_MAP_LINEAR, the map is allocated by
145 * this call as well. For a IRQ_HOST_MAP_TREE, the radix tree will be allocated
146 * later during boot automatically (the reverse mapping will use the slow path
147 * until that happens).
148 */
149extern struct irq_host *irq_alloc_host(struct device_node *of_node,
150 unsigned int revmap_type,
151 unsigned int revmap_arg,
152 struct irq_host_ops *ops,
153 irq_hw_number_t inval_irq);
154
155
156/**
157 * irq_find_host - Locates a host for a given device node
158 * @node: device-tree node of the interrupt controller
159 */
160extern struct irq_host *irq_find_host(struct device_node *node);
161
162
163/**
164 * irq_set_default_host - Set a "default" host
165 * @host: default host pointer
166 *
167 * For convenience, it's possible to set a "default" host that will be used
168 * whenever NULL is passed to irq_create_mapping(). It makes life easier for
169 * platforms that want to manipulate a few hard coded interrupt numbers that
170 * aren't properly represented in the device-tree.
171 */
172extern void irq_set_default_host(struct irq_host *host);
173
174
175/**
176 * irq_set_virq_count - Set the maximum number of virt irqs
177 * @count: number of linux virtual irqs, capped with NR_IRQS
178 *
179 * This is mainly for use by platforms like iSeries who want to program
180 * the virtual irq number in the controller to avoid the reverse mapping
181 */
182extern void irq_set_virq_count(unsigned int count);
183
184
185/**
186 * irq_create_mapping - Map a hardware interrupt into linux virq space
187 * @host: host owning this hardware interrupt or NULL for default host
188 * @hwirq: hardware irq number in that host space
189 *
190 * Only one mapping per hardware interrupt is permitted. Returns a linux
191 * virq number.
192 * If the sense/trigger is to be specified, set_irq_type() should be called
193 * on the number returned from that call.
194 */
195extern unsigned int irq_create_mapping(struct irq_host *host,
196 irq_hw_number_t hwirq);
197
198
199/**
200 * irq_dispose_mapping - Unmap an interrupt
201 * @virq: linux virq number of the interrupt to unmap
202 */
203extern void irq_dispose_mapping(unsigned int virq);
204
205/**
206 * irq_find_mapping - Find a linux virq from an hw irq number.
207 * @host: host owning this hardware interrupt
208 * @hwirq: hardware irq number in that host space
209 *
210 * This is a slow path, for use by generic code. It's expected that an
211 * irq controller implementation directly calls the appropriate low level
212 * mapping function.
213 */
214extern unsigned int irq_find_mapping(struct irq_host *host,
215 irq_hw_number_t hwirq);
216
217/**
218 * irq_create_direct_mapping - Allocate a virq for direct mapping
219 * @host: host to allocate the virq for or NULL for default host
220 *
221 * This routine is used for irq controllers which can choose the hardware
222 * interrupt numbers they generate. In such a case it's simplest to use
223 * the linux virq as the hardware interrupt number.
224 */
225extern unsigned int irq_create_direct_mapping(struct irq_host *host);
226
227/**
228 * irq_radix_revmap_insert - Insert a hw irq to linux virq number mapping.
229 * @host: host owning this hardware interrupt
230 * @virq: linux irq number
231 * @hwirq: hardware irq number in that host space
232 *
233 * This is for use by irq controllers that use a radix tree reverse
234 * mapping for fast lookup.
235 */
236extern void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
237 irq_hw_number_t hwirq);
238
239/**
240 * irq_radix_revmap_lookup - Find a linux virq from a hw irq number.
241 * @host: host owning this hardware interrupt
242 * @hwirq: hardware irq number in that host space
243 *
244 * This is a fast path, for use by irq controller code that uses radix tree
245 * revmaps
246 */
247extern unsigned int irq_radix_revmap_lookup(struct irq_host *host,
248 irq_hw_number_t hwirq);
249
250/**
251 * irq_linear_revmap - Find a linux virq from a hw irq number.
252 * @host: host owning this hardware interrupt
253 * @hwirq: hardware irq number in that host space
254 *
255 * This is a fast path, for use by irq controller code that uses linear
256 * revmaps. It does fallback to the slow path if the revmap doesn't exist
257 * yet and will create the revmap entry with appropriate locking
258 */
259
260extern unsigned int irq_linear_revmap(struct irq_host *host,
261 irq_hw_number_t hwirq);
262
263
264
265/**
266 * irq_alloc_virt - Allocate virtual irq numbers
267 * @host: host owning these new virtual irqs
268 * @count: number of consecutive numbers to allocate
269 * @hint: pass a hint number, the allocator will try to use a 1:1 mapping
270 *
271 * This is a low level function that is used internally by irq_create_mapping()
272 * and that can be used by some irq controllers implementations for things
273 * like allocating ranges of numbers for MSIs. The revmaps are left untouched.
274 */
275extern unsigned int irq_alloc_virt(struct irq_host *host,
276 unsigned int count,
277 unsigned int hint);
278
279/**
280 * irq_free_virt - Free virtual irq numbers
281 * @virq: virtual irq number of the first interrupt to free
282 * @count: number of interrupts to free
283 *
284 * This function is the opposite of irq_alloc_virt. It will not clear reverse
285 * maps, this should be done previously by unmap'ing the interrupt. In fact,
286 * all interrupts covered by the range being freed should have been unmapped
287 * prior to calling this.
288 */
289extern void irq_free_virt(unsigned int virq, unsigned int count);
290 39
291/** 40/**
292 * irq_early_init - Init irq remapping subsystem 41 * irq_early_init - Init irq remapping subsystem
diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h
index b0b06d85788d..6f9b6e23dc5a 100644
--- a/arch/powerpc/include/asm/irqflags.h
+++ b/arch/powerpc/include/asm/irqflags.h
@@ -39,24 +39,31 @@
39#define TRACE_ENABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on) 39#define TRACE_ENABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on)
40#define TRACE_DISABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off) 40#define TRACE_DISABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off)
41 41
42#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip) \ 42/*
43 cmpdi en,0; \ 43 * This is used by assembly code to soft-disable interrupts
44 bne 95f; \ 44 */
45 stb en,PACASOFTIRQEN(r13); \ 45#define SOFT_DISABLE_INTS(__rA, __rB) \
46 TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off) \ 46 lbz __rA,PACASOFTIRQEN(r13); \
47 b skip; \ 47 lbz __rB,PACAIRQHAPPENED(r13); \
4895: TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on) \ 48 cmpwi cr0,__rA,0; \
49 li en,1; 49 li __rA,0; \
50#define TRACE_AND_RESTORE_IRQ(en) \ 50 ori __rB,__rB,PACA_IRQ_HARD_DIS; \
51 TRACE_AND_RESTORE_IRQ_PARTIAL(en,96f); \ 51 stb __rB,PACAIRQHAPPENED(r13); \
52 stb en,PACASOFTIRQEN(r13); \ 52 beq 44f; \
5396: 53 stb __rA,PACASOFTIRQEN(r13); \
54 TRACE_DISABLE_INTS; \
5544:
56
54#else 57#else
55#define TRACE_ENABLE_INTS 58#define TRACE_ENABLE_INTS
56#define TRACE_DISABLE_INTS 59#define TRACE_DISABLE_INTS
57#define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip) 60
58#define TRACE_AND_RESTORE_IRQ(en) \ 61#define SOFT_DISABLE_INTS(__rA, __rB) \
59 stb en,PACASOFTIRQEN(r13) 62 lbz __rA,PACAIRQHAPPENED(r13); \
63 li __rB,0; \
64 ori __rA,__rA,PACA_IRQ_HARD_DIS; \
65 stb __rB,PACASOFTIRQEN(r13); \
66 stb __rA,PACAIRQHAPPENED(r13)
60#endif 67#endif
61#endif 68#endif
62 69
diff --git a/arch/powerpc/include/asm/iseries/alpaca.h b/arch/powerpc/include/asm/iseries/alpaca.h
deleted file mode 100644
index c0cce6727a69..000000000000
--- a/arch/powerpc/include/asm/iseries/alpaca.h
+++ /dev/null
@@ -1,31 +0,0 @@
1/*
2 * Copyright © 2008 Stephen Rothwell IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ASM_POWERPC_ISERIES_ALPACA_H
19#define _ASM_POWERPC_ISERIES_ALPACA_H
20
21/*
22 * This is the part of the paca that the iSeries hypervisor
23 * needs to be statically initialised. Immediately after boot
24 * we switch to the normal Linux paca.
25 */
26struct alpaca {
27 struct lppaca *lppaca_ptr; /* Pointer to LpPaca for PLIC */
28 const void *reg_save_ptr; /* Pointer to LpRegSave for PLIC */
29};
30
31#endif /* _ASM_POWERPC_ISERIES_ALPACA_H */
diff --git a/arch/powerpc/include/asm/iseries/hv_call.h b/arch/powerpc/include/asm/iseries/hv_call.h
deleted file mode 100644
index 162d653ad51f..000000000000
--- a/arch/powerpc/include/asm/iseries/hv_call.h
+++ /dev/null
@@ -1,111 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * This file contains the "hypervisor call" interface which is used to
19 * drive the hypervisor from the OS.
20 */
21#ifndef _ASM_POWERPC_ISERIES_HV_CALL_H
22#define _ASM_POWERPC_ISERIES_HV_CALL_H
23
24#include <asm/iseries/hv_call_sc.h>
25#include <asm/iseries/hv_types.h>
26#include <asm/paca.h>
27
28/* Type of yield for HvCallBaseYieldProcessor */
29#define HvCall_YieldTimed 0 /* Yield until specified time (tb) */
30#define HvCall_YieldToActive 1 /* Yield until all active procs have run */
31#define HvCall_YieldToProc 2 /* Yield until the specified processor has run */
32
33/* interrupt masks for setEnabledInterrupts */
34#define HvCall_MaskIPI 0x00000001
35#define HvCall_MaskLpEvent 0x00000002
36#define HvCall_MaskLpProd 0x00000004
37#define HvCall_MaskTimeout 0x00000008
38
39/* Log buffer formats */
40#define HvCall_LogBuffer_ASCII 0
41#define HvCall_LogBuffer_EBCDIC 1
42
43#define HvCallBaseAckDeferredInts HvCallBase + 0
44#define HvCallBaseCpmPowerOff HvCallBase + 1
45#define HvCallBaseGetHwPatch HvCallBase + 2
46#define HvCallBaseReIplSpAttn HvCallBase + 3
47#define HvCallBaseSetASR HvCallBase + 4
48#define HvCallBaseSetASRAndRfi HvCallBase + 5
49#define HvCallBaseSetIMR HvCallBase + 6
50#define HvCallBaseSendIPI HvCallBase + 7
51#define HvCallBaseTerminateMachine HvCallBase + 8
52#define HvCallBaseTerminateMachineSrc HvCallBase + 9
53#define HvCallBaseProcessPlicInterrupts HvCallBase + 10
54#define HvCallBaseIsPrimaryCpmOrMsdIpl HvCallBase + 11
55#define HvCallBaseSetVirtualSIT HvCallBase + 12
56#define HvCallBaseVaryOffThisProcessor HvCallBase + 13
57#define HvCallBaseVaryOffMemoryChunk HvCallBase + 14
58#define HvCallBaseVaryOffInteractivePercentage HvCallBase + 15
59#define HvCallBaseSendLpProd HvCallBase + 16
60#define HvCallBaseSetEnabledInterrupts HvCallBase + 17
61#define HvCallBaseYieldProcessor HvCallBase + 18
62#define HvCallBaseVaryOffSharedProcUnits HvCallBase + 19
63#define HvCallBaseSetVirtualDecr HvCallBase + 20
64#define HvCallBaseClearLogBuffer HvCallBase + 21
65#define HvCallBaseGetLogBufferCodePage HvCallBase + 22
66#define HvCallBaseGetLogBufferFormat HvCallBase + 23
67#define HvCallBaseGetLogBufferLength HvCallBase + 24
68#define HvCallBaseReadLogBuffer HvCallBase + 25
69#define HvCallBaseSetLogBufferFormatAndCodePage HvCallBase + 26
70#define HvCallBaseWriteLogBuffer HvCallBase + 27
71#define HvCallBaseRouter28 HvCallBase + 28
72#define HvCallBaseRouter29 HvCallBase + 29
73#define HvCallBaseRouter30 HvCallBase + 30
74#define HvCallBaseSetDebugBus HvCallBase + 31
75
76#define HvCallCcSetDABR HvCallCc + 7
77
78static inline void HvCall_setVirtualDecr(void)
79{
80 /*
81 * Ignore any error return codes - most likely means that the
82 * target value for the LP has been increased and this vary off
83 * would bring us below the new target.
84 */
85 HvCall0(HvCallBaseSetVirtualDecr);
86}
87
88static inline void HvCall_yieldProcessor(unsigned typeOfYield, u64 yieldParm)
89{
90 HvCall2(HvCallBaseYieldProcessor, typeOfYield, yieldParm);
91}
92
93static inline void HvCall_setEnabledInterrupts(u64 enabledInterrupts)
94{
95 HvCall1(HvCallBaseSetEnabledInterrupts, enabledInterrupts);
96}
97
98static inline void HvCall_setLogBufferFormatAndCodepage(int format,
99 u32 codePage)
100{
101 HvCall2(HvCallBaseSetLogBufferFormatAndCodePage, format, codePage);
102}
103
104extern void HvCall_writeLogBuffer(const void *buffer, u64 bufLen);
105
106static inline void HvCall_sendIPI(struct paca_struct *targetPaca)
107{
108 HvCall1(HvCallBaseSendIPI, targetPaca->paca_index);
109}
110
111#endif /* _ASM_POWERPC_ISERIES_HV_CALL_H */
diff --git a/arch/powerpc/include/asm/iseries/hv_call_event.h b/arch/powerpc/include/asm/iseries/hv_call_event.h
deleted file mode 100644
index cc029d388e11..000000000000
--- a/arch/powerpc/include/asm/iseries/hv_call_event.h
+++ /dev/null
@@ -1,201 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * This file contains the "hypervisor call" interface which is used to
19 * drive the hypervisor from the OS.
20 */
21#ifndef _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H
22#define _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H
23
24#include <linux/types.h>
25#include <linux/dma-mapping.h>
26
27#include <asm/iseries/hv_call_sc.h>
28#include <asm/iseries/hv_types.h>
29#include <asm/abs_addr.h>
30
31struct HvLpEvent;
32
33typedef u8 HvLpEvent_Type;
34typedef u8 HvLpEvent_AckInd;
35typedef u8 HvLpEvent_AckType;
36
37typedef u8 HvLpDma_Direction;
38typedef u8 HvLpDma_AddressType;
39
40typedef u64 HvLpEvent_Rc;
41typedef u64 HvLpDma_Rc;
42
43#define HvCallEventAckLpEvent HvCallEvent + 0
44#define HvCallEventCancelLpEvent HvCallEvent + 1
45#define HvCallEventCloseLpEventPath HvCallEvent + 2
46#define HvCallEventDmaBufList HvCallEvent + 3
47#define HvCallEventDmaSingle HvCallEvent + 4
48#define HvCallEventDmaToSp HvCallEvent + 5
49#define HvCallEventGetOverflowLpEvents HvCallEvent + 6
50#define HvCallEventGetSourceLpInstanceId HvCallEvent + 7
51#define HvCallEventGetTargetLpInstanceId HvCallEvent + 8
52#define HvCallEventOpenLpEventPath HvCallEvent + 9
53#define HvCallEventSetLpEventStack HvCallEvent + 10
54#define HvCallEventSignalLpEvent HvCallEvent + 11
55#define HvCallEventSignalLpEventParms HvCallEvent + 12
56#define HvCallEventSetInterLpQueueIndex HvCallEvent + 13
57#define HvCallEventSetLpEventQueueInterruptProc HvCallEvent + 14
58#define HvCallEventRouter15 HvCallEvent + 15
59
60static inline void HvCallEvent_getOverflowLpEvents(u8 queueIndex)
61{
62 HvCall1(HvCallEventGetOverflowLpEvents, queueIndex);
63}
64
65static inline void HvCallEvent_setInterLpQueueIndex(u8 queueIndex)
66{
67 HvCall1(HvCallEventSetInterLpQueueIndex, queueIndex);
68}
69
70static inline void HvCallEvent_setLpEventStack(u8 queueIndex,
71 char *eventStackAddr, u32 eventStackSize)
72{
73 HvCall3(HvCallEventSetLpEventStack, queueIndex,
74 virt_to_abs(eventStackAddr), eventStackSize);
75}
76
77static inline void HvCallEvent_setLpEventQueueInterruptProc(u8 queueIndex,
78 u16 lpLogicalProcIndex)
79{
80 HvCall2(HvCallEventSetLpEventQueueInterruptProc, queueIndex,
81 lpLogicalProcIndex);
82}
83
84static inline HvLpEvent_Rc HvCallEvent_signalLpEvent(struct HvLpEvent *event)
85{
86 return HvCall1(HvCallEventSignalLpEvent, virt_to_abs(event));
87}
88
89static inline HvLpEvent_Rc HvCallEvent_signalLpEventFast(HvLpIndex targetLp,
90 HvLpEvent_Type type, u16 subtype, HvLpEvent_AckInd ackInd,
91 HvLpEvent_AckType ackType, HvLpInstanceId sourceInstanceId,
92 HvLpInstanceId targetInstanceId, u64 correlationToken,
93 u64 eventData1, u64 eventData2, u64 eventData3,
94 u64 eventData4, u64 eventData5)
95{
96 /* Pack the misc bits into a single Dword to pass to PLIC */
97 union {
98 struct {
99 u8 ack_and_target;
100 u8 type;
101 u16 subtype;
102 HvLpInstanceId src_inst;
103 HvLpInstanceId target_inst;
104 } parms;
105 u64 dword;
106 } packed;
107
108 packed.parms.ack_and_target = (ackType << 7) | (ackInd << 6) | targetLp;
109 packed.parms.type = type;
110 packed.parms.subtype = subtype;
111 packed.parms.src_inst = sourceInstanceId;
112 packed.parms.target_inst = targetInstanceId;
113
114 return HvCall7(HvCallEventSignalLpEventParms, packed.dword,
115 correlationToken, eventData1, eventData2,
116 eventData3, eventData4, eventData5);
117}
118
119extern void *iseries_hv_alloc(size_t size, dma_addr_t *dma_handle, gfp_t flag);
120extern void iseries_hv_free(size_t size, void *vaddr, dma_addr_t dma_handle);
121extern dma_addr_t iseries_hv_map(void *vaddr, size_t size,
122 enum dma_data_direction direction);
123extern void iseries_hv_unmap(dma_addr_t dma_handle, size_t size,
124 enum dma_data_direction direction);
125
126static inline HvLpEvent_Rc HvCallEvent_ackLpEvent(struct HvLpEvent *event)
127{
128 return HvCall1(HvCallEventAckLpEvent, virt_to_abs(event));
129}
130
131static inline HvLpEvent_Rc HvCallEvent_cancelLpEvent(struct HvLpEvent *event)
132{
133 return HvCall1(HvCallEventCancelLpEvent, virt_to_abs(event));
134}
135
136static inline HvLpInstanceId HvCallEvent_getSourceLpInstanceId(
137 HvLpIndex targetLp, HvLpEvent_Type type)
138{
139 return HvCall2(HvCallEventGetSourceLpInstanceId, targetLp, type);
140}
141
142static inline HvLpInstanceId HvCallEvent_getTargetLpInstanceId(
143 HvLpIndex targetLp, HvLpEvent_Type type)
144{
145 return HvCall2(HvCallEventGetTargetLpInstanceId, targetLp, type);
146}
147
148static inline void HvCallEvent_openLpEventPath(HvLpIndex targetLp,
149 HvLpEvent_Type type)
150{
151 HvCall2(HvCallEventOpenLpEventPath, targetLp, type);
152}
153
154static inline void HvCallEvent_closeLpEventPath(HvLpIndex targetLp,
155 HvLpEvent_Type type)
156{
157 HvCall2(HvCallEventCloseLpEventPath, targetLp, type);
158}
159
160static inline HvLpDma_Rc HvCallEvent_dmaBufList(HvLpEvent_Type type,
161 HvLpIndex remoteLp, HvLpDma_Direction direction,
162 HvLpInstanceId localInstanceId,
163 HvLpInstanceId remoteInstanceId,
164 HvLpDma_AddressType localAddressType,
165 HvLpDma_AddressType remoteAddressType,
166 /* Do these need to be converted to absolute addresses? */
167 u64 localBufList, u64 remoteBufList, u32 transferLength)
168{
169 /* Pack the misc bits into a single Dword to pass to PLIC */
170 union {
171 struct {
172 u8 flags;
173 HvLpIndex remote;
174 u8 type;
175 u8 reserved;
176 HvLpInstanceId local_inst;
177 HvLpInstanceId remote_inst;
178 } parms;
179 u64 dword;
180 } packed;
181
182 packed.parms.flags = (direction << 7) |
183 (localAddressType << 6) | (remoteAddressType << 5);
184 packed.parms.remote = remoteLp;
185 packed.parms.type = type;
186 packed.parms.reserved = 0;
187 packed.parms.local_inst = localInstanceId;
188 packed.parms.remote_inst = remoteInstanceId;
189
190 return HvCall4(HvCallEventDmaBufList, packed.dword, localBufList,
191 remoteBufList, transferLength);
192}
193
194static inline HvLpDma_Rc HvCallEvent_dmaToSp(void *local, u32 remote,
195 u32 length, HvLpDma_Direction dir)
196{
197 return HvCall4(HvCallEventDmaToSp, virt_to_abs(local), remote,
198 length, dir);
199}
200
201#endif /* _ASM_POWERPC_ISERIES_HV_CALL_EVENT_H */
diff --git a/arch/powerpc/include/asm/iseries/hv_call_sc.h b/arch/powerpc/include/asm/iseries/hv_call_sc.h
deleted file mode 100644
index f5d210959250..000000000000
--- a/arch/powerpc/include/asm/iseries/hv_call_sc.h
+++ /dev/null
@@ -1,50 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ASM_POWERPC_ISERIES_HV_CALL_SC_H
19#define _ASM_POWERPC_ISERIES_HV_CALL_SC_H
20
21#include <linux/types.h>
22
23#define HvCallBase 0x8000000000000000ul
24#define HvCallCc 0x8001000000000000ul
25#define HvCallCfg 0x8002000000000000ul
26#define HvCallEvent 0x8003000000000000ul
27#define HvCallHpt 0x8004000000000000ul
28#define HvCallPci 0x8005000000000000ul
29#define HvCallSm 0x8007000000000000ul
30#define HvCallXm 0x8009000000000000ul
31
32extern u64 HvCall0(u64);
33extern u64 HvCall1(u64, u64);
34extern u64 HvCall2(u64, u64, u64);
35extern u64 HvCall3(u64, u64, u64, u64);
36extern u64 HvCall4(u64, u64, u64, u64, u64);
37extern u64 HvCall5(u64, u64, u64, u64, u64, u64);
38extern u64 HvCall6(u64, u64, u64, u64, u64, u64, u64);
39extern u64 HvCall7(u64, u64, u64, u64, u64, u64, u64, u64);
40
41extern u64 HvCall0Ret16(u64, void *);
42extern u64 HvCall1Ret16(u64, void *, u64);
43extern u64 HvCall2Ret16(u64, void *, u64, u64);
44extern u64 HvCall3Ret16(u64, void *, u64, u64, u64);
45extern u64 HvCall4Ret16(u64, void *, u64, u64, u64, u64);
46extern u64 HvCall5Ret16(u64, void *, u64, u64, u64, u64, u64);
47extern u64 HvCall6Ret16(u64, void *, u64, u64, u64, u64, u64, u64);
48extern u64 HvCall7Ret16(u64, void *, u64, u64 ,u64 ,u64 ,u64 ,u64 ,u64);
49
50#endif /* _ASM_POWERPC_ISERIES_HV_CALL_SC_H */
diff --git a/arch/powerpc/include/asm/iseries/hv_call_xm.h b/arch/powerpc/include/asm/iseries/hv_call_xm.h
deleted file mode 100644
index 392ac3f54df0..000000000000
--- a/arch/powerpc/include/asm/iseries/hv_call_xm.h
+++ /dev/null
@@ -1,61 +0,0 @@
1/*
2 * This file contains the "hypervisor call" interface which is used to
3 * drive the hypervisor from SLIC.
4 */
5#ifndef _ASM_POWERPC_ISERIES_HV_CALL_XM_H
6#define _ASM_POWERPC_ISERIES_HV_CALL_XM_H
7
8#include <asm/iseries/hv_call_sc.h>
9#include <asm/iseries/hv_types.h>
10
11#define HvCallXmGetTceTableParms HvCallXm + 0
12#define HvCallXmTestBus HvCallXm + 1
13#define HvCallXmConnectBusUnit HvCallXm + 2
14#define HvCallXmLoadTod HvCallXm + 8
15#define HvCallXmTestBusUnit HvCallXm + 9
16#define HvCallXmSetTce HvCallXm + 11
17#define HvCallXmSetTces HvCallXm + 13
18
19static inline void HvCallXm_getTceTableParms(u64 cb)
20{
21 HvCall1(HvCallXmGetTceTableParms, cb);
22}
23
24static inline u64 HvCallXm_setTce(u64 tceTableToken, u64 tceOffset, u64 tce)
25{
26 return HvCall3(HvCallXmSetTce, tceTableToken, tceOffset, tce);
27}
28
29static inline u64 HvCallXm_setTces(u64 tceTableToken, u64 tceOffset,
30 u64 numTces, u64 tce1, u64 tce2, u64 tce3, u64 tce4)
31{
32 return HvCall7(HvCallXmSetTces, tceTableToken, tceOffset, numTces,
33 tce1, tce2, tce3, tce4);
34}
35
36static inline u64 HvCallXm_testBus(u16 busNumber)
37{
38 return HvCall1(HvCallXmTestBus, busNumber);
39}
40
41static inline u64 HvCallXm_testBusUnit(u16 busNumber, u8 subBusNumber,
42 u8 deviceId)
43{
44 return HvCall2(HvCallXmTestBusUnit, busNumber,
45 (subBusNumber << 8) | deviceId);
46}
47
48static inline u64 HvCallXm_connectBusUnit(u16 busNumber, u8 subBusNumber,
49 u8 deviceId, u64 interruptToken)
50{
51 return HvCall5(HvCallXmConnectBusUnit, busNumber,
52 (subBusNumber << 8) | deviceId, interruptToken, 0,
53 0 /* HvLpConfig::mapDsaToQueueIndex(HvLpDSA(busNumber, xBoard, xCard)) */);
54}
55
56static inline u64 HvCallXm_loadTod(void)
57{
58 return HvCall0(HvCallXmLoadTod);
59}
60
61#endif /* _ASM_POWERPC_ISERIES_HV_CALL_XM_H */
diff --git a/arch/powerpc/include/asm/iseries/hv_lp_config.h b/arch/powerpc/include/asm/iseries/hv_lp_config.h
deleted file mode 100644
index a006fd1e4a2c..000000000000
--- a/arch/powerpc/include/asm/iseries/hv_lp_config.h
+++ /dev/null
@@ -1,128 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H
19#define _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H
20
21/*
22 * This file contains the interface to the LPAR configuration data
23 * to determine which resources should be allocated to each partition.
24 */
25
26#include <asm/iseries/hv_call_sc.h>
27#include <asm/iseries/hv_types.h>
28
29enum {
30 HvCallCfg_Cur = 0,
31 HvCallCfg_Init = 1,
32 HvCallCfg_Max = 2,
33 HvCallCfg_Min = 3
34};
35
36#define HvCallCfgGetSystemPhysicalProcessors HvCallCfg + 6
37#define HvCallCfgGetPhysicalProcessors HvCallCfg + 7
38#define HvCallCfgGetMsChunks HvCallCfg + 9
39#define HvCallCfgGetSharedPoolIndex HvCallCfg + 20
40#define HvCallCfgGetSharedProcUnits HvCallCfg + 21
41#define HvCallCfgGetNumProcsInSharedPool HvCallCfg + 22
42#define HvCallCfgGetVirtualLanIndexMap HvCallCfg + 30
43#define HvCallCfgGetHostingLpIndex HvCallCfg + 32
44
45extern HvLpIndex HvLpConfig_getLpIndex_outline(void);
46extern HvLpIndex HvLpConfig_getLpIndex(void);
47extern HvLpIndex HvLpConfig_getPrimaryLpIndex(void);
48
49static inline u64 HvLpConfig_getMsChunks(void)
50{
51 return HvCall2(HvCallCfgGetMsChunks, HvLpConfig_getLpIndex(),
52 HvCallCfg_Cur);
53}
54
55static inline u64 HvLpConfig_getSystemPhysicalProcessors(void)
56{
57 return HvCall0(HvCallCfgGetSystemPhysicalProcessors);
58}
59
60static inline u64 HvLpConfig_getNumProcsInSharedPool(HvLpSharedPoolIndex sPI)
61{
62 return (u16)HvCall1(HvCallCfgGetNumProcsInSharedPool, sPI);
63}
64
65static inline u64 HvLpConfig_getPhysicalProcessors(void)
66{
67 return HvCall2(HvCallCfgGetPhysicalProcessors, HvLpConfig_getLpIndex(),
68 HvCallCfg_Cur);
69}
70
71static inline HvLpSharedPoolIndex HvLpConfig_getSharedPoolIndex(void)
72{
73 return HvCall1(HvCallCfgGetSharedPoolIndex, HvLpConfig_getLpIndex());
74}
75
76static inline u64 HvLpConfig_getSharedProcUnits(void)
77{
78 return HvCall2(HvCallCfgGetSharedProcUnits, HvLpConfig_getLpIndex(),
79 HvCallCfg_Cur);
80}
81
82static inline u64 HvLpConfig_getMaxSharedProcUnits(void)
83{
84 return HvCall2(HvCallCfgGetSharedProcUnits, HvLpConfig_getLpIndex(),
85 HvCallCfg_Max);
86}
87
88static inline u64 HvLpConfig_getMaxPhysicalProcessors(void)
89{
90 return HvCall2(HvCallCfgGetPhysicalProcessors, HvLpConfig_getLpIndex(),
91 HvCallCfg_Max);
92}
93
94static inline HvLpVirtualLanIndexMap HvLpConfig_getVirtualLanIndexMapForLp(
95 HvLpIndex lp)
96{
97 /*
98 * This is a new function in V5R1 so calls to this on older
99 * hypervisors will return -1
100 */
101 u64 retVal = HvCall1(HvCallCfgGetVirtualLanIndexMap, lp);
102 if (retVal == -1)
103 retVal = 0;
104 return retVal;
105}
106
107static inline HvLpVirtualLanIndexMap HvLpConfig_getVirtualLanIndexMap(void)
108{
109 return HvLpConfig_getVirtualLanIndexMapForLp(
110 HvLpConfig_getLpIndex_outline());
111}
112
113static inline int HvLpConfig_doLpsCommunicateOnVirtualLan(HvLpIndex lp1,
114 HvLpIndex lp2)
115{
116 HvLpVirtualLanIndexMap virtualLanIndexMap1 =
117 HvLpConfig_getVirtualLanIndexMapForLp(lp1);
118 HvLpVirtualLanIndexMap virtualLanIndexMap2 =
119 HvLpConfig_getVirtualLanIndexMapForLp(lp2);
120 return ((virtualLanIndexMap1 & virtualLanIndexMap2) != 0);
121}
122
123static inline HvLpIndex HvLpConfig_getHostingLpIndex(HvLpIndex lp)
124{
125 return HvCall1(HvCallCfgGetHostingLpIndex, lp);
126}
127
128#endif /* _ASM_POWERPC_ISERIES_HV_LP_CONFIG_H */
diff --git a/arch/powerpc/include/asm/iseries/hv_lp_event.h b/arch/powerpc/include/asm/iseries/hv_lp_event.h
deleted file mode 100644
index 8f5da7d77202..000000000000
--- a/arch/powerpc/include/asm/iseries/hv_lp_event.h
+++ /dev/null
@@ -1,162 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19/* This file contains the class for HV events in the system. */
20
21#ifndef _ASM_POWERPC_ISERIES_HV_LP_EVENT_H
22#define _ASM_POWERPC_ISERIES_HV_LP_EVENT_H
23
24#include <asm/types.h>
25#include <asm/ptrace.h>
26#include <asm/iseries/hv_types.h>
27#include <asm/iseries/hv_call_event.h>
28
29/*
30 * HvLpEvent is the structure for Lp Event messages passed between
31 * partitions through PLIC.
32 */
33
34struct HvLpEvent {
35 u8 flags; /* Event flags x00-x00 */
36 u8 xType; /* Type of message x01-x01 */
37 u16 xSubtype; /* Subtype for event x02-x03 */
38 u8 xSourceLp; /* Source LP x04-x04 */
39 u8 xTargetLp; /* Target LP x05-x05 */
40 u8 xSizeMinus1; /* Size of Derived class - 1 x06-x06 */
41 u8 xRc; /* RC for Ack flows x07-x07 */
42 u16 xSourceInstanceId; /* Source sides instance id x08-x09 */
43 u16 xTargetInstanceId; /* Target sides instance id x0A-x0B */
44 union {
45 u32 xSubtypeData; /* Data usable by the subtype x0C-x0F */
46 u16 xSubtypeDataShort[2]; /* Data as 2 shorts */
47 u8 xSubtypeDataChar[4]; /* Data as 4 chars */
48 } x;
49
50 u64 xCorrelationToken; /* Unique value for source/type x10-x17 */
51};
52
53typedef void (*LpEventHandler)(struct HvLpEvent *);
54
55/* Register a handler for an event type - returns 0 on success */
56extern int HvLpEvent_registerHandler(HvLpEvent_Type eventType,
57 LpEventHandler hdlr);
58
59/*
60 * Unregister a handler for an event type
61 *
62 * This call will sleep until the handler being removed is guaranteed to
63 * be no longer executing on any CPU. Do not call with locks held.
64 *
65 * returns 0 on success
66 * Unregister will fail if there are any paths open for the type
67 */
68extern int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType);
69
70/*
71 * Open an Lp Event Path for an event type
72 * returns 0 on success
73 * openPath will fail if there is no handler registered for the event type.
74 * The lpIndex specified is the partition index for the target partition
75 * (for VirtualIo, VirtualLan and SessionMgr) other types specify zero)
76 */
77extern int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex);
78
79/*
80 * Close an Lp Event Path for a type and partition
81 * returns 0 on success
82 */
83extern int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex);
84
85#define HvLpEvent_Type_Hypervisor 0
86#define HvLpEvent_Type_MachineFac 1
87#define HvLpEvent_Type_SessionMgr 2
88#define HvLpEvent_Type_SpdIo 3
89#define HvLpEvent_Type_VirtualBus 4
90#define HvLpEvent_Type_PciIo 5
91#define HvLpEvent_Type_RioIo 6
92#define HvLpEvent_Type_VirtualLan 7
93#define HvLpEvent_Type_VirtualIo 8
94#define HvLpEvent_Type_NumTypes 9
95
96#define HvLpEvent_Rc_Good 0
97#define HvLpEvent_Rc_BufferNotAvailable 1
98#define HvLpEvent_Rc_Cancelled 2
99#define HvLpEvent_Rc_GenericError 3
100#define HvLpEvent_Rc_InvalidAddress 4
101#define HvLpEvent_Rc_InvalidPartition 5
102#define HvLpEvent_Rc_InvalidSize 6
103#define HvLpEvent_Rc_InvalidSubtype 7
104#define HvLpEvent_Rc_InvalidSubtypeData 8
105#define HvLpEvent_Rc_InvalidType 9
106#define HvLpEvent_Rc_PartitionDead 10
107#define HvLpEvent_Rc_PathClosed 11
108#define HvLpEvent_Rc_SubtypeError 12
109
110#define HvLpEvent_Function_Ack 0
111#define HvLpEvent_Function_Int 1
112
113#define HvLpEvent_AckInd_NoAck 0
114#define HvLpEvent_AckInd_DoAck 1
115
116#define HvLpEvent_AckType_ImmediateAck 0
117#define HvLpEvent_AckType_DeferredAck 1
118
119#define HV_LP_EVENT_INT 0x01
120#define HV_LP_EVENT_DO_ACK 0x02
121#define HV_LP_EVENT_DEFERRED_ACK 0x04
122#define HV_LP_EVENT_VALID 0x80
123
124#define HvLpDma_Direction_LocalToRemote 0
125#define HvLpDma_Direction_RemoteToLocal 1
126
127#define HvLpDma_AddressType_TceIndex 0
128#define HvLpDma_AddressType_RealAddress 1
129
130#define HvLpDma_Rc_Good 0
131#define HvLpDma_Rc_Error 1
132#define HvLpDma_Rc_PartitionDead 2
133#define HvLpDma_Rc_PathClosed 3
134#define HvLpDma_Rc_InvalidAddress 4
135#define HvLpDma_Rc_InvalidLength 5
136
137static inline int hvlpevent_is_valid(struct HvLpEvent *h)
138{
139 return h->flags & HV_LP_EVENT_VALID;
140}
141
142static inline void hvlpevent_invalidate(struct HvLpEvent *h)
143{
144 h->flags &= ~ HV_LP_EVENT_VALID;
145}
146
147static inline int hvlpevent_is_int(struct HvLpEvent *h)
148{
149 return h->flags & HV_LP_EVENT_INT;
150}
151
152static inline int hvlpevent_is_ack(struct HvLpEvent *h)
153{
154 return !hvlpevent_is_int(h);
155}
156
157static inline int hvlpevent_need_ack(struct HvLpEvent *h)
158{
159 return h->flags & HV_LP_EVENT_DO_ACK;
160}
161
162#endif /* _ASM_POWERPC_ISERIES_HV_LP_EVENT_H */
diff --git a/arch/powerpc/include/asm/iseries/hv_types.h b/arch/powerpc/include/asm/iseries/hv_types.h
deleted file mode 100644
index c3e6d2a1d1c3..000000000000
--- a/arch/powerpc/include/asm/iseries/hv_types.h
+++ /dev/null
@@ -1,112 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ASM_POWERPC_ISERIES_HV_TYPES_H
19#define _ASM_POWERPC_ISERIES_HV_TYPES_H
20
21/*
22 * General typedefs for the hypervisor.
23 */
24
25#include <asm/types.h>
26
27typedef u8 HvLpIndex;
28typedef u16 HvLpInstanceId;
29typedef u64 HvLpTOD;
30typedef u64 HvLpSystemSerialNum;
31typedef u8 HvLpDeviceSerialNum[12];
32typedef u16 HvLpSanHwSet;
33typedef u16 HvLpBus;
34typedef u16 HvLpBoard;
35typedef u16 HvLpCard;
36typedef u8 HvLpDeviceType[4];
37typedef u8 HvLpDeviceModel[3];
38typedef u64 HvIoToken;
39typedef u8 HvLpName[8];
40typedef u32 HvIoId;
41typedef u64 HvRealMemoryIndex;
42typedef u32 HvLpIndexMap; /* Must hold HVMAXARCHITECTEDLPS bits!!! */
43typedef u16 HvLpVrmIndex;
44typedef u32 HvXmGenerationId;
45typedef u8 HvLpBusPool;
46typedef u8 HvLpSharedPoolIndex;
47typedef u16 HvLpSharedProcUnitsX100;
48typedef u8 HvLpVirtualLanIndex;
49typedef u16 HvLpVirtualLanIndexMap; /* Must hold HVMAXARCHITECTEDVIRTUALLANS bits!!! */
50typedef u16 HvBusNumber; /* Hypervisor Bus Number */
51typedef u8 HvSubBusNumber; /* Hypervisor SubBus Number */
52typedef u8 HvAgentId; /* Hypervisor DevFn */
53
54
55#define HVMAXARCHITECTEDLPS 32
56#define HVMAXARCHITECTEDVIRTUALLANS 16
57#define HVMAXARCHITECTEDVIRTUALDISKS 32
58#define HVMAXARCHITECTEDVIRTUALCDROMS 8
59#define HVMAXARCHITECTEDVIRTUALTAPES 8
60#define HVCHUNKSIZE (256 * 1024)
61#define HVPAGESIZE (4 * 1024)
62#define HVLPMINMEGSPRIMARY 256
63#define HVLPMINMEGSSECONDARY 64
64#define HVCHUNKSPERMEG 4
65#define HVPAGESPERMEG 256
66#define HVPAGESPERCHUNK 64
67
68#define HvLpIndexInvalid ((HvLpIndex)0xff)
69
70/*
71 * Enums for the sub-components under PLIC
72 * Used in HvCall and HvPrimaryCall
73 */
74enum {
75 HvCallCompId = 0,
76 HvCallCpuCtlsCompId = 1,
77 HvCallCfgCompId = 2,
78 HvCallEventCompId = 3,
79 HvCallHptCompId = 4,
80 HvCallPciCompId = 5,
81 HvCallSlmCompId = 6,
82 HvCallSmCompId = 7,
83 HvCallSpdCompId = 8,
84 HvCallXmCompId = 9,
85 HvCallRioCompId = 10,
86 HvCallRsvd3CompId = 11,
87 HvCallRsvd2CompId = 12,
88 HvCallRsvd1CompId = 13,
89 HvCallMaxCompId = 14,
90 HvPrimaryCallCompId = 0,
91 HvPrimaryCallCfgCompId = 1,
92 HvPrimaryCallPciCompId = 2,
93 HvPrimaryCallSmCompId = 3,
94 HvPrimaryCallSpdCompId = 4,
95 HvPrimaryCallXmCompId = 5,
96 HvPrimaryCallRioCompId = 6,
97 HvPrimaryCallRsvd7CompId = 7,
98 HvPrimaryCallRsvd6CompId = 8,
99 HvPrimaryCallRsvd5CompId = 9,
100 HvPrimaryCallRsvd4CompId = 10,
101 HvPrimaryCallRsvd3CompId = 11,
102 HvPrimaryCallRsvd2CompId = 12,
103 HvPrimaryCallRsvd1CompId = 13,
104 HvPrimaryCallMaxCompId = HvCallMaxCompId
105};
106
107struct HvLpBufferList {
108 u64 addr;
109 u64 len;
110};
111
112#endif /* _ASM_POWERPC_ISERIES_HV_TYPES_H */
diff --git a/arch/powerpc/include/asm/iseries/iommu.h b/arch/powerpc/include/asm/iseries/iommu.h
deleted file mode 100644
index 1b9692c60899..000000000000
--- a/arch/powerpc/include/asm/iseries/iommu.h
+++ /dev/null
@@ -1,37 +0,0 @@
1#ifndef _ASM_POWERPC_ISERIES_IOMMU_H
2#define _ASM_POWERPC_ISERIES_IOMMU_H
3
4/*
5 * Copyright (C) 2005 Stephen Rothwell, IBM Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the:
19 * Free Software Foundation, Inc.,
20 * 59 Temple Place, Suite 330,
21 * Boston, MA 02111-1307 USA
22 */
23
24struct pci_dev;
25struct vio_dev;
26struct device_node;
27struct iommu_table;
28
29/* Get table parameters from HV */
30extern void iommu_table_getparms_iSeries(unsigned long busno,
31 unsigned char slotno, unsigned char virtbus,
32 struct iommu_table *tbl);
33
34extern struct iommu_table *vio_build_iommu_table_iseries(struct vio_dev *dev);
35extern void iommu_vio_init(void);
36
37#endif /* _ASM_POWERPC_ISERIES_IOMMU_H */
diff --git a/arch/powerpc/include/asm/iseries/it_lp_queue.h b/arch/powerpc/include/asm/iseries/it_lp_queue.h
deleted file mode 100644
index 428278838821..000000000000
--- a/arch/powerpc/include/asm/iseries/it_lp_queue.h
+++ /dev/null
@@ -1,78 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H
19#define _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H
20
21/*
22 * This control block defines the simple LP queue structure that is
23 * shared between the hypervisor (PLIC) and the OS in order to send
24 * events to an LP.
25 */
26
27#include <asm/types.h>
28#include <asm/ptrace.h>
29
30#define IT_LP_MAX_QUEUES 8
31
32#define IT_LP_NOT_USED 0 /* Queue will not be used by PLIC */
33#define IT_LP_DEDICATED_IO 1 /* Queue dedicated to IO processor specified */
34#define IT_LP_DEDICATED_LP 2 /* Queue dedicated to LP specified */
35#define IT_LP_SHARED 3 /* Queue shared for both IO and LP */
36
37#define IT_LP_EVENT_STACK_SIZE 4096
38#define IT_LP_EVENT_MAX_SIZE 256
39#define IT_LP_EVENT_ALIGN 64
40
41struct hvlpevent_queue {
42/*
43 * The hq_current_event is the pointer to the next event stack entry
44 * that will become valid. The OS must peek at this entry to determine
45 * if it is valid. PLIC will set the valid indicator as the very last
46 * store into that entry.
47 *
48 * When the OS has completed processing of the event then it will mark
49 * the event as invalid so that PLIC knows it can store into that event
50 * location again.
51 *
52 * If the event stack fills and there are overflow events, then PLIC
53 * will set the hq_overflow_pending flag in which case the OS will
54 * have to fetch the additional LP events once they have drained the
55 * event stack.
56 *
57 * The first 16-bytes are known by both the OS and PLIC. The remainder
58 * of the cache line is for use by the OS.
59 */
60 u8 hq_overflow_pending; /* 0x00 Overflow events are pending */
61 u8 hq_status; /* 0x01 DedicatedIo or DedicatedLp or NotUsed */
62 u16 hq_proc_index; /* 0x02 Logical Proc Index for correlation */
63 u8 hq_reserved1[12]; /* 0x04 */
64 char *hq_current_event; /* 0x10 */
65 char *hq_last_event; /* 0x18 */
66 char *hq_event_stack; /* 0x20 */
67 u8 hq_index; /* 0x28 unique sequential index. */
68 u8 hq_reserved2[3]; /* 0x29-2b */
69 spinlock_t hq_lock;
70};
71
72extern struct hvlpevent_queue hvlpevent_queue;
73
74extern int hvlpevent_is_pending(void);
75extern void process_hvlpevents(void);
76extern void setup_hvlpevent_queue(void);
77
78#endif /* _ASM_POWERPC_ISERIES_IT_LP_QUEUE_H */
diff --git a/arch/powerpc/include/asm/iseries/lpar_map.h b/arch/powerpc/include/asm/iseries/lpar_map.h
deleted file mode 100644
index 5e9f3e128ee2..000000000000
--- a/arch/powerpc/include/asm/iseries/lpar_map.h
+++ /dev/null
@@ -1,85 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ASM_POWERPC_ISERIES_LPAR_MAP_H
19#define _ASM_POWERPC_ISERIES_LPAR_MAP_H
20
21#ifndef __ASSEMBLY__
22
23#include <asm/types.h>
24
25#endif
26
27/*
28 * The iSeries hypervisor will set up mapping for one or more
29 * ESID/VSID pairs (in SLB/segment registers) and will set up
30 * mappings of one or more ranges of pages to VAs.
31 * We will have the hypervisor set up the ESID->VSID mapping
32 * for the four kernel segments (C-F). With shared processors,
33 * the hypervisor will clear all segment registers and reload
34 * these four whenever the processor is switched from one
35 * partition to another.
36 */
37
38/* The Vsid and Esid identified below will be used by the hypervisor
39 * to set up a memory mapping for part of the load area before giving
40 * control to the Linux kernel. The load area is 64 MB, but this must
41 * not attempt to map the whole load area. The Hashed Page Table may
42 * need to be located within the load area (if the total partition size
43 * is 64 MB), but cannot be mapped. Typically, this should specify
44 * to map half (32 MB) of the load area.
45 *
46 * The hypervisor will set up page table entries for the number of
47 * pages specified.
48 *
49 * In 32-bit mode, the hypervisor will load all four of the
50 * segment registers (identified by the low-order four bits of the
51 * Esid field. In 64-bit mode, the hypervisor will load one SLB
52 * entry to map the Esid to the Vsid.
53*/
54
55#define HvEsidsToMap 2
56#define HvRangesToMap 1
57
58/* Hypervisor initially maps 32MB of the load area */
59#define HvPagesToMap 8192
60
61#ifndef __ASSEMBLY__
62struct LparMap {
63 u64 xNumberEsids; // Number of ESID/VSID pairs
64 u64 xNumberRanges; // Number of VA ranges to map
65 u64 xSegmentTableOffs; // Page number within load area of seg table
66 u64 xRsvd[5];
67 struct {
68 u64 xKernelEsid; // Esid used to map kernel load
69 u64 xKernelVsid; // Vsid used to map kernel load
70 } xEsids[HvEsidsToMap];
71 struct {
72 u64 xPages; // Number of pages to be mapped
73 u64 xOffset; // Offset from start of load area
74 u64 xVPN; // Virtual Page Number
75 } xRanges[HvRangesToMap];
76};
77
78extern const struct LparMap xLparMap;
79
80#endif /* __ASSEMBLY__ */
81
82/* the fixed address where the LparMap exists */
83#define LPARMAP_PHYS 0x7000
84
85#endif /* _ASM_POWERPC_ISERIES_LPAR_MAP_H */
diff --git a/arch/powerpc/include/asm/iseries/mf.h b/arch/powerpc/include/asm/iseries/mf.h
deleted file mode 100644
index eb851a9c9e5c..000000000000
--- a/arch/powerpc/include/asm/iseries/mf.h
+++ /dev/null
@@ -1,51 +0,0 @@
1/*
2 * Copyright (C) 2001 Troy D. Armstrong IBM Corporation
3 * Copyright (C) 2004 Stephen Rothwell IBM Corporation
4 *
5 * This modules exists as an interface between a Linux secondary partition
6 * running on an iSeries and the primary partition's Virtual Service
7 * Processor (VSP) object. The VSP has final authority over powering on/off
8 * all partitions in the iSeries. It also provides miscellaneous low-level
9 * machine facility type operations.
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
24 */
25#ifndef _ASM_POWERPC_ISERIES_MF_H
26#define _ASM_POWERPC_ISERIES_MF_H
27
28#include <linux/types.h>
29
30#include <asm/iseries/hv_types.h>
31#include <asm/iseries/hv_call_event.h>
32
33struct rtc_time;
34
35typedef void (*MFCompleteHandler)(void *clientToken, int returnCode);
36
37extern void mf_allocate_lp_events(HvLpIndex targetLp, HvLpEvent_Type type,
38 unsigned size, unsigned amount, MFCompleteHandler hdlr,
39 void *userToken);
40extern void mf_deallocate_lp_events(HvLpIndex targetLp, HvLpEvent_Type type,
41 unsigned count, MFCompleteHandler hdlr, void *userToken);
42
43extern void mf_power_off(void);
44extern void mf_reboot(char *cmd);
45
46extern void mf_display_src(u32 word);
47extern void mf_display_progress(u16 value);
48
49extern void mf_init(void);
50
51#endif /* _ASM_POWERPC_ISERIES_MF_H */
diff --git a/arch/powerpc/include/asm/iseries/vio.h b/arch/powerpc/include/asm/iseries/vio.h
deleted file mode 100644
index f9ac0d00b951..000000000000
--- a/arch/powerpc/include/asm/iseries/vio.h
+++ /dev/null
@@ -1,265 +0,0 @@
1/* -*- linux-c -*-
2 *
3 * iSeries Virtual I/O Message Path header
4 *
5 * Authors: Dave Boutcher <boutcher@us.ibm.com>
6 * Ryan Arnold <ryanarn@us.ibm.com>
7 * Colin Devilbiss <devilbis@us.ibm.com>
8 *
9 * (C) Copyright 2000 IBM Corporation
10 *
11 * This header file is used by the iSeries virtual I/O device
12 * drivers. It defines the interfaces to the common functions
13 * (implemented in drivers/char/viopath.h) as well as defining
14 * common functions and structures. Currently (at the time I
15 * wrote this comment) the iSeries virtual I/O device drivers
16 * that use this are
17 * drivers/block/viodasd.c
18 * drivers/char/viocons.c
19 * drivers/char/viotape.c
20 * drivers/cdrom/viocd.c
21 *
22 * The iSeries virtual ethernet support (veth.c) uses a whole
23 * different set of functions.
24 *
25 * This program is free software; you can redistribute it and/or
26 * modify it under the terms of the GNU General Public License as
27 * published by the Free Software Foundation; either version 2 of the
28 * License, or (at your option) anyu later version.
29 *
30 * This program is distributed in the hope that it will be useful, but
31 * WITHOUT ANY WARRANTY; without even the implied warranty of
32 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
33 * General Public License for more details.
34 *
35 * You should have received a copy of the GNU General Public License
36 * along with this program; if not, write to the Free Software Foundation,
37 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
38 *
39 */
40#ifndef _ASM_POWERPC_ISERIES_VIO_H
41#define _ASM_POWERPC_ISERIES_VIO_H
42
43#include <asm/iseries/hv_types.h>
44#include <asm/iseries/hv_lp_event.h>
45
46/*
47 * iSeries virtual I/O events use the subtype field in
48 * HvLpEvent to figure out what kind of vio event is coming
49 * in. We use a table to route these, and this defines
50 * the maximum number of distinct subtypes
51 */
52#define VIO_MAX_SUBTYPES 8
53
54#define VIOMAXBLOCKDMA 12
55
56struct open_data {
57 u64 disk_size;
58 u16 max_disk;
59 u16 cylinders;
60 u16 tracks;
61 u16 sectors;
62 u16 bytes_per_sector;
63};
64
65struct rw_data {
66 u64 offset;
67 struct {
68 u32 token;
69 u32 reserved;
70 u64 len;
71 } dma_info[VIOMAXBLOCKDMA];
72};
73
74struct vioblocklpevent {
75 struct HvLpEvent event;
76 u32 reserved;
77 u16 version;
78 u16 sub_result;
79 u16 disk;
80 u16 flags;
81 union {
82 struct open_data open_data;
83 struct rw_data rw_data;
84 u64 changed;
85 } u;
86};
87
88#define vioblockflags_ro 0x0001
89
90enum vioblocksubtype {
91 vioblockopen = 0x0001,
92 vioblockclose = 0x0002,
93 vioblockread = 0x0003,
94 vioblockwrite = 0x0004,
95 vioblockflush = 0x0005,
96 vioblockcheck = 0x0007
97};
98
99struct viocdlpevent {
100 struct HvLpEvent event;
101 u32 reserved;
102 u16 version;
103 u16 sub_result;
104 u16 disk;
105 u16 flags;
106 u32 token;
107 u64 offset; /* On open, max number of disks */
108 u64 len; /* On open, size of the disk */
109 u32 block_size; /* Only set on open */
110 u32 media_size; /* Only set on open */
111};
112
113enum viocdsubtype {
114 viocdopen = 0x0001,
115 viocdclose = 0x0002,
116 viocdread = 0x0003,
117 viocdwrite = 0x0004,
118 viocdlockdoor = 0x0005,
119 viocdgetinfo = 0x0006,
120 viocdcheck = 0x0007
121};
122
123struct viotapelpevent {
124 struct HvLpEvent event;
125 u32 reserved;
126 u16 version;
127 u16 sub_type_result;
128 u16 tape;
129 u16 flags;
130 u32 token;
131 u64 len;
132 union {
133 struct {
134 u32 tape_op;
135 u32 count;
136 } op;
137 struct {
138 u32 type;
139 u32 resid;
140 u32 dsreg;
141 u32 gstat;
142 u32 erreg;
143 u32 file_no;
144 u32 block_no;
145 } get_status;
146 struct {
147 u32 block_no;
148 } get_pos;
149 } u;
150};
151
152enum viotapesubtype {
153 viotapeopen = 0x0001,
154 viotapeclose = 0x0002,
155 viotaperead = 0x0003,
156 viotapewrite = 0x0004,
157 viotapegetinfo = 0x0005,
158 viotapeop = 0x0006,
159 viotapegetpos = 0x0007,
160 viotapesetpos = 0x0008,
161 viotapegetstatus = 0x0009
162};
163
164/*
165 * Each subtype can register a handler to process their events.
166 * The handler must have this interface.
167 */
168typedef void (vio_event_handler_t) (struct HvLpEvent * event);
169
170extern int viopath_open(HvLpIndex remoteLp, int subtype, int numReq);
171extern int viopath_close(HvLpIndex remoteLp, int subtype, int numReq);
172extern int vio_setHandler(int subtype, vio_event_handler_t * beh);
173extern int vio_clearHandler(int subtype);
174extern int viopath_isactive(HvLpIndex lp);
175extern HvLpInstanceId viopath_sourceinst(HvLpIndex lp);
176extern HvLpInstanceId viopath_targetinst(HvLpIndex lp);
177extern void vio_set_hostlp(void);
178extern void *vio_get_event_buffer(int subtype);
179extern void vio_free_event_buffer(int subtype, void *buffer);
180
181extern struct vio_dev *vio_create_viodasd(u32 unit);
182
183extern HvLpIndex viopath_hostLp;
184extern HvLpIndex viopath_ourLp;
185
186#define VIOCHAR_MAX_DATA 200
187
188#define VIOMAJOR_SUBTYPE_MASK 0xff00
189#define VIOMINOR_SUBTYPE_MASK 0x00ff
190#define VIOMAJOR_SUBTYPE_SHIFT 8
191
192#define VIOVERSION 0x0101
193
194/*
195 * This is the general structure for VIO errors; each module should have
196 * a table of them, and each table should be terminated by an entry of
197 * { 0, 0, NULL }. Then, to find a specific error message, a module
198 * should pass its local table and the return code.
199 */
200struct vio_error_entry {
201 u16 rc;
202 int errno;
203 const char *msg;
204};
205extern const struct vio_error_entry *vio_lookup_rc(
206 const struct vio_error_entry *local_table, u16 rc);
207
208enum viosubtypes {
209 viomajorsubtype_monitor = 0x0100,
210 viomajorsubtype_blockio = 0x0200,
211 viomajorsubtype_chario = 0x0300,
212 viomajorsubtype_config = 0x0400,
213 viomajorsubtype_cdio = 0x0500,
214 viomajorsubtype_tape = 0x0600,
215 viomajorsubtype_scsi = 0x0700
216};
217
218enum vioconfigsubtype {
219 vioconfigget = 0x0001,
220};
221
222enum viorc {
223 viorc_good = 0x0000,
224 viorc_noConnection = 0x0001,
225 viorc_noReceiver = 0x0002,
226 viorc_noBufferAvailable = 0x0003,
227 viorc_invalidMessageType = 0x0004,
228 viorc_invalidRange = 0x0201,
229 viorc_invalidToken = 0x0202,
230 viorc_DMAError = 0x0203,
231 viorc_useError = 0x0204,
232 viorc_releaseError = 0x0205,
233 viorc_invalidDisk = 0x0206,
234 viorc_openRejected = 0x0301
235};
236
237/*
238 * The structure of the events that flow between us and OS/400 for chario
239 * events. You can't mess with this unless the OS/400 side changes too.
240 */
241struct viocharlpevent {
242 struct HvLpEvent event;
243 u32 reserved;
244 u16 version;
245 u16 subtype_result_code;
246 u8 virtual_device;
247 u8 len;
248 u8 data[VIOCHAR_MAX_DATA];
249};
250
251#define VIOCHAR_WINDOW 10
252
253enum viocharsubtype {
254 viocharopen = 0x0001,
255 viocharclose = 0x0002,
256 viochardata = 0x0003,
257 viocharack = 0x0004,
258 viocharconfig = 0x0005
259};
260
261enum viochar_rc {
262 viochar_rc_ebusy = 1
263};
264
265#endif /* _ASM_POWERPC_ISERIES_VIO_H */
diff --git a/arch/powerpc/include/asm/jump_label.h b/arch/powerpc/include/asm/jump_label.h
index 938986e412f1..ae098c438f00 100644
--- a/arch/powerpc/include/asm/jump_label.h
+++ b/arch/powerpc/include/asm/jump_label.h
@@ -17,7 +17,7 @@
17#define JUMP_ENTRY_TYPE stringify_in_c(FTR_ENTRY_LONG) 17#define JUMP_ENTRY_TYPE stringify_in_c(FTR_ENTRY_LONG)
18#define JUMP_LABEL_NOP_SIZE 4 18#define JUMP_LABEL_NOP_SIZE 4
19 19
20static __always_inline bool arch_static_branch(struct jump_label_key *key) 20static __always_inline bool arch_static_branch(struct static_key *key)
21{ 21{
22 asm goto("1:\n\t" 22 asm goto("1:\n\t"
23 "nop\n\t" 23 "nop\n\t"
diff --git a/arch/powerpc/include/asm/keylargo.h b/arch/powerpc/include/asm/keylargo.h
index fc195d0b3c34..2156315d8a90 100644
--- a/arch/powerpc/include/asm/keylargo.h
+++ b/arch/powerpc/include/asm/keylargo.h
@@ -21,7 +21,7 @@
21#define KEYLARGO_FCR4 0x48 21#define KEYLARGO_FCR4 0x48
22#define KEYLARGO_FCR5 0x4c /* Pangea only */ 22#define KEYLARGO_FCR5 0x4c /* Pangea only */
23 23
24/* K2 aditional FCRs */ 24/* K2 additional FCRs */
25#define K2_FCR6 0x34 25#define K2_FCR6 0x34
26#define K2_FCR7 0x30 26#define K2_FCR7 0x30
27#define K2_FCR8 0x2c 27#define K2_FCR8 0x2c
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index f7727d91ac6b..b921c3f48928 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -265,12 +265,9 @@ struct kvm_debug_exit_arch {
265struct kvm_guest_debug_arch { 265struct kvm_guest_debug_arch {
266}; 266};
267 267
268#define KVM_REG_MASK 0x001f 268/* definition of registers in kvm_run */
269#define KVM_REG_EXT_MASK 0xffe0 269struct kvm_sync_regs {
270#define KVM_REG_GPR 0x0000 270};
271#define KVM_REG_FPR 0x0020
272#define KVM_REG_QPR 0x0040
273#define KVM_REG_FQPR 0x0060
274 271
275#define KVM_INTERRUPT_SET -1U 272#define KVM_INTERRUPT_SET -1U
276#define KVM_INTERRUPT_UNSET -2U 273#define KVM_INTERRUPT_UNSET -2U
@@ -292,4 +289,41 @@ struct kvm_allocate_rma {
292 __u64 rma_size; 289 __u64 rma_size;
293}; 290};
294 291
292struct kvm_book3e_206_tlb_entry {
293 __u32 mas8;
294 __u32 mas1;
295 __u64 mas2;
296 __u64 mas7_3;
297};
298
299struct kvm_book3e_206_tlb_params {
300 /*
301 * For mmu types KVM_MMU_FSL_BOOKE_NOHV and KVM_MMU_FSL_BOOKE_HV:
302 *
303 * - The number of ways of TLB0 must be a power of two between 2 and
304 * 16.
305 * - TLB1 must be fully associative.
306 * - The size of TLB0 must be a multiple of the number of ways, and
307 * the number of sets must be a power of two.
308 * - The size of TLB1 may not exceed 64 entries.
309 * - TLB0 supports 4 KiB pages.
310 * - The page sizes supported by TLB1 are as indicated by
311 * TLB1CFG (if MMUCFG[MAVN] = 0) or TLB1PS (if MMUCFG[MAVN] = 1)
312 * as returned by KVM_GET_SREGS.
313 * - TLB2 and TLB3 are reserved, and their entries in tlb_sizes[]
314 * and tlb_ways[] must be zero.
315 *
316 * tlb_ways[n] = tlb_sizes[n] means the array is fully associative.
317 *
318 * KVM will adjust TLBnCFG based on the sizes configured here,
319 * though arrays greater than 2048 entries will have TLBnCFG[NENTRY]
320 * set to zero.
321 */
322 __u32 tlb_sizes[4];
323 __u32 tlb_ways[4];
324 __u32 reserved[8];
325};
326
327#define KVM_REG_PPC_HIOR (KVM_REG_PPC | KVM_REG_SIZE_U64 | 0x1)
328
295#endif /* __LINUX_KVM_POWERPC_H */ 329#endif /* __LINUX_KVM_POWERPC_H */
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index 69c7377d2071..aa795ccef294 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -90,6 +90,8 @@ struct kvmppc_vcpu_book3s {
90#endif 90#endif
91 int context_id[SID_CONTEXTS]; 91 int context_id[SID_CONTEXTS];
92 92
93 bool hior_explicit; /* HIOR is set by ioctl, not PVR */
94
93 struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE]; 95 struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE];
94 struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG]; 96 struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG];
95 struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE]; 97 struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE];
@@ -119,6 +121,11 @@ extern void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu);
119extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte); 121extern int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte);
120extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr); 122extern int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr);
121extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu); 123extern void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu);
124extern int kvmppc_book3s_hv_page_fault(struct kvm_run *run,
125 struct kvm_vcpu *vcpu, unsigned long addr,
126 unsigned long status);
127extern long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr,
128 unsigned long slb_v, unsigned long valid);
122 129
123extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte); 130extern void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte);
124extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu); 131extern struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu);
@@ -138,6 +145,21 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat,
138extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr); 145extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr);
139extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu); 146extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu);
140extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); 147extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
148extern void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
149 unsigned long *rmap, long pte_index, int realmode);
150extern void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
151 unsigned long pte_index);
152void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
153 unsigned long pte_index);
154extern void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long addr,
155 unsigned long *nb_ret);
156extern void kvmppc_unpin_guest_page(struct kvm *kvm, void *addr);
157extern long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
158 long pte_index, unsigned long pteh, unsigned long ptel);
159extern long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
160 long pte_index, unsigned long pteh, unsigned long ptel);
161extern long kvmppc_hv_get_dirty_log(struct kvm *kvm,
162 struct kvm_memory_slot *memslot);
141 163
142extern void kvmppc_entry_trampoline(void); 164extern void kvmppc_entry_trampoline(void);
143extern void kvmppc_hv_entry_trampoline(void); 165extern void kvmppc_hv_entry_trampoline(void);
@@ -183,7 +205,9 @@ static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
183static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) 205static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
184{ 206{
185 if ( num < 14 ) { 207 if ( num < 14 ) {
186 to_svcpu(vcpu)->gpr[num] = val; 208 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
209 svcpu->gpr[num] = val;
210 svcpu_put(svcpu);
187 to_book3s(vcpu)->shadow_vcpu->gpr[num] = val; 211 to_book3s(vcpu)->shadow_vcpu->gpr[num] = val;
188 } else 212 } else
189 vcpu->arch.gpr[num] = val; 213 vcpu->arch.gpr[num] = val;
@@ -191,80 +215,120 @@ static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
191 215
192static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num) 216static inline ulong kvmppc_get_gpr(struct kvm_vcpu *vcpu, int num)
193{ 217{
194 if ( num < 14 ) 218 if ( num < 14 ) {
195 return to_svcpu(vcpu)->gpr[num]; 219 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
196 else 220 ulong r = svcpu->gpr[num];
221 svcpu_put(svcpu);
222 return r;
223 } else
197 return vcpu->arch.gpr[num]; 224 return vcpu->arch.gpr[num];
198} 225}
199 226
200static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val) 227static inline void kvmppc_set_cr(struct kvm_vcpu *vcpu, u32 val)
201{ 228{
202 to_svcpu(vcpu)->cr = val; 229 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
230 svcpu->cr = val;
231 svcpu_put(svcpu);
203 to_book3s(vcpu)->shadow_vcpu->cr = val; 232 to_book3s(vcpu)->shadow_vcpu->cr = val;
204} 233}
205 234
206static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu) 235static inline u32 kvmppc_get_cr(struct kvm_vcpu *vcpu)
207{ 236{
208 return to_svcpu(vcpu)->cr; 237 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
238 u32 r;
239 r = svcpu->cr;
240 svcpu_put(svcpu);
241 return r;
209} 242}
210 243
211static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val) 244static inline void kvmppc_set_xer(struct kvm_vcpu *vcpu, u32 val)
212{ 245{
213 to_svcpu(vcpu)->xer = val; 246 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
247 svcpu->xer = val;
214 to_book3s(vcpu)->shadow_vcpu->xer = val; 248 to_book3s(vcpu)->shadow_vcpu->xer = val;
249 svcpu_put(svcpu);
215} 250}
216 251
217static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu) 252static inline u32 kvmppc_get_xer(struct kvm_vcpu *vcpu)
218{ 253{
219 return to_svcpu(vcpu)->xer; 254 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
255 u32 r;
256 r = svcpu->xer;
257 svcpu_put(svcpu);
258 return r;
220} 259}
221 260
222static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val) 261static inline void kvmppc_set_ctr(struct kvm_vcpu *vcpu, ulong val)
223{ 262{
224 to_svcpu(vcpu)->ctr = val; 263 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
264 svcpu->ctr = val;
265 svcpu_put(svcpu);
225} 266}
226 267
227static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu) 268static inline ulong kvmppc_get_ctr(struct kvm_vcpu *vcpu)
228{ 269{
229 return to_svcpu(vcpu)->ctr; 270 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
271 ulong r;
272 r = svcpu->ctr;
273 svcpu_put(svcpu);
274 return r;
230} 275}
231 276
232static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val) 277static inline void kvmppc_set_lr(struct kvm_vcpu *vcpu, ulong val)
233{ 278{
234 to_svcpu(vcpu)->lr = val; 279 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
280 svcpu->lr = val;
281 svcpu_put(svcpu);
235} 282}
236 283
237static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu) 284static inline ulong kvmppc_get_lr(struct kvm_vcpu *vcpu)
238{ 285{
239 return to_svcpu(vcpu)->lr; 286 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
287 ulong r;
288 r = svcpu->lr;
289 svcpu_put(svcpu);
290 return r;
240} 291}
241 292
242static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val) 293static inline void kvmppc_set_pc(struct kvm_vcpu *vcpu, ulong val)
243{ 294{
244 to_svcpu(vcpu)->pc = val; 295 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
296 svcpu->pc = val;
297 svcpu_put(svcpu);
245} 298}
246 299
247static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu) 300static inline ulong kvmppc_get_pc(struct kvm_vcpu *vcpu)
248{ 301{
249 return to_svcpu(vcpu)->pc; 302 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
303 ulong r;
304 r = svcpu->pc;
305 svcpu_put(svcpu);
306 return r;
250} 307}
251 308
252static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu) 309static inline u32 kvmppc_get_last_inst(struct kvm_vcpu *vcpu)
253{ 310{
254 ulong pc = kvmppc_get_pc(vcpu); 311 ulong pc = kvmppc_get_pc(vcpu);
255 struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu); 312 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
313 u32 r;
256 314
257 /* Load the instruction manually if it failed to do so in the 315 /* Load the instruction manually if it failed to do so in the
258 * exit path */ 316 * exit path */
259 if (svcpu->last_inst == KVM_INST_FETCH_FAILED) 317 if (svcpu->last_inst == KVM_INST_FETCH_FAILED)
260 kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false); 318 kvmppc_ld(vcpu, &pc, sizeof(u32), &svcpu->last_inst, false);
261 319
262 return svcpu->last_inst; 320 r = svcpu->last_inst;
321 svcpu_put(svcpu);
322 return r;
263} 323}
264 324
265static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu) 325static inline ulong kvmppc_get_fault_dar(struct kvm_vcpu *vcpu)
266{ 326{
267 return to_svcpu(vcpu)->fault_dar; 327 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
328 ulong r;
329 r = svcpu->fault_dar;
330 svcpu_put(svcpu);
331 return r;
268} 332}
269 333
270static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu) 334static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/include/asm/kvm_book3s_32.h b/arch/powerpc/include/asm/kvm_book3s_32.h
index de604db135f5..38040ff82063 100644
--- a/arch/powerpc/include/asm/kvm_book3s_32.h
+++ b/arch/powerpc/include/asm/kvm_book3s_32.h
@@ -20,11 +20,15 @@
20#ifndef __ASM_KVM_BOOK3S_32_H__ 20#ifndef __ASM_KVM_BOOK3S_32_H__
21#define __ASM_KVM_BOOK3S_32_H__ 21#define __ASM_KVM_BOOK3S_32_H__
22 22
23static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu) 23static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
24{ 24{
25 return to_book3s(vcpu)->shadow_vcpu; 25 return to_book3s(vcpu)->shadow_vcpu;
26} 26}
27 27
28static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
29{
30}
31
28#define PTE_SIZE 12 32#define PTE_SIZE 12
29#define VSID_ALL 0 33#define VSID_ALL 0
30#define SR_INVALID 0x00000001 /* VSID 1 should always be unused */ 34#define SR_INVALID 0x00000001 /* VSID 1 should always be unused */
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index d0ac94f98f9e..b0c08b142770 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -21,14 +21,56 @@
21#define __ASM_KVM_BOOK3S_64_H__ 21#define __ASM_KVM_BOOK3S_64_H__
22 22
23#ifdef CONFIG_KVM_BOOK3S_PR 23#ifdef CONFIG_KVM_BOOK3S_PR
24static inline struct kvmppc_book3s_shadow_vcpu *to_svcpu(struct kvm_vcpu *vcpu) 24static inline struct kvmppc_book3s_shadow_vcpu *svcpu_get(struct kvm_vcpu *vcpu)
25{ 25{
26 preempt_disable();
26 return &get_paca()->shadow_vcpu; 27 return &get_paca()->shadow_vcpu;
27} 28}
29
30static inline void svcpu_put(struct kvmppc_book3s_shadow_vcpu *svcpu)
31{
32 preempt_enable();
33}
28#endif 34#endif
29 35
30#define SPAPR_TCE_SHIFT 12 36#define SPAPR_TCE_SHIFT 12
31 37
38#ifdef CONFIG_KVM_BOOK3S_64_HV
39/* For now use fixed-size 16MB page table */
40#define HPT_ORDER 24
41#define HPT_NPTEG (1ul << (HPT_ORDER - 7)) /* 128B per pteg */
42#define HPT_NPTE (HPT_NPTEG << 3) /* 8 PTEs per PTEG */
43#define HPT_HASH_MASK (HPT_NPTEG - 1)
44#endif
45
46#define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */
47
48/*
49 * We use a lock bit in HPTE dword 0 to synchronize updates and
50 * accesses to each HPTE, and another bit to indicate non-present
51 * HPTEs.
52 */
53#define HPTE_V_HVLOCK 0x40UL
54#define HPTE_V_ABSENT 0x20UL
55
56static inline long try_lock_hpte(unsigned long *hpte, unsigned long bits)
57{
58 unsigned long tmp, old;
59
60 asm volatile(" ldarx %0,0,%2\n"
61 " and. %1,%0,%3\n"
62 " bne 2f\n"
63 " ori %0,%0,%4\n"
64 " stdcx. %0,0,%2\n"
65 " beq+ 2f\n"
66 " li %1,%3\n"
67 "2: isync"
68 : "=&r" (tmp), "=&r" (old)
69 : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
70 : "cc", "memory");
71 return old == 0;
72}
73
32static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r, 74static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
33 unsigned long pte_index) 75 unsigned long pte_index)
34{ 76{
@@ -62,4 +104,140 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
62 return rb; 104 return rb;
63} 105}
64 106
107static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
108{
109 /* only handle 4k, 64k and 16M pages for now */
110 if (!(h & HPTE_V_LARGE))
111 return 1ul << 12; /* 4k page */
112 if ((l & 0xf000) == 0x1000 && cpu_has_feature(CPU_FTR_ARCH_206))
113 return 1ul << 16; /* 64k page */
114 if ((l & 0xff000) == 0)
115 return 1ul << 24; /* 16M page */
116 return 0; /* error */
117}
118
119static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
120{
121 return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
122}
123
124static inline int hpte_is_writable(unsigned long ptel)
125{
126 unsigned long pp = ptel & (HPTE_R_PP0 | HPTE_R_PP);
127
128 return pp != PP_RXRX && pp != PP_RXXX;
129}
130
131static inline unsigned long hpte_make_readonly(unsigned long ptel)
132{
133 if ((ptel & HPTE_R_PP0) || (ptel & HPTE_R_PP) == PP_RWXX)
134 ptel = (ptel & ~HPTE_R_PP) | PP_RXXX;
135 else
136 ptel |= PP_RXRX;
137 return ptel;
138}
139
140static inline int hpte_cache_flags_ok(unsigned long ptel, unsigned long io_type)
141{
142 unsigned int wimg = ptel & HPTE_R_WIMG;
143
144 /* Handle SAO */
145 if (wimg == (HPTE_R_W | HPTE_R_I | HPTE_R_M) &&
146 cpu_has_feature(CPU_FTR_ARCH_206))
147 wimg = HPTE_R_M;
148
149 if (!io_type)
150 return wimg == HPTE_R_M;
151
152 return (wimg & (HPTE_R_W | HPTE_R_I)) == io_type;
153}
154
155/*
156 * Lock and read a linux PTE. If it's present and writable, atomically
157 * set dirty and referenced bits and return the PTE, otherwise return 0.
158 */
159static inline pte_t kvmppc_read_update_linux_pte(pte_t *p, int writing)
160{
161 pte_t pte, tmp;
162
163 /* wait until _PAGE_BUSY is clear then set it atomically */
164 __asm__ __volatile__ (
165 "1: ldarx %0,0,%3\n"
166 " andi. %1,%0,%4\n"
167 " bne- 1b\n"
168 " ori %1,%0,%4\n"
169 " stdcx. %1,0,%3\n"
170 " bne- 1b"
171 : "=&r" (pte), "=&r" (tmp), "=m" (*p)
172 : "r" (p), "i" (_PAGE_BUSY)
173 : "cc");
174
175 if (pte_present(pte)) {
176 pte = pte_mkyoung(pte);
177 if (writing && pte_write(pte))
178 pte = pte_mkdirty(pte);
179 }
180
181 *p = pte; /* clears _PAGE_BUSY */
182
183 return pte;
184}
185
186/* Return HPTE cache control bits corresponding to Linux pte bits */
187static inline unsigned long hpte_cache_bits(unsigned long pte_val)
188{
189#if _PAGE_NO_CACHE == HPTE_R_I && _PAGE_WRITETHRU == HPTE_R_W
190 return pte_val & (HPTE_R_W | HPTE_R_I);
191#else
192 return ((pte_val & _PAGE_NO_CACHE) ? HPTE_R_I : 0) +
193 ((pte_val & _PAGE_WRITETHRU) ? HPTE_R_W : 0);
194#endif
195}
196
197static inline bool hpte_read_permission(unsigned long pp, unsigned long key)
198{
199 if (key)
200 return PP_RWRX <= pp && pp <= PP_RXRX;
201 return 1;
202}
203
204static inline bool hpte_write_permission(unsigned long pp, unsigned long key)
205{
206 if (key)
207 return pp == PP_RWRW;
208 return pp <= PP_RWRW;
209}
210
211static inline int hpte_get_skey_perm(unsigned long hpte_r, unsigned long amr)
212{
213 unsigned long skey;
214
215 skey = ((hpte_r & HPTE_R_KEY_HI) >> 57) |
216 ((hpte_r & HPTE_R_KEY_LO) >> 9);
217 return (amr >> (62 - 2 * skey)) & 3;
218}
219
220static inline void lock_rmap(unsigned long *rmap)
221{
222 do {
223 while (test_bit(KVMPPC_RMAP_LOCK_BIT, rmap))
224 cpu_relax();
225 } while (test_and_set_bit_lock(KVMPPC_RMAP_LOCK_BIT, rmap));
226}
227
228static inline void unlock_rmap(unsigned long *rmap)
229{
230 __clear_bit_unlock(KVMPPC_RMAP_LOCK_BIT, rmap);
231}
232
233static inline bool slot_is_aligned(struct kvm_memory_slot *memslot,
234 unsigned long pagesize)
235{
236 unsigned long mask = (pagesize >> PAGE_SHIFT) - 1;
237
238 if (pagesize <= PAGE_SIZE)
239 return 1;
240 return !(memslot->base_gfn & mask) && !(memslot->npages & mask);
241}
242
65#endif /* __ASM_KVM_BOOK3S_64_H__ */ 243#endif /* __ASM_KVM_BOOK3S_64_H__ */
diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h
index adbfca9dd100..8cd50a514271 100644
--- a/arch/powerpc/include/asm/kvm_e500.h
+++ b/arch/powerpc/include/asm/kvm_e500.h
@@ -22,46 +22,55 @@
22#define E500_PID_NUM 3 22#define E500_PID_NUM 3
23#define E500_TLB_NUM 2 23#define E500_TLB_NUM 2
24 24
25struct tlbe{
26 u32 mas1;
27 u32 mas2;
28 u32 mas3;
29 u32 mas7;
30};
31
32#define E500_TLB_VALID 1 25#define E500_TLB_VALID 1
33#define E500_TLB_DIRTY 2 26#define E500_TLB_DIRTY 2
34 27
35struct tlbe_priv { 28struct tlbe_ref {
36 pfn_t pfn; 29 pfn_t pfn;
37 unsigned int flags; /* E500_TLB_* */ 30 unsigned int flags; /* E500_TLB_* */
38}; 31};
39 32
33struct tlbe_priv {
34 struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
35};
36
40struct vcpu_id_table; 37struct vcpu_id_table;
41 38
39struct kvmppc_e500_tlb_params {
40 int entries, ways, sets;
41};
42
42struct kvmppc_vcpu_e500 { 43struct kvmppc_vcpu_e500 {
43 /* Unmodified copy of the guest's TLB. */ 44 /* Unmodified copy of the guest's TLB -- shared with host userspace. */
44 struct tlbe *gtlb_arch[E500_TLB_NUM]; 45 struct kvm_book3e_206_tlb_entry *gtlb_arch;
46
47 /* Starting entry number in gtlb_arch[] */
48 int gtlb_offset[E500_TLB_NUM];
45 49
46 /* KVM internal information associated with each guest TLB entry */ 50 /* KVM internal information associated with each guest TLB entry */
47 struct tlbe_priv *gtlb_priv[E500_TLB_NUM]; 51 struct tlbe_priv *gtlb_priv[E500_TLB_NUM];
48 52
49 unsigned int gtlb_size[E500_TLB_NUM]; 53 struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM];
54
50 unsigned int gtlb_nv[E500_TLB_NUM]; 55 unsigned int gtlb_nv[E500_TLB_NUM];
51 56
57 /*
58 * information associated with each host TLB entry --
59 * TLB1 only for now. If/when guest TLB1 entries can be
60 * mapped with host TLB0, this will be used for that too.
61 *
62 * We don't want to use this for guest TLB0 because then we'd
63 * have the overhead of doing the translation again even if
64 * the entry is still in the guest TLB (e.g. we swapped out
65 * and back, and our host TLB entries got evicted).
66 */
67 struct tlbe_ref *tlb_refs[E500_TLB_NUM];
68 unsigned int host_tlb1_nv;
69
52 u32 host_pid[E500_PID_NUM]; 70 u32 host_pid[E500_PID_NUM];
53 u32 pid[E500_PID_NUM]; 71 u32 pid[E500_PID_NUM];
54 u32 svr; 72 u32 svr;
55 73
56 u32 mas0;
57 u32 mas1;
58 u32 mas2;
59 u32 mas3;
60 u32 mas4;
61 u32 mas5;
62 u32 mas6;
63 u32 mas7;
64
65 /* vcpu id table */ 74 /* vcpu id table */
66 struct vcpu_id_table *idt; 75 struct vcpu_id_table *idt;
67 76
@@ -73,6 +82,9 @@ struct kvmppc_vcpu_e500 {
73 u32 tlb1cfg; 82 u32 tlb1cfg;
74 u64 mcar; 83 u64 mcar;
75 84
85 struct page **shared_tlb_pages;
86 int num_shared_tlb_pages;
87
76 struct kvm_vcpu vcpu; 88 struct kvm_vcpu vcpu;
77}; 89};
78 90
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index bf8af5d5d5dc..52eb9c1f4fe0 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -32,17 +32,32 @@
32#include <linux/atomic.h> 32#include <linux/atomic.h>
33#include <asm/kvm_asm.h> 33#include <asm/kvm_asm.h>
34#include <asm/processor.h> 34#include <asm/processor.h>
35#include <asm/page.h>
35 36
36#define KVM_MAX_VCPUS NR_CPUS 37#define KVM_MAX_VCPUS NR_CPUS
37#define KVM_MAX_VCORES NR_CPUS 38#define KVM_MAX_VCORES NR_CPUS
38#define KVM_MEMORY_SLOTS 32 39#define KVM_MEMORY_SLOTS 32
39/* memory slots that does not exposed to userspace */ 40/* memory slots that does not exposed to userspace */
40#define KVM_PRIVATE_MEM_SLOTS 4 41#define KVM_PRIVATE_MEM_SLOTS 4
42#define KVM_MEM_SLOTS_NUM (KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
41 43
42#ifdef CONFIG_KVM_MMIO 44#ifdef CONFIG_KVM_MMIO
43#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 45#define KVM_COALESCED_MMIO_PAGE_OFFSET 1
44#endif 46#endif
45 47
48#ifdef CONFIG_KVM_BOOK3S_64_HV
49#include <linux/mmu_notifier.h>
50
51#define KVM_ARCH_WANT_MMU_NOTIFIER
52
53struct kvm;
54extern int kvm_unmap_hva(struct kvm *kvm, unsigned long hva);
55extern int kvm_age_hva(struct kvm *kvm, unsigned long hva);
56extern int kvm_test_age_hva(struct kvm *kvm, unsigned long hva);
57extern void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte);
58
59#endif
60
46/* We don't currently support large pages. */ 61/* We don't currently support large pages. */
47#define KVM_HPAGE_GFN_SHIFT(x) 0 62#define KVM_HPAGE_GFN_SHIFT(x) 0
48#define KVM_NR_PAGE_SIZES 1 63#define KVM_NR_PAGE_SIZES 1
@@ -158,34 +173,72 @@ struct kvmppc_spapr_tce_table {
158 struct page *pages[0]; 173 struct page *pages[0];
159}; 174};
160 175
161struct kvmppc_rma_info { 176struct kvmppc_linear_info {
162 void *base_virt; 177 void *base_virt;
163 unsigned long base_pfn; 178 unsigned long base_pfn;
164 unsigned long npages; 179 unsigned long npages;
165 struct list_head list; 180 struct list_head list;
166 atomic_t use_count; 181 atomic_t use_count;
182 int type;
183};
184
185/*
186 * The reverse mapping array has one entry for each HPTE,
187 * which stores the guest's view of the second word of the HPTE
188 * (including the guest physical address of the mapping),
189 * plus forward and backward pointers in a doubly-linked ring
190 * of HPTEs that map the same host page. The pointers in this
191 * ring are 32-bit HPTE indexes, to save space.
192 */
193struct revmap_entry {
194 unsigned long guest_rpte;
195 unsigned int forw, back;
196};
197
198/*
199 * We use the top bit of each memslot->rmap entry as a lock bit,
200 * and bit 32 as a present flag. The bottom 32 bits are the
201 * index in the guest HPT of a HPTE that points to the page.
202 */
203#define KVMPPC_RMAP_LOCK_BIT 63
204#define KVMPPC_RMAP_RC_SHIFT 32
205#define KVMPPC_RMAP_REFERENCED (HPTE_R_R << KVMPPC_RMAP_RC_SHIFT)
206#define KVMPPC_RMAP_CHANGED (HPTE_R_C << KVMPPC_RMAP_RC_SHIFT)
207#define KVMPPC_RMAP_PRESENT 0x100000000ul
208#define KVMPPC_RMAP_INDEX 0xfffffffful
209
210/* Low-order bits in kvm->arch.slot_phys[][] */
211#define KVMPPC_PAGE_ORDER_MASK 0x1f
212#define KVMPPC_PAGE_NO_CACHE HPTE_R_I /* 0x20 */
213#define KVMPPC_PAGE_WRITETHRU HPTE_R_W /* 0x40 */
214#define KVMPPC_GOT_PAGE 0x80
215
216struct kvm_arch_memory_slot {
167}; 217};
168 218
169struct kvm_arch { 219struct kvm_arch {
170#ifdef CONFIG_KVM_BOOK3S_64_HV 220#ifdef CONFIG_KVM_BOOK3S_64_HV
171 unsigned long hpt_virt; 221 unsigned long hpt_virt;
172 unsigned long ram_npages; 222 struct revmap_entry *revmap;
173 unsigned long ram_psize;
174 unsigned long ram_porder;
175 struct kvmppc_pginfo *ram_pginfo;
176 unsigned int lpid; 223 unsigned int lpid;
177 unsigned int host_lpid; 224 unsigned int host_lpid;
178 unsigned long host_lpcr; 225 unsigned long host_lpcr;
179 unsigned long sdr1; 226 unsigned long sdr1;
180 unsigned long host_sdr1; 227 unsigned long host_sdr1;
181 int tlbie_lock; 228 int tlbie_lock;
182 int n_rma_pages;
183 unsigned long lpcr; 229 unsigned long lpcr;
184 unsigned long rmor; 230 unsigned long rmor;
185 struct kvmppc_rma_info *rma; 231 struct kvmppc_linear_info *rma;
232 unsigned long vrma_slb_v;
233 int rma_setup_done;
234 int using_mmu_notifiers;
186 struct list_head spapr_tce_tables; 235 struct list_head spapr_tce_tables;
236 spinlock_t slot_phys_lock;
237 unsigned long *slot_phys[KVM_MEM_SLOTS_NUM];
238 int slot_npages[KVM_MEM_SLOTS_NUM];
187 unsigned short last_vcpu[NR_CPUS]; 239 unsigned short last_vcpu[NR_CPUS];
188 struct kvmppc_vcore *vcores[KVM_MAX_VCORES]; 240 struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
241 struct kvmppc_linear_info *hpt_li;
189#endif /* CONFIG_KVM_BOOK3S_64_HV */ 242#endif /* CONFIG_KVM_BOOK3S_64_HV */
190}; 243};
191 244
@@ -318,10 +371,6 @@ struct kvm_vcpu_arch {
318 u32 vrsave; /* also USPRG0 */ 371 u32 vrsave; /* also USPRG0 */
319 u32 mmucr; 372 u32 mmucr;
320 ulong shadow_msr; 373 ulong shadow_msr;
321 ulong sprg4;
322 ulong sprg5;
323 ulong sprg6;
324 ulong sprg7;
325 ulong csrr0; 374 ulong csrr0;
326 ulong csrr1; 375 ulong csrr1;
327 ulong dsrr0; 376 ulong dsrr0;
@@ -329,16 +378,14 @@ struct kvm_vcpu_arch {
329 ulong mcsrr0; 378 ulong mcsrr0;
330 ulong mcsrr1; 379 ulong mcsrr1;
331 ulong mcsr; 380 ulong mcsr;
332 ulong esr;
333 u32 dec; 381 u32 dec;
334 u32 decar; 382 u32 decar;
335 u32 tbl; 383 u32 tbl;
336 u32 tbu; 384 u32 tbu;
337 u32 tcr; 385 u32 tcr;
338 u32 tsr; 386 ulong tsr; /* we need to perform set/clr_bits() which requires ulong */
339 u32 ivor[64]; 387 u32 ivor[64];
340 ulong ivpr; 388 ulong ivpr;
341 u32 pir;
342 u32 pvr; 389 u32 pvr;
343 390
344 u32 shadow_pid; 391 u32 shadow_pid;
@@ -427,9 +474,14 @@ struct kvm_vcpu_arch {
427#ifdef CONFIG_KVM_BOOK3S_64_HV 474#ifdef CONFIG_KVM_BOOK3S_64_HV
428 struct kvm_vcpu_arch_shared shregs; 475 struct kvm_vcpu_arch_shared shregs;
429 476
477 unsigned long pgfault_addr;
478 long pgfault_index;
479 unsigned long pgfault_hpte[2];
480
430 struct list_head run_list; 481 struct list_head run_list;
431 struct task_struct *run_task; 482 struct task_struct *run_task;
432 struct kvm_run *kvm_run; 483 struct kvm_run *kvm_run;
484 pgd_t *pgdir;
433#endif 485#endif
434}; 486};
435 487
@@ -438,4 +490,12 @@ struct kvm_vcpu_arch {
438#define KVMPPC_VCPU_BUSY_IN_HOST 1 490#define KVMPPC_VCPU_BUSY_IN_HOST 1
439#define KVMPPC_VCPU_RUNNABLE 2 491#define KVMPPC_VCPU_RUNNABLE 2
440 492
493/* Values for vcpu->arch.io_gpr */
494#define KVM_MMIO_REG_MASK 0x001f
495#define KVM_MMIO_REG_EXT_MASK 0xffe0
496#define KVM_MMIO_REG_GPR 0x0000
497#define KVM_MMIO_REG_FPR 0x0020
498#define KVM_MMIO_REG_QPR 0x0040
499#define KVM_MMIO_REG_FQPR 0x0060
500
441#endif /* __POWERPC_KVM_HOST_H__ */ 501#endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
index 50533f9adf40..7b754e743003 100644
--- a/arch/powerpc/include/asm/kvm_para.h
+++ b/arch/powerpc/include/asm/kvm_para.h
@@ -22,6 +22,16 @@
22 22
23#include <linux/types.h> 23#include <linux/types.h>
24 24
25/*
26 * Additions to this struct must only occur at the end, and should be
27 * accompanied by a KVM_MAGIC_FEAT flag to advertise that they are present
28 * (albeit not necessarily relevant to the current target hardware platform).
29 *
30 * Struct fields are always 32 or 64 bit aligned, depending on them being 32
31 * or 64 bit wide respectively.
32 *
33 * See Documentation/virtual/kvm/ppc-pv.txt
34 */
25struct kvm_vcpu_arch_shared { 35struct kvm_vcpu_arch_shared {
26 __u64 scratch1; 36 __u64 scratch1;
27 __u64 scratch2; 37 __u64 scratch2;
@@ -33,11 +43,35 @@ struct kvm_vcpu_arch_shared {
33 __u64 sprg3; 43 __u64 sprg3;
34 __u64 srr0; 44 __u64 srr0;
35 __u64 srr1; 45 __u64 srr1;
36 __u64 dar; 46 __u64 dar; /* dear on BookE */
37 __u64 msr; 47 __u64 msr;
38 __u32 dsisr; 48 __u32 dsisr;
39 __u32 int_pending; /* Tells the guest if we have an interrupt */ 49 __u32 int_pending; /* Tells the guest if we have an interrupt */
40 __u32 sr[16]; 50 __u32 sr[16];
51 __u32 mas0;
52 __u32 mas1;
53 __u64 mas7_3;
54 __u64 mas2;
55 __u32 mas4;
56 __u32 mas6;
57 __u32 esr;
58 __u32 pir;
59
60 /*
61 * SPRG4-7 are user-readable, so we can only keep these consistent
62 * between the shared area and the real registers when there's an
63 * intervening exit to KVM. This also applies to SPRG3 on some
64 * chips.
65 *
66 * This suffices for access by guest userspace, since in PR-mode
67 * KVM, an exit must occur when changing the guest's MSR[PR].
68 * If the guest kernel writes to SPRG3-7 via the shared area, it
69 * must also use the shared area for reading while in kernel space.
70 */
71 __u64 sprg4;
72 __u64 sprg5;
73 __u64 sprg6;
74 __u64 sprg7;
41}; 75};
42 76
43#define KVM_SC_MAGIC_R0 0x4b564d21 /* "KVM!" */ 77#define KVM_SC_MAGIC_R0 0x4b564d21 /* "KVM!" */
@@ -47,7 +81,10 @@ struct kvm_vcpu_arch_shared {
47 81
48#define KVM_FEATURE_MAGIC_PAGE 1 82#define KVM_FEATURE_MAGIC_PAGE 1
49 83
50#define KVM_MAGIC_FEAT_SR (1 << 0) 84#define KVM_MAGIC_FEAT_SR (1 << 0)
85
86/* MASn, ESR, PIR, and high SPRGs */
87#define KVM_MAGIC_FEAT_MAS0_TO_SPRG7 (1 << 1)
51 88
52#ifdef __KERNEL__ 89#ifdef __KERNEL__
53 90
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 46efd1a265c9..9d6dee0f7d48 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -66,6 +66,7 @@ extern int kvmppc_emulate_instruction(struct kvm_run *run,
66extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu); 66extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
67extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu); 67extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
68extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb); 68extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
69extern void kvmppc_decrementer_func(unsigned long data);
69extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu); 70extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
70 71
71/* Core-specific hooks */ 72/* Core-specific hooks */
@@ -94,7 +95,7 @@ extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
94extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu); 95extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
95extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu); 96extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
96 97
97extern void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu); 98extern void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
98extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu); 99extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
99extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags); 100extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
100extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu); 101extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
@@ -120,15 +121,17 @@ extern long kvmppc_alloc_hpt(struct kvm *kvm);
120extern void kvmppc_free_hpt(struct kvm *kvm); 121extern void kvmppc_free_hpt(struct kvm *kvm);
121extern long kvmppc_prepare_vrma(struct kvm *kvm, 122extern long kvmppc_prepare_vrma(struct kvm *kvm,
122 struct kvm_userspace_memory_region *mem); 123 struct kvm_userspace_memory_region *mem);
123extern void kvmppc_map_vrma(struct kvm *kvm, 124extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
124 struct kvm_userspace_memory_region *mem); 125 struct kvm_memory_slot *memslot, unsigned long porder);
125extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu); 126extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
126extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, 127extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
127 struct kvm_create_spapr_tce *args); 128 struct kvm_create_spapr_tce *args);
128extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, 129extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
129 struct kvm_allocate_rma *rma); 130 struct kvm_allocate_rma *rma);
130extern struct kvmppc_rma_info *kvm_alloc_rma(void); 131extern struct kvmppc_linear_info *kvm_alloc_rma(void);
131extern void kvm_release_rma(struct kvmppc_rma_info *ri); 132extern void kvm_release_rma(struct kvmppc_linear_info *ri);
133extern struct kvmppc_linear_info *kvm_alloc_hpt(void);
134extern void kvm_release_hpt(struct kvmppc_linear_info *li);
132extern int kvmppc_core_init_vm(struct kvm *kvm); 135extern int kvmppc_core_init_vm(struct kvm *kvm);
133extern void kvmppc_core_destroy_vm(struct kvm *kvm); 136extern void kvmppc_core_destroy_vm(struct kvm *kvm);
134extern int kvmppc_core_prepare_memory_region(struct kvm *kvm, 137extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
@@ -175,6 +178,9 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
175void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); 178void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
176int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); 179int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
177 180
181int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
182int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
183
178void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid); 184void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);
179 185
180#ifdef CONFIG_KVM_BOOK3S_64_HV 186#ifdef CONFIG_KVM_BOOK3S_64_HV
@@ -183,14 +189,19 @@ static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
183 paca[cpu].kvm_hstate.xics_phys = addr; 189 paca[cpu].kvm_hstate.xics_phys = addr;
184} 190}
185 191
186extern void kvm_rma_init(void); 192extern void kvm_linear_init(void);
187 193
188#else 194#else
189static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr) 195static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
190{} 196{}
191 197
192static inline void kvm_rma_init(void) 198static inline void kvm_linear_init(void)
193{} 199{}
194#endif 200#endif
195 201
202int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
203 struct kvm_config_tlb *cfg);
204int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
205 struct kvm_dirty_tlb *cfg);
206
196#endif /* __POWERPC_KVM_PPC_H__ */ 207#endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h
index e0298d26ce5d..a76254af0aaa 100644
--- a/arch/powerpc/include/asm/lppaca.h
+++ b/arch/powerpc/include/asm/lppaca.h
@@ -41,15 +41,7 @@
41 * We only have to have statically allocated lppaca structs on 41 * We only have to have statically allocated lppaca structs on
42 * legacy iSeries, which supports at most 64 cpus. 42 * legacy iSeries, which supports at most 64 cpus.
43 */ 43 */
44#ifdef CONFIG_PPC_ISERIES
45#if NR_CPUS < 64
46#define NR_LPPACAS NR_CPUS
47#else
48#define NR_LPPACAS 64
49#endif
50#else /* not iSeries */
51#define NR_LPPACAS 1 44#define NR_LPPACAS 1
52#endif
53 45
54 46
55/* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k 47/* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index bf37931d1ad6..42ce570812c1 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -99,9 +99,7 @@ struct machdep_calls {
99 99
100 void (*init_IRQ)(void); 100 void (*init_IRQ)(void);
101 101
102 /* Return an irq, or NO_IRQ to indicate there are none pending. 102 /* Return an irq, or NO_IRQ to indicate there are none pending. */
103 * If for some reason there is no irq, but the interrupt
104 * shouldn't be counted as spurious, return NO_IRQ_IGNORE. */
105 unsigned int (*get_irq)(void); 103 unsigned int (*get_irq)(void);
106 104
107 /* PCI stuff */ 105 /* PCI stuff */
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index f5f89cafebd0..cdb5421877e2 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -41,9 +41,10 @@
41/* MAS registers bit definitions */ 41/* MAS registers bit definitions */
42 42
43#define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000) 43#define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000)
44#define MAS0_ESEL(x) (((x) << 16) & 0x0FFF0000)
45#define MAS0_NV(x) ((x) & 0x00000FFF)
46#define MAS0_ESEL_MASK 0x0FFF0000 44#define MAS0_ESEL_MASK 0x0FFF0000
45#define MAS0_ESEL_SHIFT 16
46#define MAS0_ESEL(x) (((x) << MAS0_ESEL_SHIFT) & MAS0_ESEL_MASK)
47#define MAS0_NV(x) ((x) & 0x00000FFF)
47#define MAS0_HES 0x00004000 48#define MAS0_HES 0x00004000
48#define MAS0_WQ_ALLWAYS 0x00000000 49#define MAS0_WQ_ALLWAYS 0x00000000
49#define MAS0_WQ_COND 0x00001000 50#define MAS0_WQ_COND 0x00001000
@@ -167,6 +168,7 @@
167#define TLBnCFG_MAXSIZE 0x000f0000 /* Maximum Page Size (v1.0) */ 168#define TLBnCFG_MAXSIZE 0x000f0000 /* Maximum Page Size (v1.0) */
168#define TLBnCFG_MAXSIZE_SHIFT 16 169#define TLBnCFG_MAXSIZE_SHIFT 16
169#define TLBnCFG_ASSOC 0xff000000 /* Associativity */ 170#define TLBnCFG_ASSOC 0xff000000 /* Associativity */
171#define TLBnCFG_ASSOC_SHIFT 24
170 172
171/* TLBnPS encoding */ 173/* TLBnPS encoding */
172#define TLBnPS_4K 0x00000004 174#define TLBnPS_4K 0x00000004
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index 412ba493cb98..1c65a59881ea 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -108,11 +108,11 @@ extern char initial_stab[];
108#define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000) 108#define HPTE_V_VRMA_MASK ASM_CONST(0x4001ffffff000000)
109 109
110/* Values for PP (assumes Ks=0, Kp=1) */ 110/* Values for PP (assumes Ks=0, Kp=1) */
111/* pp0 will always be 0 for linux */
112#define PP_RWXX 0 /* Supervisor read/write, User none */ 111#define PP_RWXX 0 /* Supervisor read/write, User none */
113#define PP_RWRX 1 /* Supervisor read/write, User read */ 112#define PP_RWRX 1 /* Supervisor read/write, User read */
114#define PP_RWRW 2 /* Supervisor read/write, User read/write */ 113#define PP_RWRW 2 /* Supervisor read/write, User read/write */
115#define PP_RXRX 3 /* Supervisor read, User read */ 114#define PP_RXRX 3 /* Supervisor read, User read */
115#define PP_RXXX (HPTE_R_PP0 | 2) /* Supervisor read, user none */
116 116
117#ifndef __ASSEMBLY__ 117#ifndef __ASSEMBLY__
118 118
@@ -267,7 +267,6 @@ extern void demote_segment_4k(struct mm_struct *mm, unsigned long addr);
267 267
268extern void hpte_init_native(void); 268extern void hpte_init_native(void);
269extern void hpte_init_lpar(void); 269extern void hpte_init_lpar(void);
270extern void hpte_init_iSeries(void);
271extern void hpte_init_beat(void); 270extern void hpte_init_beat(void);
272extern void hpte_init_beat_v3(void); 271extern void hpte_init_beat_v3(void);
273 272
@@ -325,9 +324,6 @@ extern void slb_set_size(u16 size);
325 * WARNING - If you change these you must make sure the asm 324 * WARNING - If you change these you must make sure the asm
326 * implementations in slb_allocate (slb_low.S), do_stab_bolted 325 * implementations in slb_allocate (slb_low.S), do_stab_bolted
327 * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly. 326 * (head.S) and ASM_VSID_SCRAMBLE (below) are changed accordingly.
328 *
329 * You'll also need to change the precomputed VSID values in head.S
330 * which are used by the iSeries firmware.
331 */ 327 */
332 328
333#define VSID_MULTIPLIER_256M ASM_CONST(200730139) /* 28-bit prime */ 329#define VSID_MULTIPLIER_256M ASM_CONST(200730139) /* 28-bit prime */
@@ -484,14 +480,6 @@ static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
484 | (ea >> SID_SHIFT_1T), 1T); 480 | (ea >> SID_SHIFT_1T), 1T);
485} 481}
486 482
487/*
488 * This is only used on legacy iSeries in lparmap.c,
489 * hence the 256MB segment assumption.
490 */
491#define VSID_SCRAMBLE(pvsid) (((pvsid) * VSID_MULTIPLIER_256M) % \
492 VSID_MODULUS_256M)
493#define KERNEL_VSID(ea) VSID_SCRAMBLE(GET_ESID(ea))
494
495#endif /* __ASSEMBLY__ */ 483#endif /* __ASSEMBLY__ */
496 484
497#endif /* _ASM_POWERPC_MMU_HASH64_H_ */ 485#endif /* _ASM_POWERPC_MMU_HASH64_H_ */
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h
index 67b4d9837236..c65b9294376e 100644
--- a/arch/powerpc/include/asm/mpic.h
+++ b/arch/powerpc/include/asm/mpic.h
@@ -255,7 +255,7 @@ struct mpic
255 struct device_node *node; 255 struct device_node *node;
256 256
257 /* The remapper for this MPIC */ 257 /* The remapper for this MPIC */
258 struct irq_host *irqhost; 258 struct irq_domain *irqhost;
259 259
260 /* The "linux" controller struct */ 260 /* The "linux" controller struct */
261 struct irq_chip hc_irq; 261 struct irq_chip hc_irq;
@@ -273,7 +273,6 @@ struct mpic
273 unsigned int isu_size; 273 unsigned int isu_size;
274 unsigned int isu_shift; 274 unsigned int isu_shift;
275 unsigned int isu_mask; 275 unsigned int isu_mask;
276 unsigned int irq_count;
277 /* Number of sources */ 276 /* Number of sources */
278 unsigned int num_sources; 277 unsigned int num_sources;
279 /* default senses array */ 278 /* default senses array */
@@ -349,8 +348,6 @@ struct mpic
349#define MPIC_U3_HT_IRQS 0x00000004 348#define MPIC_U3_HT_IRQS 0x00000004
350/* Broken IPI registers (autodetected) */ 349/* Broken IPI registers (autodetected) */
351#define MPIC_BROKEN_IPI 0x00000008 350#define MPIC_BROKEN_IPI 0x00000008
352/* MPIC wants a reset */
353#define MPIC_WANTS_RESET 0x00000010
354/* Spurious vector requires EOI */ 351/* Spurious vector requires EOI */
355#define MPIC_SPV_EOI 0x00000020 352#define MPIC_SPV_EOI 0x00000020
356/* No passthrough disable */ 353/* No passthrough disable */
@@ -363,15 +360,11 @@ struct mpic
363#define MPIC_ENABLE_MCK 0x00000200 360#define MPIC_ENABLE_MCK 0x00000200
364/* Disable bias among target selection, spread interrupts evenly */ 361/* Disable bias among target selection, spread interrupts evenly */
365#define MPIC_NO_BIAS 0x00000400 362#define MPIC_NO_BIAS 0x00000400
366/* Ignore NIRQS as reported by FRR */
367#define MPIC_BROKEN_FRR_NIRQS 0x00000800
368/* Destination only supports a single CPU at a time */ 363/* Destination only supports a single CPU at a time */
369#define MPIC_SINGLE_DEST_CPU 0x00001000 364#define MPIC_SINGLE_DEST_CPU 0x00001000
370/* Enable CoreInt delivery of interrupts */ 365/* Enable CoreInt delivery of interrupts */
371#define MPIC_ENABLE_COREINT 0x00002000 366#define MPIC_ENABLE_COREINT 0x00002000
372/* Disable resetting of the MPIC. 367/* Do not reset the MPIC during initialization */
373 * NOTE: This flag trumps MPIC_WANTS_RESET.
374 */
375#define MPIC_NO_RESET 0x00004000 368#define MPIC_NO_RESET 0x00004000
376/* Freescale MPIC (compatible includes "fsl,mpic") */ 369/* Freescale MPIC (compatible includes "fsl,mpic") */
377#define MPIC_FSL 0x00008000 370#define MPIC_FSL 0x00008000
diff --git a/arch/powerpc/include/asm/mpic_msgr.h b/arch/powerpc/include/asm/mpic_msgr.h
new file mode 100644
index 000000000000..3ec37dc9003e
--- /dev/null
+++ b/arch/powerpc/include/asm/mpic_msgr.h
@@ -0,0 +1,132 @@
1/*
2 * Copyright 2011-2012, Meador Inge, Mentor Graphics Corporation.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; version 2 of the
7 * License.
8 *
9 */
10
11#ifndef _ASM_MPIC_MSGR_H
12#define _ASM_MPIC_MSGR_H
13
14#include <linux/types.h>
15#include <linux/spinlock.h>
16
17struct mpic_msgr {
18 u32 __iomem *base;
19 u32 __iomem *mer;
20 int irq;
21 unsigned char in_use;
22 raw_spinlock_t lock;
23 int num;
24};
25
26/* Get a message register
27 *
28 * @reg_num: the MPIC message register to get
29 *
30 * A pointer to the message register is returned. If
31 * the message register asked for is already in use, then
32 * EBUSY is returned. If the number given is not associated
33 * with an actual message register, then ENODEV is returned.
34 * Successfully getting the register marks it as in use.
35 */
36extern struct mpic_msgr *mpic_msgr_get(unsigned int reg_num);
37
38/* Relinquish a message register
39 *
40 * @msgr: the message register to return
41 *
42 * Disables the given message register and marks it as free.
43 * After this call has completed successully the message
44 * register is available to be acquired by a call to
45 * mpic_msgr_get.
46 */
47extern void mpic_msgr_put(struct mpic_msgr *msgr);
48
49/* Enable a message register
50 *
51 * @msgr: the message register to enable
52 *
53 * The given message register is enabled for sending
54 * messages.
55 */
56extern void mpic_msgr_enable(struct mpic_msgr *msgr);
57
58/* Disable a message register
59 *
60 * @msgr: the message register to disable
61 *
62 * The given message register is disabled for sending
63 * messages.
64 */
65extern void mpic_msgr_disable(struct mpic_msgr *msgr);
66
67/* Write a message to a message register
68 *
69 * @msgr: the message register to write to
70 * @message: the message to write
71 *
72 * The given 32-bit message is written to the given message
73 * register. Writing to an enabled message registers fires
74 * an interrupt.
75 */
76static inline void mpic_msgr_write(struct mpic_msgr *msgr, u32 message)
77{
78 out_be32(msgr->base, message);
79}
80
81/* Read a message from a message register
82 *
83 * @msgr: the message register to read from
84 *
85 * Returns the 32-bit value currently in the given message register.
86 * Upon reading the register any interrupts for that register are
87 * cleared.
88 */
89static inline u32 mpic_msgr_read(struct mpic_msgr *msgr)
90{
91 return in_be32(msgr->base);
92}
93
94/* Clear a message register
95 *
96 * @msgr: the message register to clear
97 *
98 * Clears any interrupts associated with the given message register.
99 */
100static inline void mpic_msgr_clear(struct mpic_msgr *msgr)
101{
102 (void) mpic_msgr_read(msgr);
103}
104
105/* Set the destination CPU for the message register
106 *
107 * @msgr: the message register whose destination is to be set
108 * @cpu_num: the Linux CPU number to bind the message register to
109 *
110 * Note that the CPU number given is the CPU number used by the kernel
111 * and *not* the actual hardware CPU number.
112 */
113static inline void mpic_msgr_set_destination(struct mpic_msgr *msgr,
114 u32 cpu_num)
115{
116 out_be32(msgr->base, 1 << get_hard_smp_processor_id(cpu_num));
117}
118
119/* Get the IRQ number for the message register
120 * @msgr: the message register whose IRQ is to be returned
121 *
122 * Returns the IRQ number associated with the given message register.
123 * NO_IRQ is returned if this message register is not capable of
124 * receiving interrupts. What message register can and cannot receive
125 * interrupts is specified in the device tree for the system.
126 */
127static inline int mpic_msgr_get_irq(struct mpic_msgr *msgr)
128{
129 return msgr->irq;
130}
131
132#endif
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index 269c05a36d91..daf813fea91f 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -132,7 +132,7 @@ struct paca_struct {
132 u64 saved_msr; /* MSR saved here by enter_rtas */ 132 u64 saved_msr; /* MSR saved here by enter_rtas */
133 u16 trap_save; /* Used when bad stack is encountered */ 133 u16 trap_save; /* Used when bad stack is encountered */
134 u8 soft_enabled; /* irq soft-enable flag */ 134 u8 soft_enabled; /* irq soft-enable flag */
135 u8 hard_enabled; /* set if irqs are enabled in MSR */ 135 u8 irq_happened; /* irq happened while soft-disabled */
136 u8 io_sync; /* writel() needs spin_unlock sync */ 136 u8 io_sync; /* writel() needs spin_unlock sync */
137 u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */ 137 u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
138 u8 nap_state_lost; /* NV GPR values lost in power7_idle */ 138 u8 nap_state_lost; /* NV GPR values lost in power7_idle */
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 5d487657322e..ac39e6a3b25a 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -155,14 +155,7 @@ struct pci_dn {
155 155
156 struct pci_dev *pcidev; /* back-pointer to the pci device */ 156 struct pci_dev *pcidev; /* back-pointer to the pci device */
157#ifdef CONFIG_EEH 157#ifdef CONFIG_EEH
158 int class_code; /* pci device class */ 158 struct eeh_dev *edev; /* eeh device */
159 int eeh_mode; /* See eeh.h for possible EEH_MODEs */
160 int eeh_config_addr;
161 int eeh_pe_config_addr; /* new-style partition endpoint address */
162 int eeh_check_count; /* # times driver ignored error */
163 int eeh_freeze_count; /* # times this device froze up. */
164 int eeh_false_positives; /* # times this device reported #ff's */
165 u32 config_space[16]; /* saved PCI config space */
166#endif 159#endif
167#define IODA_INVALID_PE (-1) 160#define IODA_INVALID_PE (-1)
168#ifdef CONFIG_PPC_POWERNV 161#ifdef CONFIG_PPC_POWERNV
@@ -185,6 +178,13 @@ static inline int pci_device_from_OF_node(struct device_node *np,
185 return 0; 178 return 0;
186} 179}
187 180
181#if defined(CONFIG_EEH)
182static inline struct eeh_dev *of_node_to_eeh_dev(struct device_node *dn)
183{
184 return PCI_DN(dn)->edev;
185}
186#endif
187
188/** Find the bus corresponding to the indicated device node */ 188/** Find the bus corresponding to the indicated device node */
189extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn); 189extern struct pci_bus *pcibios_find_pci_bus(struct device_node *dn);
190 190
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h
index f54b3d26ce9d..6653f2743c4e 100644
--- a/arch/powerpc/include/asm/pci.h
+++ b/arch/powerpc/include/asm/pci.h
@@ -154,14 +154,6 @@ extern int pci_mmap_legacy_page_range(struct pci_bus *bus,
154 154
155#endif /* CONFIG_PPC64 */ 155#endif /* CONFIG_PPC64 */
156 156
157extern void pcibios_resource_to_bus(struct pci_dev *dev,
158 struct pci_bus_region *region,
159 struct resource *res);
160
161extern void pcibios_bus_to_resource(struct pci_dev *dev,
162 struct resource *res,
163 struct pci_bus_region *region);
164
165extern void pcibios_claim_one_bus(struct pci_bus *b); 157extern void pcibios_claim_one_bus(struct pci_bus *b);
166 158
167extern void pcibios_finish_adding_to_bus(struct pci_bus *bus); 159extern void pcibios_finish_adding_to_bus(struct pci_bus *bus);
@@ -190,6 +182,7 @@ extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
190 const struct resource *rsrc, 182 const struct resource *rsrc,
191 resource_size_t *start, resource_size_t *end); 183 resource_size_t *start, resource_size_t *end);
192 184
185extern resource_size_t pcibios_io_space_offset(struct pci_controller *hose);
193extern void pcibios_setup_bus_devices(struct pci_bus *bus); 186extern void pcibios_setup_bus_devices(struct pci_bus *bus);
194extern void pcibios_setup_bus_self(struct pci_bus *bus); 187extern void pcibios_setup_bus_self(struct pci_bus *bus);
195extern void pcibios_setup_phb_io_space(struct pci_controller *hose); 188extern void pcibios_setup_phb_io_space(struct pci_controller *hose);
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 8f1df1208d23..078019b5b353 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -47,6 +47,8 @@ struct power_pmu {
47 */ 47 */
48#define PPMU_LIMITED_PMC5_6 1 /* PMC5/6 have limited function */ 48#define PPMU_LIMITED_PMC5_6 1 /* PMC5/6 have limited function */
49#define PPMU_ALT_SIPR 2 /* uses alternate posn for SIPR/HV */ 49#define PPMU_ALT_SIPR 2 /* uses alternate posn for SIPR/HV */
50#define PPMU_NO_SIPR 4 /* no SIPR/HV in MMCRA at all */
51#define PPMU_NO_CONT_SAMPLING 8 /* no continuous sampling */
50 52
51/* 53/*
52 * Values for flags to get_alternatives() 54 * Values for flags to get_alternatives()
@@ -61,8 +63,6 @@ struct pt_regs;
61extern unsigned long perf_misc_flags(struct pt_regs *regs); 63extern unsigned long perf_misc_flags(struct pt_regs *regs);
62extern unsigned long perf_instruction_pointer(struct pt_regs *regs); 64extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
63 65
64#define PERF_EVENT_INDEX_OFFSET 1
65
66/* 66/*
67 * Only override the default definitions in include/linux/perf_event.h 67 * Only override the default definitions in include/linux/perf_event.h
68 * if we have hardware PMU support. 68 * if we have hardware PMU support.
diff --git a/arch/powerpc/include/asm/phyp_dump.h b/arch/powerpc/include/asm/phyp_dump.h
deleted file mode 100644
index fa74c6c3e106..000000000000
--- a/arch/powerpc/include/asm/phyp_dump.h
+++ /dev/null
@@ -1,47 +0,0 @@
1/*
2 * Hypervisor-assisted dump
3 *
4 * Linas Vepstas, Manish Ahuja 2008
5 * Copyright 2008 IBM Corp.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#ifndef _PPC64_PHYP_DUMP_H
14#define _PPC64_PHYP_DUMP_H
15
16#ifdef CONFIG_PHYP_DUMP
17
18/* The RMR region will be saved for later dumping
19 * whenever the kernel crashes. Set this to 256MB. */
20#define PHYP_DUMP_RMR_START 0x0
21#define PHYP_DUMP_RMR_END (1UL<<28)
22
23struct phyp_dump {
24 /* Memory that is reserved during very early boot. */
25 unsigned long init_reserve_start;
26 unsigned long init_reserve_size;
27 /* cmd line options during boot */
28 unsigned long reserve_bootvar;
29 unsigned long phyp_dump_at_boot;
30 /* Check status during boot if dump supported, active & present*/
31 unsigned long phyp_dump_configured;
32 unsigned long phyp_dump_is_active;
33 /* store cpu & hpte size */
34 unsigned long cpu_state_size;
35 unsigned long hpte_region_size;
36 /* previous scratch area values */
37 unsigned long reserved_scratch_addr;
38 unsigned long reserved_scratch_size;
39};
40
41extern struct phyp_dump *phyp_dump_info;
42
43int early_init_dt_scan_phyp_dump(unsigned long node,
44 const char *uname, int depth, void *data);
45
46#endif /* CONFIG_PHYP_DUMP */
47#endif /* _PPC64_PHYP_DUMP_H */
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h
index e980faae4225..d81f99430fe7 100644
--- a/arch/powerpc/include/asm/ppc-opcode.h
+++ b/arch/powerpc/include/asm/ppc-opcode.h
@@ -45,6 +45,7 @@
45#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff 45#define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff
46#define PPC_INST_MTSPR_DSCR 0x7c1103a6 46#define PPC_INST_MTSPR_DSCR 0x7c1103a6
47#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff 47#define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff
48#define PPC_INST_SLBFEE 0x7c0007a7
48 49
49#define PPC_INST_STRING 0x7c00042a 50#define PPC_INST_STRING 0x7c00042a
50#define PPC_INST_STRING_MASK 0xfc0007fe 51#define PPC_INST_STRING_MASK 0xfc0007fe
@@ -183,7 +184,8 @@
183 __PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b)) 184 __PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b))
184#define PPC_ERATSX_DOT(t, a, w) stringify_in_c(.long PPC_INST_ERATSX_DOT | \ 185#define PPC_ERATSX_DOT(t, a, w) stringify_in_c(.long PPC_INST_ERATSX_DOT | \
185 __PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b)) 186 __PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b))
186 187#define PPC_SLBFEE_DOT(t, b) stringify_in_c(.long PPC_INST_SLBFEE | \
188 __PPC_RT(t) | __PPC_RB(b))
187 189
188/* 190/*
189 * Define what the VSX XX1 form instructions will look like, then add 191 * Define what the VSX XX1 form instructions will look like, then add
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h
index 43268f15004e..80fa704d410f 100644
--- a/arch/powerpc/include/asm/ppc-pci.h
+++ b/arch/powerpc/include/asm/ppc-pci.h
@@ -45,94 +45,21 @@ extern void init_pci_config_tokens (void);
45extern unsigned long get_phb_buid (struct device_node *); 45extern unsigned long get_phb_buid (struct device_node *);
46extern int rtas_setup_phb(struct pci_controller *phb); 46extern int rtas_setup_phb(struct pci_controller *phb);
47 47
48extern unsigned long pci_probe_only;
49
50/* ---- EEH internal-use-only related routines ---- */
51#ifdef CONFIG_EEH 48#ifdef CONFIG_EEH
52 49
50void pci_addr_cache_build(void);
53void pci_addr_cache_insert_device(struct pci_dev *dev); 51void pci_addr_cache_insert_device(struct pci_dev *dev);
54void pci_addr_cache_remove_device(struct pci_dev *dev); 52void pci_addr_cache_remove_device(struct pci_dev *dev);
55void pci_addr_cache_build(void); 53struct pci_dev *pci_addr_cache_get_device(unsigned long addr);
56struct pci_dev *pci_get_device_by_addr(unsigned long addr); 54void eeh_slot_error_detail(struct eeh_dev *edev, int severity);
57 55int eeh_pci_enable(struct eeh_dev *edev, int function);
58/** 56int eeh_reset_pe(struct eeh_dev *);
59 * eeh_slot_error_detail -- record and EEH error condition to the log 57void eeh_restore_bars(struct eeh_dev *);
60 * @pdn: pci device node
61 * @severity: EEH_LOG_TEMP_FAILURE or EEH_LOG_PERM_FAILURE
62 *
63 * Obtains the EEH error details from the RTAS subsystem,
64 * and then logs these details with the RTAS error log system.
65 */
66#define EEH_LOG_TEMP_FAILURE 1
67#define EEH_LOG_PERM_FAILURE 2
68void eeh_slot_error_detail (struct pci_dn *pdn, int severity);
69
70/**
71 * rtas_pci_enable - enable IO transfers for this slot
72 * @pdn: pci device node
73 * @function: either EEH_THAW_MMIO or EEH_THAW_DMA
74 *
75 * Enable I/O transfers to this slot
76 */
77#define EEH_THAW_MMIO 2
78#define EEH_THAW_DMA 3
79int rtas_pci_enable(struct pci_dn *pdn, int function);
80
81/**
82 * rtas_set_slot_reset -- unfreeze a frozen slot
83 * @pdn: pci device node
84 *
85 * Clear the EEH-frozen condition on a slot. This routine
86 * does this by asserting the PCI #RST line for 1/8th of
87 * a second; this routine will sleep while the adapter is
88 * being reset.
89 *
90 * Returns a non-zero value if the reset failed.
91 */
92int rtas_set_slot_reset (struct pci_dn *);
93int eeh_wait_for_slot_status(struct pci_dn *pdn, int max_wait_msecs);
94
95/**
96 * eeh_restore_bars - Restore device configuration info.
97 * @pdn: pci device node
98 *
99 * A reset of a PCI device will clear out its config space.
100 * This routines will restore the config space for this
101 * device, and is children, to values previously obtained
102 * from the firmware.
103 */
104void eeh_restore_bars(struct pci_dn *);
105
106/**
107 * rtas_configure_bridge -- firmware initialization of pci bridge
108 * @pdn: pci device node
109 *
110 * Ask the firmware to configure all PCI bridges devices
111 * located behind the indicated node. Required after a
112 * pci device reset. Does essentially the same hing as
113 * eeh_restore_bars, but for brdges, and lets firmware
114 * do the work.
115 */
116void rtas_configure_bridge(struct pci_dn *);
117
118int rtas_write_config(struct pci_dn *, int where, int size, u32 val); 58int rtas_write_config(struct pci_dn *, int where, int size, u32 val);
119int rtas_read_config(struct pci_dn *, int where, int size, u32 *val); 59int rtas_read_config(struct pci_dn *, int where, int size, u32 *val);
120 60void eeh_mark_slot(struct device_node *dn, int mode_flag);
121/** 61void eeh_clear_slot(struct device_node *dn, int mode_flag);
122 * eeh_mark_slot -- set mode flags for pertition endpoint 62struct device_node *eeh_find_device_pe(struct device_node *dn);
123 * @pdn: pci device node
124 *
125 * mark and clear slots: find "partition endpoint" PE and set or
126 * clear the flags for each subnode of the PE.
127 */
128void eeh_mark_slot (struct device_node *dn, int mode_flag);
129void eeh_clear_slot (struct device_node *dn, int mode_flag);
130
131/**
132 * find_device_pe -- Find the associated "Partiationable Endpoint" PE
133 * @pdn: pci device node
134 */
135struct device_node * find_device_pe(struct device_node *dn);
136 63
137void eeh_sysfs_add_device(struct pci_dev *pdev); 64void eeh_sysfs_add_device(struct pci_dev *pdev);
138void eeh_sysfs_remove_device(struct pci_dev *pdev); 65void eeh_sysfs_remove_device(struct pci_dev *pdev);
@@ -142,6 +69,11 @@ static inline const char *eeh_pci_name(struct pci_dev *pdev)
142 return pdev ? pci_name(pdev) : "<null>"; 69 return pdev ? pci_name(pdev) : "<null>";
143} 70}
144 71
72static inline const char *eeh_driver_name(struct pci_dev *pdev)
73{
74 return (pdev && pdev->driver) ? pdev->driver->name : "<null>";
75}
76
145#endif /* CONFIG_EEH */ 77#endif /* CONFIG_EEH */
146 78
147#else /* CONFIG_PCI */ 79#else /* CONFIG_PCI */
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h
index 368f72f79808..50f73aa2ba21 100644
--- a/arch/powerpc/include/asm/ppc_asm.h
+++ b/arch/powerpc/include/asm/ppc_asm.h
@@ -60,6 +60,8 @@ BEGIN_FW_FTR_SECTION; \
60 cmpd cr1,r11,r10; \ 60 cmpd cr1,r11,r10; \
61 beq+ cr1,33f; \ 61 beq+ cr1,33f; \
62 bl .accumulate_stolen_time; \ 62 bl .accumulate_stolen_time; \
63 ld r12,_MSR(r1); \
64 andi. r10,r12,MSR_PR; /* Restore cr0 (coming from user) */ \
6333: \ 6533: \
64END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) 66END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
65 67
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h
index 78a205162fd7..84cc7840cd18 100644
--- a/arch/powerpc/include/asm/ptrace.h
+++ b/arch/powerpc/include/asm/ptrace.h
@@ -83,8 +83,18 @@ struct pt_regs {
83 83
84#ifndef __ASSEMBLY__ 84#ifndef __ASSEMBLY__
85 85
86#define instruction_pointer(regs) ((regs)->nip) 86#define GET_IP(regs) ((regs)->nip)
87#define user_stack_pointer(regs) ((regs)->gpr[1]) 87#define GET_USP(regs) ((regs)->gpr[1])
88#define GET_FP(regs) (0)
89#define SET_FP(regs, val)
90
91#ifdef CONFIG_SMP
92extern unsigned long profile_pc(struct pt_regs *regs);
93#define profile_pc profile_pc
94#endif
95
96#include <asm-generic/ptrace.h>
97
88#define kernel_stack_pointer(regs) ((regs)->gpr[1]) 98#define kernel_stack_pointer(regs) ((regs)->gpr[1])
89static inline int is_syscall_success(struct pt_regs *regs) 99static inline int is_syscall_success(struct pt_regs *regs)
90{ 100{
@@ -99,12 +109,6 @@ static inline long regs_return_value(struct pt_regs *regs)
99 return -regs->gpr[3]; 109 return -regs->gpr[3];
100} 110}
101 111
102#ifdef CONFIG_SMP
103extern unsigned long profile_pc(struct pt_regs *regs);
104#else
105#define profile_pc(regs) instruction_pointer(regs)
106#endif
107
108#ifdef __powerpc64__ 112#ifdef __powerpc64__
109#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1) 113#define user_mode(regs) ((((regs)->msr) >> MSR_PR_LG) & 0x1)
110#else 114#else
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 7fdc2c0b7fa0..9d7f0fb69028 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -216,6 +216,7 @@
216#define DSISR_ISSTORE 0x02000000 /* access was a store */ 216#define DSISR_ISSTORE 0x02000000 /* access was a store */
217#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */ 217#define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
218#define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */ 218#define DSISR_NOSEGMENT 0x00200000 /* STAB/SLB miss */
219#define DSISR_KEYFAULT 0x00200000 /* Key fault */
219#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */ 220#define SPRN_TBRL 0x10C /* Time Base Read Lower Register (user, R/O) */
220#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */ 221#define SPRN_TBRU 0x10D /* Time Base Read Upper Register (user, R/O) */
221#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */ 222#define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */
@@ -237,6 +238,7 @@
237#define LPCR_ISL (1ul << (63-2)) 238#define LPCR_ISL (1ul << (63-2))
238#define LPCR_VC_SH (63-2) 239#define LPCR_VC_SH (63-2)
239#define LPCR_DPFD_SH (63-11) 240#define LPCR_DPFD_SH (63-11)
241#define LPCR_VRMASD (0x1ful << (63-16))
240#define LPCR_VRMA_L (1ul << (63-12)) 242#define LPCR_VRMA_L (1ul << (63-12))
241#define LPCR_VRMA_LP0 (1ul << (63-15)) 243#define LPCR_VRMA_LP0 (1ul << (63-15))
242#define LPCR_VRMA_LP1 (1ul << (63-16)) 244#define LPCR_VRMA_LP1 (1ul << (63-16))
@@ -493,6 +495,9 @@
493#define SPRN_SPRG7 0x117 /* Special Purpose Register General 7 */ 495#define SPRN_SPRG7 0x117 /* Special Purpose Register General 7 */
494#define SPRN_SRR0 0x01A /* Save/Restore Register 0 */ 496#define SPRN_SRR0 0x01A /* Save/Restore Register 0 */
495#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */ 497#define SPRN_SRR1 0x01B /* Save/Restore Register 1 */
498#define SRR1_ISI_NOPT 0x40000000 /* ISI: Not found in hash */
499#define SRR1_ISI_N_OR_G 0x10000000 /* ISI: Access is no-exec or G */
500#define SRR1_ISI_PROT 0x08000000 /* ISI: Other protection fault */
496#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */ 501#define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */
497#define SRR1_WAKESYSERR 0x00300000 /* System error */ 502#define SRR1_WAKESYSERR 0x00300000 /* System error */
498#define SRR1_WAKEEE 0x00200000 /* External interrupt */ 503#define SRR1_WAKEEE 0x00200000 /* External interrupt */
@@ -1079,30 +1084,12 @@
1079 1084
1080#define proc_trap() asm volatile("trap") 1085#define proc_trap() asm volatile("trap")
1081 1086
1082#ifdef CONFIG_PPC64 1087#define __get_SP() ({unsigned long sp; \
1083 1088 asm volatile("mr %0,1": "=r" (sp)); sp;})
1084extern void ppc64_runlatch_on(void);
1085extern void __ppc64_runlatch_off(void);
1086
1087#define ppc64_runlatch_off() \
1088 do { \
1089 if (cpu_has_feature(CPU_FTR_CTRL) && \
1090 test_thread_flag(TIF_RUNLATCH)) \
1091 __ppc64_runlatch_off(); \
1092 } while (0)
1093 1089
1094extern unsigned long scom970_read(unsigned int address); 1090extern unsigned long scom970_read(unsigned int address);
1095extern void scom970_write(unsigned int address, unsigned long value); 1091extern void scom970_write(unsigned int address, unsigned long value);
1096 1092
1097#else
1098#define ppc64_runlatch_on()
1099#define ppc64_runlatch_off()
1100
1101#endif /* CONFIG_PPC64 */
1102
1103#define __get_SP() ({unsigned long sp; \
1104 asm volatile("mr %0,1": "=r" (sp)); sp;})
1105
1106struct pt_regs; 1093struct pt_regs;
1107 1094
1108extern void ppc_save_regs(struct pt_regs *regs); 1095extern void ppc_save_regs(struct pt_regs *regs);
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 500fe1dc43e6..8a97aa7289d3 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -62,6 +62,7 @@
62#define SPRN_DVC2 0x13F /* Data Value Compare Register 2 */ 62#define SPRN_DVC2 0x13F /* Data Value Compare Register 2 */
63#define SPRN_MAS8 0x155 /* MMU Assist Register 8 */ 63#define SPRN_MAS8 0x155 /* MMU Assist Register 8 */
64#define SPRN_TLB0PS 0x158 /* TLB 0 Page Size Register */ 64#define SPRN_TLB0PS 0x158 /* TLB 0 Page Size Register */
65#define SPRN_TLB1PS 0x159 /* TLB 1 Page Size Register */
65#define SPRN_MAS5_MAS6 0x15c /* MMU Assist Register 5 || 6 */ 66#define SPRN_MAS5_MAS6 0x15c /* MMU Assist Register 5 || 6 */
66#define SPRN_MAS8_MAS1 0x15d /* MMU Assist Register 8 || 1 */ 67#define SPRN_MAS8_MAS1 0x15d /* MMU Assist Register 8 || 1 */
67#define SPRN_EPTCFG 0x15e /* Embedded Page Table Config */ 68#define SPRN_EPTCFG 0x15e /* Embedded Page Table Config */
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h
index 01c143bb77ae..f0a4db31ecb6 100644
--- a/arch/powerpc/include/asm/rtas.h
+++ b/arch/powerpc/include/asm/rtas.h
@@ -74,7 +74,6 @@ struct rtas_suspend_me_data {
74/* RTAS event classes */ 74/* RTAS event classes */
75#define RTAS_INTERNAL_ERROR 0x80000000 /* set bit 0 */ 75#define RTAS_INTERNAL_ERROR 0x80000000 /* set bit 0 */
76#define RTAS_EPOW_WARNING 0x40000000 /* set bit 1 */ 76#define RTAS_EPOW_WARNING 0x40000000 /* set bit 1 */
77#define RTAS_POWERMGM_EVENTS 0x20000000 /* set bit 2 */
78#define RTAS_HOTPLUG_EVENTS 0x10000000 /* set bit 3 */ 77#define RTAS_HOTPLUG_EVENTS 0x10000000 /* set bit 3 */
79#define RTAS_IO_EVENTS 0x08000000 /* set bit 4 */ 78#define RTAS_IO_EVENTS 0x08000000 /* set bit 4 */
80#define RTAS_EVENT_SCAN_ALL_EVENTS 0xffffffff 79#define RTAS_EVENT_SCAN_ALL_EVENTS 0xffffffff
@@ -204,6 +203,39 @@ struct rtas_ext_event_log_v6 {
204 /* Variable length. */ 203 /* Variable length. */
205}; 204};
206 205
206/* pSeries event log format */
207
208/* Two bytes ASCII section IDs */
209#define PSERIES_ELOG_SECT_ID_PRIV_HDR (('P' << 8) | 'H')
210#define PSERIES_ELOG_SECT_ID_USER_HDR (('U' << 8) | 'H')
211#define PSERIES_ELOG_SECT_ID_PRIMARY_SRC (('P' << 8) | 'S')
212#define PSERIES_ELOG_SECT_ID_EXTENDED_UH (('E' << 8) | 'H')
213#define PSERIES_ELOG_SECT_ID_FAILING_MTMS (('M' << 8) | 'T')
214#define PSERIES_ELOG_SECT_ID_SECONDARY_SRC (('S' << 8) | 'S')
215#define PSERIES_ELOG_SECT_ID_DUMP_LOCATOR (('D' << 8) | 'H')
216#define PSERIES_ELOG_SECT_ID_FW_ERROR (('S' << 8) | 'W')
217#define PSERIES_ELOG_SECT_ID_IMPACT_PART_ID (('L' << 8) | 'P')
218#define PSERIES_ELOG_SECT_ID_LOGIC_RESOURCE_ID (('L' << 8) | 'R')
219#define PSERIES_ELOG_SECT_ID_HMC_ID (('H' << 8) | 'M')
220#define PSERIES_ELOG_SECT_ID_EPOW (('E' << 8) | 'P')
221#define PSERIES_ELOG_SECT_ID_IO_EVENT (('I' << 8) | 'E')
222#define PSERIES_ELOG_SECT_ID_MANUFACT_INFO (('M' << 8) | 'I')
223#define PSERIES_ELOG_SECT_ID_CALL_HOME (('C' << 8) | 'H')
224#define PSERIES_ELOG_SECT_ID_USER_DEF (('U' << 8) | 'D')
225
226/* Vendor specific Platform Event Log Format, Version 6, section header */
227struct pseries_errorlog {
228 uint16_t id; /* 0x00 2-byte ASCII section ID */
229 uint16_t length; /* 0x02 Section length in bytes */
230 uint8_t version; /* 0x04 Section version */
231 uint8_t subtype; /* 0x05 Section subtype */
232 uint16_t creator_component; /* 0x06 Creator component ID */
233 uint8_t data[]; /* 0x08 Start of section data */
234};
235
236struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
237 uint16_t section_id);
238
207/* 239/*
208 * This can be set by the rtas_flash module so that it can get called 240 * This can be set by the rtas_flash module so that it can get called
209 * as the absolutely last thing before the kernel terminates. 241 * as the absolutely last thing before the kernel terminates.
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index adba970ce918..ebc24dc5b1a1 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -122,7 +122,6 @@ extern void smp_muxed_ipi_set_data(int cpu, unsigned long data);
122extern void smp_muxed_ipi_message_pass(int cpu, int msg); 122extern void smp_muxed_ipi_message_pass(int cpu, int msg);
123extern irqreturn_t smp_ipi_demux(void); 123extern irqreturn_t smp_ipi_demux(void);
124 124
125void smp_init_iSeries(void);
126void smp_init_pSeries(void); 125void smp_init_pSeries(void);
127void smp_init_cell(void); 126void smp_init_cell(void);
128void smp_init_celleb(void); 127void smp_init_celleb(void);
diff --git a/arch/powerpc/include/asm/socket.h b/arch/powerpc/include/asm/socket.h
index 2fc2af8fbf59..3d5179bb122f 100644
--- a/arch/powerpc/include/asm/socket.h
+++ b/arch/powerpc/include/asm/socket.h
@@ -71,5 +71,9 @@
71 71
72#define SO_WIFI_STATUS 41 72#define SO_WIFI_STATUS 41
73#define SCM_WIFI_STATUS SO_WIFI_STATUS 73#define SCM_WIFI_STATUS SO_WIFI_STATUS
74#define SO_PEEK_OFF 42
75
76/* Instruct lower device to use last 4-bytes of skb data as FCS */
77#define SO_NOFCS 43
74 78
75#endif /* _ASM_POWERPC_SOCKET_H */ 79#endif /* _ASM_POWERPC_SOCKET_H */
diff --git a/arch/powerpc/include/asm/spinlock.h b/arch/powerpc/include/asm/spinlock.h
index f9611bd69ed2..7124fc06ad47 100644
--- a/arch/powerpc/include/asm/spinlock.h
+++ b/arch/powerpc/include/asm/spinlock.h
@@ -23,7 +23,6 @@
23#ifdef CONFIG_PPC64 23#ifdef CONFIG_PPC64
24#include <asm/paca.h> 24#include <asm/paca.h>
25#include <asm/hvcall.h> 25#include <asm/hvcall.h>
26#include <asm/iseries/hv_call.h>
27#endif 26#endif
28#include <asm/asm-compat.h> 27#include <asm/asm-compat.h>
29#include <asm/synch.h> 28#include <asm/synch.h>
@@ -95,12 +94,12 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
95 * value. 94 * value.
96 */ 95 */
97 96
98#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) 97#if defined(CONFIG_PPC_SPLPAR)
99/* We only yield to the hypervisor if we are in shared processor mode */ 98/* We only yield to the hypervisor if we are in shared processor mode */
100#define SHARED_PROCESSOR (get_lppaca()->shared_proc) 99#define SHARED_PROCESSOR (get_lppaca()->shared_proc)
101extern void __spin_yield(arch_spinlock_t *lock); 100extern void __spin_yield(arch_spinlock_t *lock);
102extern void __rw_yield(arch_rwlock_t *lock); 101extern void __rw_yield(arch_rwlock_t *lock);
103#else /* SPLPAR || ISERIES */ 102#else /* SPLPAR */
104#define __spin_yield(x) barrier() 103#define __spin_yield(x) barrier()
105#define __rw_yield(x) barrier() 104#define __rw_yield(x) barrier()
106#define SHARED_PROCESSOR 0 105#define SHARED_PROCESSOR 0
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h
index c377457d1b89..a02883d5af43 100644
--- a/arch/powerpc/include/asm/system.h
+++ b/arch/powerpc/include/asm/system.h
@@ -550,5 +550,43 @@ extern void reloc_got2(unsigned long);
550 550
551extern struct dentry *powerpc_debugfs_root; 551extern struct dentry *powerpc_debugfs_root;
552 552
553#ifdef CONFIG_PPC64
554
555extern void __ppc64_runlatch_on(void);
556extern void __ppc64_runlatch_off(void);
557
558/*
559 * We manually hard enable-disable, this is called
560 * in the idle loop and we don't want to mess up
561 * with soft-disable/enable & interrupt replay.
562 */
563#define ppc64_runlatch_off() \
564 do { \
565 if (cpu_has_feature(CPU_FTR_CTRL) && \
566 test_thread_local_flags(_TLF_RUNLATCH)) { \
567 unsigned long msr = mfmsr(); \
568 __hard_irq_disable(); \
569 __ppc64_runlatch_off(); \
570 if (msr & MSR_EE) \
571 __hard_irq_enable(); \
572 } \
573 } while (0)
574
575#define ppc64_runlatch_on() \
576 do { \
577 if (cpu_has_feature(CPU_FTR_CTRL) && \
578 !test_thread_local_flags(_TLF_RUNLATCH)) { \
579 unsigned long msr = mfmsr(); \
580 __hard_irq_disable(); \
581 __ppc64_runlatch_on(); \
582 if (msr & MSR_EE) \
583 __hard_irq_enable(); \
584 } \
585 } while (0)
586#else
587#define ppc64_runlatch_on()
588#define ppc64_runlatch_off()
589#endif /* CONFIG_PPC64 */
590
553#endif /* __KERNEL__ */ 591#endif /* __KERNEL__ */
554#endif /* _ASM_POWERPC_SYSTEM_H */ 592#endif /* _ASM_POWERPC_SYSTEM_H */
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index 964714940961..4a741c7efd02 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -110,7 +110,6 @@ static inline struct thread_info *current_thread_info(void)
110#define TIF_NOERROR 12 /* Force successful syscall return */ 110#define TIF_NOERROR 12 /* Force successful syscall return */
111#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */ 111#define TIF_NOTIFY_RESUME 13 /* callback before returning to user */
112#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ 112#define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */
113#define TIF_RUNLATCH 16 /* Is the runlatch enabled? */
114 113
115/* as above, but as bit values */ 114/* as above, but as bit values */
116#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) 115#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
@@ -141,11 +140,13 @@ static inline struct thread_info *current_thread_info(void)
141#define TLF_SLEEPING 1 /* suspend code enabled SLEEP mode */ 140#define TLF_SLEEPING 1 /* suspend code enabled SLEEP mode */
142#define TLF_RESTORE_SIGMASK 2 /* Restore signal mask in do_signal */ 141#define TLF_RESTORE_SIGMASK 2 /* Restore signal mask in do_signal */
143#define TLF_LAZY_MMU 3 /* tlb_batch is active */ 142#define TLF_LAZY_MMU 3 /* tlb_batch is active */
143#define TLF_RUNLATCH 4 /* Is the runlatch enabled? */
144 144
145#define _TLF_NAPPING (1 << TLF_NAPPING) 145#define _TLF_NAPPING (1 << TLF_NAPPING)
146#define _TLF_SLEEPING (1 << TLF_SLEEPING) 146#define _TLF_SLEEPING (1 << TLF_SLEEPING)
147#define _TLF_RESTORE_SIGMASK (1 << TLF_RESTORE_SIGMASK) 147#define _TLF_RESTORE_SIGMASK (1 << TLF_RESTORE_SIGMASK)
148#define _TLF_LAZY_MMU (1 << TLF_LAZY_MMU) 148#define _TLF_LAZY_MMU (1 << TLF_LAZY_MMU)
149#define _TLF_RUNLATCH (1 << TLF_RUNLATCH)
149 150
150#ifndef __ASSEMBLY__ 151#ifndef __ASSEMBLY__
151#define HAVE_SET_RESTORE_SIGMASK 1 152#define HAVE_SET_RESTORE_SIGMASK 1
@@ -156,6 +157,12 @@ static inline void set_restore_sigmask(void)
156 set_bit(TIF_SIGPENDING, &ti->flags); 157 set_bit(TIF_SIGPENDING, &ti->flags);
157} 158}
158 159
160static inline bool test_thread_local_flags(unsigned int flags)
161{
162 struct thread_info *ti = current_thread_info();
163 return (ti->local_flags & flags) != 0;
164}
165
159#ifdef CONFIG_PPC64 166#ifdef CONFIG_PPC64
160#define is_32bit_task() (test_thread_flag(TIF_32BIT)) 167#define is_32bit_task() (test_thread_flag(TIF_32BIT))
161#else 168#else
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 7eb10fb96cd0..2136f58a54e8 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -18,11 +18,6 @@
18#include <linux/percpu.h> 18#include <linux/percpu.h>
19 19
20#include <asm/processor.h> 20#include <asm/processor.h>
21#ifdef CONFIG_PPC_ISERIES
22#include <asm/paca.h>
23#include <asm/firmware.h>
24#include <asm/iseries/hv_call.h>
25#endif
26 21
27/* time.c */ 22/* time.c */
28extern unsigned long tb_ticks_per_jiffy; 23extern unsigned long tb_ticks_per_jiffy;
@@ -167,15 +162,6 @@ static inline void set_dec(int val)
167#ifndef CONFIG_BOOKE 162#ifndef CONFIG_BOOKE
168 --val; 163 --val;
169#endif 164#endif
170#ifdef CONFIG_PPC_ISERIES
171 if (firmware_has_feature(FW_FEATURE_ISERIES) &&
172 get_lppaca()->shared_proc) {
173 get_lppaca()->virtual_decr = val;
174 if (get_dec() > val)
175 HvCall_setVirtualDecr();
176 return;
177 }
178#endif
179 mtspr(SPRN_DEC, val); 165 mtspr(SPRN_DEC, val);
180#endif /* not 40x or 8xx_CPU6 */ 166#endif /* not 40x or 8xx_CPU6 */
181} 167}
@@ -217,7 +203,6 @@ DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array);
217#endif 203#endif
218 204
219extern void secondary_cpu_time_init(void); 205extern void secondary_cpu_time_init(void);
220extern void iSeries_time_init_early(void);
221 206
222DECLARE_PER_CPU(u64, decrementers_next_tb); 207DECLARE_PER_CPU(u64, decrementers_next_tb);
223 208
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h
index 8338aef5a4d3..b3038817b8dc 100644
--- a/arch/powerpc/include/asm/udbg.h
+++ b/arch/powerpc/include/asm/udbg.h
@@ -44,7 +44,6 @@ extern void __init udbg_init_debug_lpar_hvsi(void);
44extern void __init udbg_init_pmac_realmode(void); 44extern void __init udbg_init_pmac_realmode(void);
45extern void __init udbg_init_maple_realmode(void); 45extern void __init udbg_init_maple_realmode(void);
46extern void __init udbg_init_pas_realmode(void); 46extern void __init udbg_init_pas_realmode(void);
47extern void __init udbg_init_iseries(void);
48extern void __init udbg_init_rtas_panel(void); 47extern void __init udbg_init_rtas_panel(void);
49extern void __init udbg_init_rtas_console(void); 48extern void __init udbg_init_rtas_console(void);
50extern void __init udbg_init_debug_beat(void); 49extern void __init udbg_init_debug_beat(void);
diff --git a/arch/powerpc/include/asm/vio.h b/arch/powerpc/include/asm/vio.h
index 0a290a195946..6bfd5ffe1d4f 100644
--- a/arch/powerpc/include/asm/vio.h
+++ b/arch/powerpc/include/asm/vio.h
@@ -69,6 +69,7 @@ struct vio_dev {
69}; 69};
70 70
71struct vio_driver { 71struct vio_driver {
72 const char *name;
72 const struct vio_device_id *id_table; 73 const struct vio_device_id *id_table;
73 int (*probe)(struct vio_dev *dev, const struct vio_device_id *id); 74 int (*probe)(struct vio_dev *dev, const struct vio_device_id *id);
74 int (*remove)(struct vio_dev *dev); 75 int (*remove)(struct vio_dev *dev);
@@ -76,10 +77,17 @@ struct vio_driver {
76 * be loaded in a CMO environment if it uses DMA. 77 * be loaded in a CMO environment if it uses DMA.
77 */ 78 */
78 unsigned long (*get_desired_dma)(struct vio_dev *dev); 79 unsigned long (*get_desired_dma)(struct vio_dev *dev);
80 const struct dev_pm_ops *pm;
79 struct device_driver driver; 81 struct device_driver driver;
80}; 82};
81 83
82extern int vio_register_driver(struct vio_driver *drv); 84extern int __vio_register_driver(struct vio_driver *drv, struct module *owner,
85 const char *mod_name);
86/*
87 * vio_register_driver must be a macro so that KBUILD_MODNAME can be expanded
88 */
89#define vio_register_driver(driver) \
90 __vio_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
83extern void vio_unregister_driver(struct vio_driver *drv); 91extern void vio_unregister_driver(struct vio_driver *drv);
84 92
85extern int vio_cmo_entitlement_update(size_t); 93extern int vio_cmo_entitlement_update(size_t);
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index c48de98ba94e..4ae9a09c3b89 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -86,7 +86,7 @@ struct ics {
86extern unsigned int xics_default_server; 86extern unsigned int xics_default_server;
87extern unsigned int xics_default_distrib_server; 87extern unsigned int xics_default_distrib_server;
88extern unsigned int xics_interrupt_server_size; 88extern unsigned int xics_interrupt_server_size;
89extern struct irq_host *xics_host; 89extern struct irq_domain *xics_host;
90 90
91struct xics_cppr { 91struct xics_cppr {
92 unsigned char stack[MAX_NUM_PRIORITIES]; 92 unsigned char stack[MAX_NUM_PRIORITIES];
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index ee728e433aa2..f5808a35688c 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -60,6 +60,7 @@ obj-$(CONFIG_IBMVIO) += vio.o
60obj-$(CONFIG_IBMEBUS) += ibmebus.o 60obj-$(CONFIG_IBMEBUS) += ibmebus.o
61obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o 61obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
62obj-$(CONFIG_CRASH_DUMP) += crash_dump.o 62obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
63obj-$(CONFIG_FA_DUMP) += fadump.o
63ifeq ($(CONFIG_PPC32),y) 64ifeq ($(CONFIG_PPC32),y)
64obj-$(CONFIG_E500) += idle_e500.o 65obj-$(CONFIG_E500) += idle_e500.o
65endif 66endif
@@ -113,15 +114,6 @@ obj-$(CONFIG_PPC_IO_WORKAROUNDS) += io-workarounds.o
113obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o 114obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
114obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o 115obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
115obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o 116obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
116obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
117
118obj-$(CONFIG_PPC_PERF_CTRS) += perf_event.o
119obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \
120 power5+-pmu.o power6-pmu.o power7-pmu.o
121obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
122
123obj-$(CONFIG_FSL_EMB_PERF_EVENT) += perf_event_fsl_emb.o
124obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += e500-pmu.o
125 117
126obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o 118obj-$(CONFIG_8XX_MINIMAL_FPEMU) += softemu8xx.o
127 119
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 04caee7d9bc1..34b8afe94a50 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -46,9 +46,6 @@
46#include <asm/hvcall.h> 46#include <asm/hvcall.h>
47#include <asm/xics.h> 47#include <asm/xics.h>
48#endif 48#endif
49#ifdef CONFIG_PPC_ISERIES
50#include <asm/iseries/alpaca.h>
51#endif
52#ifdef CONFIG_PPC_POWERNV 49#ifdef CONFIG_PPC_POWERNV
53#include <asm/opal.h> 50#include <asm/opal.h>
54#endif 51#endif
@@ -147,7 +144,7 @@ int main(void)
147 DEFINE(PACAKBASE, offsetof(struct paca_struct, kernelbase)); 144 DEFINE(PACAKBASE, offsetof(struct paca_struct, kernelbase));
148 DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr)); 145 DEFINE(PACAKMSR, offsetof(struct paca_struct, kernel_msr));
149 DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled)); 146 DEFINE(PACASOFTIRQEN, offsetof(struct paca_struct, soft_enabled));
150 DEFINE(PACAHARDIRQEN, offsetof(struct paca_struct, hard_enabled)); 147 DEFINE(PACAIRQHAPPENED, offsetof(struct paca_struct, irq_happened));
151 DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id)); 148 DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
152#ifdef CONFIG_PPC_MM_SLICES 149#ifdef CONFIG_PPC_MM_SLICES
153 DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct, 150 DEFINE(PACALOWSLICESPSIZE, offsetof(struct paca_struct,
@@ -384,17 +381,6 @@ int main(void)
384 DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry)); 381 DEFINE(BUG_ENTRY_SIZE, sizeof(struct bug_entry));
385#endif 382#endif
386 383
387#ifdef CONFIG_PPC_ISERIES
388 /* the assembler miscalculates the VSID values */
389 DEFINE(PAGE_OFFSET_ESID, GET_ESID(PAGE_OFFSET));
390 DEFINE(PAGE_OFFSET_VSID, KERNEL_VSID(PAGE_OFFSET));
391 DEFINE(VMALLOC_START_ESID, GET_ESID(VMALLOC_START));
392 DEFINE(VMALLOC_START_VSID, KERNEL_VSID(VMALLOC_START));
393
394 /* alpaca */
395 DEFINE(ALPACA_SIZE, sizeof(struct alpaca));
396#endif
397
398 DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE); 384 DEFINE(PGD_TABLE_SIZE, PGD_TABLE_SIZE);
399 DEFINE(PTE_SIZE, sizeof(pte_t)); 385 DEFINE(PTE_SIZE, sizeof(pte_t));
400 386
@@ -426,16 +412,23 @@ int main(void)
426 DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2)); 412 DEFINE(VCPU_SPRG2, offsetof(struct kvm_vcpu, arch.shregs.sprg2));
427 DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3)); 413 DEFINE(VCPU_SPRG3, offsetof(struct kvm_vcpu, arch.shregs.sprg3));
428#endif 414#endif
429 DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4)); 415 DEFINE(VCPU_SHARED_SPRG4, offsetof(struct kvm_vcpu_arch_shared, sprg4));
430 DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5)); 416 DEFINE(VCPU_SHARED_SPRG5, offsetof(struct kvm_vcpu_arch_shared, sprg5));
431 DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6)); 417 DEFINE(VCPU_SHARED_SPRG6, offsetof(struct kvm_vcpu_arch_shared, sprg6));
432 DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7)); 418 DEFINE(VCPU_SHARED_SPRG7, offsetof(struct kvm_vcpu_arch_shared, sprg7));
433 DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid)); 419 DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid));
434 DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1)); 420 DEFINE(VCPU_SHADOW_PID1, offsetof(struct kvm_vcpu, arch.shadow_pid1));
435 DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared)); 421 DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared));
436 DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr)); 422 DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr));
437 DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr)); 423 DEFINE(VCPU_SHADOW_MSR, offsetof(struct kvm_vcpu, arch.shadow_msr));
438 424
425 DEFINE(VCPU_SHARED_MAS0, offsetof(struct kvm_vcpu_arch_shared, mas0));
426 DEFINE(VCPU_SHARED_MAS1, offsetof(struct kvm_vcpu_arch_shared, mas1));
427 DEFINE(VCPU_SHARED_MAS2, offsetof(struct kvm_vcpu_arch_shared, mas2));
428 DEFINE(VCPU_SHARED_MAS7_3, offsetof(struct kvm_vcpu_arch_shared, mas7_3));
429 DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4));
430 DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6));
431
439 /* book3s */ 432 /* book3s */
440#ifdef CONFIG_KVM_BOOK3S_64_HV 433#ifdef CONFIG_KVM_BOOK3S_64_HV
441 DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid)); 434 DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
@@ -448,6 +441,7 @@ int main(void)
448 DEFINE(KVM_LAST_VCPU, offsetof(struct kvm, arch.last_vcpu)); 441 DEFINE(KVM_LAST_VCPU, offsetof(struct kvm, arch.last_vcpu));
449 DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr)); 442 DEFINE(KVM_LPCR, offsetof(struct kvm, arch.lpcr));
450 DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor)); 443 DEFINE(KVM_RMOR, offsetof(struct kvm, arch.rmor));
444 DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v));
451 DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr)); 445 DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
452 DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar)); 446 DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
453#endif 447#endif
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 81db9e2a8a20..138ae183c440 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -1816,7 +1816,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
1816 .platform = "ppc440", 1816 .platform = "ppc440",
1817 }, 1817 },
1818 { /* 464 in APM821xx */ 1818 { /* 464 in APM821xx */
1819 .pvr_mask = 0xffffff00, 1819 .pvr_mask = 0xfffffff0,
1820 .pvr_value = 0x12C41C80, 1820 .pvr_value = 0x12C41C80,
1821 .cpu_name = "APM821XX", 1821 .cpu_name = "APM821XX",
1822 .cpu_features = CPU_FTRS_44X, 1822 .cpu_features = CPU_FTRS_44X,
@@ -2019,6 +2019,24 @@ static struct cpu_spec __initdata cpu_specs[] = {
2019 .machine_check = machine_check_e500mc, 2019 .machine_check = machine_check_e500mc,
2020 .platform = "ppce5500", 2020 .platform = "ppce5500",
2021 }, 2021 },
2022 { /* e6500 */
2023 .pvr_mask = 0xffff0000,
2024 .pvr_value = 0x80400000,
2025 .cpu_name = "e6500",
2026 .cpu_features = CPU_FTRS_E6500,
2027 .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU,
2028 .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS |
2029 MMU_FTR_USE_TLBILX,
2030 .icache_bsize = 64,
2031 .dcache_bsize = 64,
2032 .num_pmcs = 4,
2033 .oprofile_cpu_type = "ppc/e6500",
2034 .oprofile_type = PPC_OPROFILE_FSL_EMB,
2035 .cpu_setup = __setup_cpu_e5500,
2036 .cpu_restore = __restore_cpu_e5500,
2037 .machine_check = machine_check_e500mc,
2038 .platform = "ppce6500",
2039 },
2022#ifdef CONFIG_PPC32 2040#ifdef CONFIG_PPC32
2023 { /* default match */ 2041 { /* default match */
2024 .pvr_mask = 0x00000000, 2042 .pvr_mask = 0x00000000,
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c
index 28be3452e67a..abef75176c07 100644
--- a/arch/powerpc/kernel/crash.c
+++ b/arch/powerpc/kernel/crash.c
@@ -46,7 +46,6 @@
46 46
47/* This keeps a track of which one is the crashing cpu. */ 47/* This keeps a track of which one is the crashing cpu. */
48int crashing_cpu = -1; 48int crashing_cpu = -1;
49static atomic_t cpus_in_crash;
50static int time_to_dump; 49static int time_to_dump;
51 50
52#define CRASH_HANDLER_MAX 3 51#define CRASH_HANDLER_MAX 3
@@ -66,6 +65,7 @@ static int handle_fault(struct pt_regs *regs)
66 65
67#ifdef CONFIG_SMP 66#ifdef CONFIG_SMP
68 67
68static atomic_t cpus_in_crash;
69void crash_ipi_callback(struct pt_regs *regs) 69void crash_ipi_callback(struct pt_regs *regs)
70{ 70{
71 static cpumask_t cpus_state_saved = CPU_MASK_NONE; 71 static cpumask_t cpus_state_saved = CPU_MASK_NONE;
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index 2cc451aaaca7..5b25c8060fd6 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -37,6 +37,8 @@ void doorbell_exception(struct pt_regs *regs)
37 37
38 irq_enter(); 38 irq_enter();
39 39
40 may_hard_irq_enable();
41
40 smp_ipi_demux(); 42 smp_ipi_demux();
41 43
42 irq_exit(); 44 irq_exit();
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 4f80cf1ce77b..3e57a00b8cba 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -1213,7 +1213,7 @@ do_user_signal: /* r10 contains MSR_KERNEL here */
1213 stw r3,_TRAP(r1) 1213 stw r3,_TRAP(r1)
12142: addi r3,r1,STACK_FRAME_OVERHEAD 12142: addi r3,r1,STACK_FRAME_OVERHEAD
1215 mr r4,r9 1215 mr r4,r9
1216 bl do_signal 1216 bl do_notify_resume
1217 REST_NVGPRS(r1) 1217 REST_NVGPRS(r1)
1218 b recheck 1218 b recheck
1219 1219
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index d834425186ae..f8a7a1a1a9f4 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -32,6 +32,7 @@
32#include <asm/ptrace.h> 32#include <asm/ptrace.h>
33#include <asm/irqflags.h> 33#include <asm/irqflags.h>
34#include <asm/ftrace.h> 34#include <asm/ftrace.h>
35#include <asm/hw_irq.h>
35 36
36/* 37/*
37 * System calls. 38 * System calls.
@@ -115,39 +116,33 @@ BEGIN_FW_FTR_SECTION
115END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) 116END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
116#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */ 117#endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */
117 118
118#ifdef CONFIG_TRACE_IRQFLAGS 119 /*
119 bl .trace_hardirqs_on 120 * A syscall should always be called with interrupts enabled
120 REST_GPR(0,r1) 121 * so we just unconditionally hard-enable here. When some kind
121 REST_4GPRS(3,r1) 122 * of irq tracing is used, we additionally check that condition
122 REST_2GPRS(7,r1) 123 * is correct
123 addi r9,r1,STACK_FRAME_OVERHEAD 124 */
124 ld r12,_MSR(r1) 125#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
125#endif /* CONFIG_TRACE_IRQFLAGS */ 126 lbz r10,PACASOFTIRQEN(r13)
126 li r10,1 127 xori r10,r10,1
127 stb r10,PACASOFTIRQEN(r13) 1281: tdnei r10,0
128 stb r10,PACAHARDIRQEN(r13) 129 EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
129 std r10,SOFTE(r1) 130#endif
130#ifdef CONFIG_PPC_ISERIES
131BEGIN_FW_FTR_SECTION
132 /* Hack for handling interrupts when soft-enabling on iSeries */
133 cmpdi cr1,r0,0x5555 /* syscall 0x5555 */
134 andi. r10,r12,MSR_PR /* from kernel */
135 crand 4*cr0+eq,4*cr1+eq,4*cr0+eq
136 bne 2f
137 b hardware_interrupt_entry
1382:
139END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
140#endif /* CONFIG_PPC_ISERIES */
141 131
142 /* Hard enable interrupts */
143#ifdef CONFIG_PPC_BOOK3E 132#ifdef CONFIG_PPC_BOOK3E
144 wrteei 1 133 wrteei 1
145#else 134#else
146 mfmsr r11 135 ld r11,PACAKMSR(r13)
147 ori r11,r11,MSR_EE 136 ori r11,r11,MSR_EE
148 mtmsrd r11,1 137 mtmsrd r11,1
149#endif /* CONFIG_PPC_BOOK3E */ 138#endif /* CONFIG_PPC_BOOK3E */
150 139
140 /* We do need to set SOFTE in the stack frame or the return
141 * from interrupt will be painful
142 */
143 li r10,1
144 std r10,SOFTE(r1)
145
151#ifdef SHOW_SYSCALLS 146#ifdef SHOW_SYSCALLS
152 bl .do_show_syscall 147 bl .do_show_syscall
153 REST_GPR(0,r1) 148 REST_GPR(0,r1)
@@ -198,16 +193,14 @@ syscall_exit:
198 andi. r10,r8,MSR_RI 193 andi. r10,r8,MSR_RI
199 beq- unrecov_restore 194 beq- unrecov_restore
200#endif 195#endif
201 196 /*
202 /* Disable interrupts so current_thread_info()->flags can't change, 197 * Disable interrupts so current_thread_info()->flags can't change,
203 * and so that we don't get interrupted after loading SRR0/1. 198 * and so that we don't get interrupted after loading SRR0/1.
204 */ 199 */
205#ifdef CONFIG_PPC_BOOK3E 200#ifdef CONFIG_PPC_BOOK3E
206 wrteei 0 201 wrteei 0
207#else 202#else
208 mfmsr r10 203 ld r10,PACAKMSR(r13)
209 rldicl r10,r10,48,1
210 rotldi r10,r10,16
211 mtmsrd r10,1 204 mtmsrd r10,1
212#endif /* CONFIG_PPC_BOOK3E */ 205#endif /* CONFIG_PPC_BOOK3E */
213 206
@@ -319,7 +312,7 @@ syscall_exit_work:
319#ifdef CONFIG_PPC_BOOK3E 312#ifdef CONFIG_PPC_BOOK3E
320 wrteei 1 313 wrteei 1
321#else 314#else
322 mfmsr r10 315 ld r10,PACAKMSR(r13)
323 ori r10,r10,MSR_EE 316 ori r10,r10,MSR_EE
324 mtmsrd r10,1 317 mtmsrd r10,1
325#endif /* CONFIG_PPC_BOOK3E */ 318#endif /* CONFIG_PPC_BOOK3E */
@@ -565,10 +558,8 @@ _GLOBAL(ret_from_except_lite)
565#ifdef CONFIG_PPC_BOOK3E 558#ifdef CONFIG_PPC_BOOK3E
566 wrteei 0 559 wrteei 0
567#else 560#else
568 mfmsr r10 /* Get current interrupt state */ 561 ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
569 rldicl r9,r10,48,1 /* clear MSR_EE */ 562 mtmsrd r10,1 /* Update machine state */
570 rotldi r9,r9,16
571 mtmsrd r9,1 /* Update machine state */
572#endif /* CONFIG_PPC_BOOK3E */ 563#endif /* CONFIG_PPC_BOOK3E */
573 564
574#ifdef CONFIG_PREEMPT 565#ifdef CONFIG_PREEMPT
@@ -591,25 +582,74 @@ _GLOBAL(ret_from_except_lite)
591 ld r4,TI_FLAGS(r9) 582 ld r4,TI_FLAGS(r9)
592 andi. r0,r4,_TIF_USER_WORK_MASK 583 andi. r0,r4,_TIF_USER_WORK_MASK
593 bne do_work 584 bne do_work
594#endif 585#endif /* !CONFIG_PREEMPT */
595 586
587 .globl fast_exc_return_irq
588fast_exc_return_irq:
596restore: 589restore:
597BEGIN_FW_FTR_SECTION 590 /*
591 * This is the main kernel exit path, we first check if we
592 * have to change our interrupt state.
593 */
598 ld r5,SOFTE(r1) 594 ld r5,SOFTE(r1)
599FW_FTR_SECTION_ELSE 595 lbz r6,PACASOFTIRQEN(r13)
600 b .Liseries_check_pending_irqs 596 cmpwi cr1,r5,0
601ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES) 597 cmpw cr0,r5,r6
6022: 598 beq cr0,4f
603 TRACE_AND_RESTORE_IRQ(r5); 599
600 /* We do, handle disable first, which is easy */
601 bne cr1,3f;
602 li r0,0
603 stb r0,PACASOFTIRQEN(r13);
604 TRACE_DISABLE_INTS
605 b 4f
604 606
605 /* extract EE bit and use it to restore paca->hard_enabled */ 6073: /*
606 ld r3,_MSR(r1) 608 * We are about to soft-enable interrupts (we are hard disabled
607 rldicl r4,r3,49,63 /* r0 = (r3 >> 15) & 1 */ 609 * at this point). We check if there's anything that needs to
608 stb r4,PACAHARDIRQEN(r13) 610 * be replayed first.
611 */
612 lbz r0,PACAIRQHAPPENED(r13)
613 cmpwi cr0,r0,0
614 bne- restore_check_irq_replay
609 615
616 /*
617 * Get here when nothing happened while soft-disabled, just
618 * soft-enable and move-on. We will hard-enable as a side
619 * effect of rfi
620 */
621restore_no_replay:
622 TRACE_ENABLE_INTS
623 li r0,1
624 stb r0,PACASOFTIRQEN(r13);
625
626 /*
627 * Final return path. BookE is handled in a different file
628 */
6294:
610#ifdef CONFIG_PPC_BOOK3E 630#ifdef CONFIG_PPC_BOOK3E
611 b .exception_return_book3e 631 b .exception_return_book3e
612#else 632#else
633 /*
634 * Clear the reservation. If we know the CPU tracks the address of
635 * the reservation then we can potentially save some cycles and use
636 * a larx. On POWER6 and POWER7 this is significantly faster.
637 */
638BEGIN_FTR_SECTION
639 stdcx. r0,0,r1 /* to clear the reservation */
640FTR_SECTION_ELSE
641 ldarx r4,0,r1
642ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
643
644 /*
645 * Some code path such as load_up_fpu or altivec return directly
646 * here. They run entirely hard disabled and do not alter the
647 * interrupt state. They also don't use lwarx/stwcx. and thus
648 * are known not to leave dangling reservations.
649 */
650 .globl fast_exception_return
651fast_exception_return:
652 ld r3,_MSR(r1)
613 ld r4,_CTR(r1) 653 ld r4,_CTR(r1)
614 ld r0,_LINK(r1) 654 ld r0,_LINK(r1)
615 mtctr r4 655 mtctr r4
@@ -623,28 +663,18 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES)
623 beq- unrecov_restore 663 beq- unrecov_restore
624 664
625 /* 665 /*
626 * Clear the reservation. If we know the CPU tracks the address of
627 * the reservation then we can potentially save some cycles and use
628 * a larx. On POWER6 and POWER7 this is significantly faster.
629 */
630BEGIN_FTR_SECTION
631 stdcx. r0,0,r1 /* to clear the reservation */
632FTR_SECTION_ELSE
633 ldarx r4,0,r1
634ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
635
636 /*
637 * Clear RI before restoring r13. If we are returning to 666 * Clear RI before restoring r13. If we are returning to
638 * userspace and we take an exception after restoring r13, 667 * userspace and we take an exception after restoring r13,
639 * we end up corrupting the userspace r13 value. 668 * we end up corrupting the userspace r13 value.
640 */ 669 */
641 mfmsr r4 670 ld r4,PACAKMSR(r13) /* Get kernel MSR without EE */
642 andc r4,r4,r0 /* r0 contains MSR_RI here */ 671 andc r4,r4,r0 /* r0 contains MSR_RI here */
643 mtmsrd r4,1 672 mtmsrd r4,1
644 673
645 /* 674 /*
646 * r13 is our per cpu area, only restore it if we are returning to 675 * r13 is our per cpu area, only restore it if we are returning to
647 * userspace 676 * userspace the value stored in the stack frame may belong to
677 * another CPU.
648 */ 678 */
649 andi. r0,r3,MSR_PR 679 andi. r0,r3,MSR_PR
650 beq 1f 680 beq 1f
@@ -669,30 +699,55 @@ ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
669 699
670#endif /* CONFIG_PPC_BOOK3E */ 700#endif /* CONFIG_PPC_BOOK3E */
671 701
672.Liseries_check_pending_irqs: 702 /*
673#ifdef CONFIG_PPC_ISERIES 703 * Something did happen, check if a re-emit is needed
674 ld r5,SOFTE(r1) 704 * (this also clears paca->irq_happened)
675 cmpdi 0,r5,0 705 */
676 beq 2b 706restore_check_irq_replay:
677 /* Check for pending interrupts (iSeries) */ 707 /* XXX: We could implement a fast path here where we check
678 ld r3,PACALPPACAPTR(r13) 708 * for irq_happened being just 0x01, in which case we can
679 ld r3,LPPACAANYINT(r3) 709 * clear it and return. That means that we would potentially
680 cmpdi r3,0 710 * miss a decrementer having wrapped all the way around.
681 beq+ 2b /* skip do_IRQ if no interrupts */ 711 *
682 712 * Still, this might be useful for things like hash_page
683 li r3,0 713 */
684 stb r3,PACASOFTIRQEN(r13) /* ensure we are soft-disabled */ 714 bl .__check_irq_replay
685#ifdef CONFIG_TRACE_IRQFLAGS 715 cmpwi cr0,r3,0
686 bl .trace_hardirqs_off 716 beq restore_no_replay
687 mfmsr r10 717
688#endif 718 /*
689 ori r10,r10,MSR_EE 719 * We need to re-emit an interrupt. We do so by re-using our
690 mtmsrd r10 /* hard-enable again */ 720 * existing exception frame. We first change the trap value,
691 addi r3,r1,STACK_FRAME_OVERHEAD 721 * but we need to ensure we preserve the low nibble of it
692 bl .do_IRQ 722 */
693 b .ret_from_except_lite /* loop back and handle more */ 723 ld r4,_TRAP(r1)
694#endif 724 clrldi r4,r4,60
725 or r4,r4,r3
726 std r4,_TRAP(r1)
695 727
728 /*
729 * Then find the right handler and call it. Interrupts are
730 * still soft-disabled and we keep them that way.
731 */
732 cmpwi cr0,r3,0x500
733 bne 1f
734 addi r3,r1,STACK_FRAME_OVERHEAD;
735 bl .do_IRQ
736 b .ret_from_except
7371: cmpwi cr0,r3,0x900
738 bne 1f
739 addi r3,r1,STACK_FRAME_OVERHEAD;
740 bl .timer_interrupt
741 b .ret_from_except
742#ifdef CONFIG_PPC_BOOK3E
7431: cmpwi cr0,r3,0x280
744 bne 1f
745 addi r3,r1,STACK_FRAME_OVERHEAD;
746 bl .doorbell_exception
747 b .ret_from_except
748#endif /* CONFIG_PPC_BOOK3E */
7491: b .ret_from_except /* What else to do here ? */
750
696do_work: 751do_work:
697#ifdef CONFIG_PREEMPT 752#ifdef CONFIG_PREEMPT
698 andi. r0,r3,MSR_PR /* Returning to user mode? */ 753 andi. r0,r3,MSR_PR /* Returning to user mode? */
@@ -705,31 +760,22 @@ do_work:
705 crandc eq,cr1*4+eq,eq 760 crandc eq,cr1*4+eq,eq
706 bne restore 761 bne restore
707 762
708 /* Here we are preempting the current task. 763 /*
709 * 764 * Here we are preempting the current task. We want to make
710 * Ensure interrupts are soft-disabled. We also properly mark 765 * sure we are soft-disabled first
711 * the PACA to reflect the fact that they are hard-disabled
712 * and trace the change
713 */ 766 */
714 li r0,0 767 SOFT_DISABLE_INTS(r3,r4)
715 stb r0,PACASOFTIRQEN(r13)
716 stb r0,PACAHARDIRQEN(r13)
717 TRACE_DISABLE_INTS
718
719 /* Call the scheduler with soft IRQs off */
7201: bl .preempt_schedule_irq 7681: bl .preempt_schedule_irq
721 769
722 /* Hard-disable interrupts again (and update PACA) */ 770 /* Hard-disable interrupts again (and update PACA) */
723#ifdef CONFIG_PPC_BOOK3E 771#ifdef CONFIG_PPC_BOOK3E
724 wrteei 0 772 wrteei 0
725#else 773#else
726 mfmsr r10 774 ld r10,PACAKMSR(r13) /* Get kernel MSR without EE */
727 rldicl r10,r10,48,1
728 rotldi r10,r10,16
729 mtmsrd r10,1 775 mtmsrd r10,1
730#endif /* CONFIG_PPC_BOOK3E */ 776#endif /* CONFIG_PPC_BOOK3E */
731 li r0,0 777 li r0,PACA_IRQ_HARD_DIS
732 stb r0,PACAHARDIRQEN(r13) 778 stb r0,PACAIRQHAPPENED(r13)
733 779
734 /* Re-test flags and eventually loop */ 780 /* Re-test flags and eventually loop */
735 clrrdi r9,r1,THREAD_SHIFT 781 clrrdi r9,r1,THREAD_SHIFT
@@ -751,12 +797,14 @@ user_work:
751 797
752 andi. r0,r4,_TIF_NEED_RESCHED 798 andi. r0,r4,_TIF_NEED_RESCHED
753 beq 1f 799 beq 1f
800 bl .restore_interrupts
754 bl .schedule 801 bl .schedule
755 b .ret_from_except_lite 802 b .ret_from_except_lite
756 803
7571: bl .save_nvgprs 8041: bl .save_nvgprs
805 bl .restore_interrupts
758 addi r3,r1,STACK_FRAME_OVERHEAD 806 addi r3,r1,STACK_FRAME_OVERHEAD
759 bl .do_signal 807 bl .do_notify_resume
760 b .ret_from_except 808 b .ret_from_except
761 809
762unrecov_restore: 810unrecov_restore:
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 429983c06f91..7215cc2495df 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -24,6 +24,7 @@
24#include <asm/ptrace.h> 24#include <asm/ptrace.h>
25#include <asm/ppc-opcode.h> 25#include <asm/ppc-opcode.h>
26#include <asm/mmu.h> 26#include <asm/mmu.h>
27#include <asm/hw_irq.h>
27 28
28/* XXX This will ultimately add space for a special exception save 29/* XXX This will ultimately add space for a special exception save
29 * structure used to save things like SRR0/SRR1, SPRGs, MAS, etc... 30 * structure used to save things like SRR0/SRR1, SPRGs, MAS, etc...
@@ -77,59 +78,55 @@
77#define SPRN_MC_SRR1 SPRN_MCSRR1 78#define SPRN_MC_SRR1 SPRN_MCSRR1
78 79
79#define NORMAL_EXCEPTION_PROLOG(n, addition) \ 80#define NORMAL_EXCEPTION_PROLOG(n, addition) \
80 EXCEPTION_PROLOG(n, GEN, addition##_GEN) 81 EXCEPTION_PROLOG(n, GEN, addition##_GEN(n))
81 82
82#define CRIT_EXCEPTION_PROLOG(n, addition) \ 83#define CRIT_EXCEPTION_PROLOG(n, addition) \
83 EXCEPTION_PROLOG(n, CRIT, addition##_CRIT) 84 EXCEPTION_PROLOG(n, CRIT, addition##_CRIT(n))
84 85
85#define DBG_EXCEPTION_PROLOG(n, addition) \ 86#define DBG_EXCEPTION_PROLOG(n, addition) \
86 EXCEPTION_PROLOG(n, DBG, addition##_DBG) 87 EXCEPTION_PROLOG(n, DBG, addition##_DBG(n))
87 88
88#define MC_EXCEPTION_PROLOG(n, addition) \ 89#define MC_EXCEPTION_PROLOG(n, addition) \
89 EXCEPTION_PROLOG(n, MC, addition##_MC) 90 EXCEPTION_PROLOG(n, MC, addition##_MC(n))
90 91
91 92
92/* Variants of the "addition" argument for the prolog 93/* Variants of the "addition" argument for the prolog
93 */ 94 */
94#define PROLOG_ADDITION_NONE_GEN 95#define PROLOG_ADDITION_NONE_GEN(n)
95#define PROLOG_ADDITION_NONE_CRIT 96#define PROLOG_ADDITION_NONE_CRIT(n)
96#define PROLOG_ADDITION_NONE_DBG 97#define PROLOG_ADDITION_NONE_DBG(n)
97#define PROLOG_ADDITION_NONE_MC 98#define PROLOG_ADDITION_NONE_MC(n)
98 99
99#define PROLOG_ADDITION_MASKABLE_GEN \ 100#define PROLOG_ADDITION_MASKABLE_GEN(n) \
100 lbz r11,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \ 101 lbz r11,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \
101 cmpwi cr0,r11,0; /* yes -> go out of line */ \ 102 cmpwi cr0,r11,0; /* yes -> go out of line */ \
102 beq masked_interrupt_book3e; 103 beq masked_interrupt_book3e_##n
103 104
104#define PROLOG_ADDITION_2REGS_GEN \ 105#define PROLOG_ADDITION_2REGS_GEN(n) \
105 std r14,PACA_EXGEN+EX_R14(r13); \ 106 std r14,PACA_EXGEN+EX_R14(r13); \
106 std r15,PACA_EXGEN+EX_R15(r13) 107 std r15,PACA_EXGEN+EX_R15(r13)
107 108
108#define PROLOG_ADDITION_1REG_GEN \ 109#define PROLOG_ADDITION_1REG_GEN(n) \
109 std r14,PACA_EXGEN+EX_R14(r13); 110 std r14,PACA_EXGEN+EX_R14(r13);
110 111
111#define PROLOG_ADDITION_2REGS_CRIT \ 112#define PROLOG_ADDITION_2REGS_CRIT(n) \
112 std r14,PACA_EXCRIT+EX_R14(r13); \ 113 std r14,PACA_EXCRIT+EX_R14(r13); \
113 std r15,PACA_EXCRIT+EX_R15(r13) 114 std r15,PACA_EXCRIT+EX_R15(r13)
114 115
115#define PROLOG_ADDITION_2REGS_DBG \ 116#define PROLOG_ADDITION_2REGS_DBG(n) \
116 std r14,PACA_EXDBG+EX_R14(r13); \ 117 std r14,PACA_EXDBG+EX_R14(r13); \
117 std r15,PACA_EXDBG+EX_R15(r13) 118 std r15,PACA_EXDBG+EX_R15(r13)
118 119
119#define PROLOG_ADDITION_2REGS_MC \ 120#define PROLOG_ADDITION_2REGS_MC(n) \
120 std r14,PACA_EXMC+EX_R14(r13); \ 121 std r14,PACA_EXMC+EX_R14(r13); \
121 std r15,PACA_EXMC+EX_R15(r13) 122 std r15,PACA_EXMC+EX_R15(r13)
122 123
123#define PROLOG_ADDITION_DOORBELL_GEN \
124 lbz r11,PACASOFTIRQEN(r13); /* are irqs soft-disabled ? */ \
125 cmpwi cr0,r11,0; /* yes -> go out of line */ \
126 beq masked_doorbell_book3e
127
128 124
129/* Core exception code for all exceptions except TLB misses. 125/* Core exception code for all exceptions except TLB misses.
130 * XXX: Needs to make SPRN_SPRG_GEN depend on exception type 126 * XXX: Needs to make SPRN_SPRG_GEN depend on exception type
131 */ 127 */
132#define EXCEPTION_COMMON(n, excf, ints) \ 128#define EXCEPTION_COMMON(n, excf, ints) \
129exc_##n##_common: \
133 std r0,GPR0(r1); /* save r0 in stackframe */ \ 130 std r0,GPR0(r1); /* save r0 in stackframe */ \
134 std r2,GPR2(r1); /* save r2 in stackframe */ \ 131 std r2,GPR2(r1); /* save r2 in stackframe */ \
135 SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \ 132 SAVE_4GPRS(3, r1); /* save r3 - r6 in stackframe */ \
@@ -167,20 +164,21 @@
167 std r0,RESULT(r1); /* clear regs->result */ \ 164 std r0,RESULT(r1); /* clear regs->result */ \
168 ints; 165 ints;
169 166
170/* Variants for the "ints" argument */ 167/* Variants for the "ints" argument. This one does nothing when we want
168 * to keep interrupts in their original state
169 */
171#define INTS_KEEP 170#define INTS_KEEP
172#define INTS_DISABLE_SOFT \ 171
173 stb r0,PACASOFTIRQEN(r13); /* mark interrupts soft-disabled */ \ 172/* This second version is meant for exceptions that don't immediately
174 TRACE_DISABLE_INTS; 173 * hard-enable. We set a bit in paca->irq_happened to ensure that
175#define INTS_DISABLE_HARD \ 174 * a subsequent call to arch_local_irq_restore() will properly
176 stb r0,PACAHARDIRQEN(r13); /* and hard disabled */ 175 * hard-enable and avoid the fast-path
177#define INTS_DISABLE_ALL \ 176 */
178 INTS_DISABLE_SOFT \ 177#define INTS_DISABLE SOFT_DISABLE_INTS(r3,r4)
179 INTS_DISABLE_HARD 178
180 179/* This is called by exceptions that used INTS_KEEP (that did not touch
181/* This is called by exceptions that used INTS_KEEP (that is did not clear 180 * irq indicators in the PACA). This will restore MSR:EE to it's previous
182 * neither soft nor hard IRQ indicators in the PACA. This will restore MSR:EE 181 * value
183 * to it's previous value
184 * 182 *
185 * XXX In the long run, we may want to open-code it in order to separate the 183 * XXX In the long run, we may want to open-code it in order to separate the
186 * load from the wrtee, thus limiting the latency caused by the dependency 184 * load from the wrtee, thus limiting the latency caused by the dependency
@@ -238,7 +236,7 @@ exc_##n##_bad_stack: \
238#define MASKABLE_EXCEPTION(trapnum, label, hdlr, ack) \ 236#define MASKABLE_EXCEPTION(trapnum, label, hdlr, ack) \
239 START_EXCEPTION(label); \ 237 START_EXCEPTION(label); \
240 NORMAL_EXCEPTION_PROLOG(trapnum, PROLOG_ADDITION_MASKABLE) \ 238 NORMAL_EXCEPTION_PROLOG(trapnum, PROLOG_ADDITION_MASKABLE) \
241 EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE_ALL) \ 239 EXCEPTION_COMMON(trapnum, PACA_EXGEN, INTS_DISABLE) \
242 ack(r8); \ 240 ack(r8); \
243 CHECK_NAPPING(); \ 241 CHECK_NAPPING(); \
244 addi r3,r1,STACK_FRAME_OVERHEAD; \ 242 addi r3,r1,STACK_FRAME_OVERHEAD; \
@@ -289,7 +287,7 @@ interrupt_end_book3e:
289/* Critical Input Interrupt */ 287/* Critical Input Interrupt */
290 START_EXCEPTION(critical_input); 288 START_EXCEPTION(critical_input);
291 CRIT_EXCEPTION_PROLOG(0x100, PROLOG_ADDITION_NONE) 289 CRIT_EXCEPTION_PROLOG(0x100, PROLOG_ADDITION_NONE)
292// EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE_ALL) 290// EXCEPTION_COMMON(0x100, PACA_EXCRIT, INTS_DISABLE)
293// bl special_reg_save_crit 291// bl special_reg_save_crit
294// CHECK_NAPPING(); 292// CHECK_NAPPING();
295// addi r3,r1,STACK_FRAME_OVERHEAD 293// addi r3,r1,STACK_FRAME_OVERHEAD
@@ -300,7 +298,7 @@ interrupt_end_book3e:
300/* Machine Check Interrupt */ 298/* Machine Check Interrupt */
301 START_EXCEPTION(machine_check); 299 START_EXCEPTION(machine_check);
302 CRIT_EXCEPTION_PROLOG(0x200, PROLOG_ADDITION_NONE) 300 CRIT_EXCEPTION_PROLOG(0x200, PROLOG_ADDITION_NONE)
303// EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE_ALL) 301// EXCEPTION_COMMON(0x200, PACA_EXMC, INTS_DISABLE)
304// bl special_reg_save_mc 302// bl special_reg_save_mc
305// addi r3,r1,STACK_FRAME_OVERHEAD 303// addi r3,r1,STACK_FRAME_OVERHEAD
306// CHECK_NAPPING(); 304// CHECK_NAPPING();
@@ -313,7 +311,7 @@ interrupt_end_book3e:
313 NORMAL_EXCEPTION_PROLOG(0x300, PROLOG_ADDITION_2REGS) 311 NORMAL_EXCEPTION_PROLOG(0x300, PROLOG_ADDITION_2REGS)
314 mfspr r14,SPRN_DEAR 312 mfspr r14,SPRN_DEAR
315 mfspr r15,SPRN_ESR 313 mfspr r15,SPRN_ESR
316 EXCEPTION_COMMON(0x300, PACA_EXGEN, INTS_KEEP) 314 EXCEPTION_COMMON(0x300, PACA_EXGEN, INTS_DISABLE)
317 b storage_fault_common 315 b storage_fault_common
318 316
319/* Instruction Storage Interrupt */ 317/* Instruction Storage Interrupt */
@@ -321,7 +319,7 @@ interrupt_end_book3e:
321 NORMAL_EXCEPTION_PROLOG(0x400, PROLOG_ADDITION_2REGS) 319 NORMAL_EXCEPTION_PROLOG(0x400, PROLOG_ADDITION_2REGS)
322 li r15,0 320 li r15,0
323 mr r14,r10 321 mr r14,r10
324 EXCEPTION_COMMON(0x400, PACA_EXGEN, INTS_KEEP) 322 EXCEPTION_COMMON(0x400, PACA_EXGEN, INTS_DISABLE)
325 b storage_fault_common 323 b storage_fault_common
326 324
327/* External Input Interrupt */ 325/* External Input Interrupt */
@@ -339,12 +337,11 @@ interrupt_end_book3e:
339 START_EXCEPTION(program); 337 START_EXCEPTION(program);
340 NORMAL_EXCEPTION_PROLOG(0x700, PROLOG_ADDITION_1REG) 338 NORMAL_EXCEPTION_PROLOG(0x700, PROLOG_ADDITION_1REG)
341 mfspr r14,SPRN_ESR 339 mfspr r14,SPRN_ESR
342 EXCEPTION_COMMON(0x700, PACA_EXGEN, INTS_DISABLE_SOFT) 340 EXCEPTION_COMMON(0x700, PACA_EXGEN, INTS_DISABLE)
343 std r14,_DSISR(r1) 341 std r14,_DSISR(r1)
344 addi r3,r1,STACK_FRAME_OVERHEAD 342 addi r3,r1,STACK_FRAME_OVERHEAD
345 ld r14,PACA_EXGEN+EX_R14(r13) 343 ld r14,PACA_EXGEN+EX_R14(r13)
346 bl .save_nvgprs 344 bl .save_nvgprs
347 INTS_RESTORE_HARD
348 bl .program_check_exception 345 bl .program_check_exception
349 b .ret_from_except 346 b .ret_from_except
350 347
@@ -353,15 +350,16 @@ interrupt_end_book3e:
353 NORMAL_EXCEPTION_PROLOG(0x800, PROLOG_ADDITION_NONE) 350 NORMAL_EXCEPTION_PROLOG(0x800, PROLOG_ADDITION_NONE)
354 /* we can probably do a shorter exception entry for that one... */ 351 /* we can probably do a shorter exception entry for that one... */
355 EXCEPTION_COMMON(0x800, PACA_EXGEN, INTS_KEEP) 352 EXCEPTION_COMMON(0x800, PACA_EXGEN, INTS_KEEP)
356 bne 1f /* if from user, just load it up */ 353 ld r12,_MSR(r1)
354 andi. r0,r12,MSR_PR;
355 beq- 1f
356 bl .load_up_fpu
357 b fast_exception_return
3581: INTS_DISABLE
357 bl .save_nvgprs 359 bl .save_nvgprs
358 addi r3,r1,STACK_FRAME_OVERHEAD 360 addi r3,r1,STACK_FRAME_OVERHEAD
359 INTS_RESTORE_HARD
360 bl .kernel_fp_unavailable_exception 361 bl .kernel_fp_unavailable_exception
361 BUG_OPCODE 362 b .ret_from_except
3621: ld r12,_MSR(r1)
363 bl .load_up_fpu
364 b fast_exception_return
365 363
366/* Decrementer Interrupt */ 364/* Decrementer Interrupt */
367 MASKABLE_EXCEPTION(0x900, decrementer, .timer_interrupt, ACK_DEC) 365 MASKABLE_EXCEPTION(0x900, decrementer, .timer_interrupt, ACK_DEC)
@@ -372,7 +370,7 @@ interrupt_end_book3e:
372/* Watchdog Timer Interrupt */ 370/* Watchdog Timer Interrupt */
373 START_EXCEPTION(watchdog); 371 START_EXCEPTION(watchdog);
374 CRIT_EXCEPTION_PROLOG(0x9f0, PROLOG_ADDITION_NONE) 372 CRIT_EXCEPTION_PROLOG(0x9f0, PROLOG_ADDITION_NONE)
375// EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE_ALL) 373// EXCEPTION_COMMON(0x9f0, PACA_EXCRIT, INTS_DISABLE)
376// bl special_reg_save_crit 374// bl special_reg_save_crit
377// CHECK_NAPPING(); 375// CHECK_NAPPING();
378// addi r3,r1,STACK_FRAME_OVERHEAD 376// addi r3,r1,STACK_FRAME_OVERHEAD
@@ -391,10 +389,9 @@ interrupt_end_book3e:
391/* Auxiliary Processor Unavailable Interrupt */ 389/* Auxiliary Processor Unavailable Interrupt */
392 START_EXCEPTION(ap_unavailable); 390 START_EXCEPTION(ap_unavailable);
393 NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE) 391 NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE)
394 EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_KEEP) 392 EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_DISABLE)
395 addi r3,r1,STACK_FRAME_OVERHEAD
396 bl .save_nvgprs 393 bl .save_nvgprs
397 INTS_RESTORE_HARD 394 addi r3,r1,STACK_FRAME_OVERHEAD
398 bl .unknown_exception 395 bl .unknown_exception
399 b .ret_from_except 396 b .ret_from_except
400 397
@@ -450,7 +447,7 @@ interrupt_end_book3e:
450 mfspr r15,SPRN_SPRG_CRIT_SCRATCH 447 mfspr r15,SPRN_SPRG_CRIT_SCRATCH
451 mtspr SPRN_SPRG_GEN_SCRATCH,r15 448 mtspr SPRN_SPRG_GEN_SCRATCH,r15
452 mfspr r14,SPRN_DBSR 449 mfspr r14,SPRN_DBSR
453 EXCEPTION_COMMON(0xd00, PACA_EXCRIT, INTS_DISABLE_ALL) 450 EXCEPTION_COMMON(0xd00, PACA_EXCRIT, INTS_DISABLE)
454 std r14,_DSISR(r1) 451 std r14,_DSISR(r1)
455 addi r3,r1,STACK_FRAME_OVERHEAD 452 addi r3,r1,STACK_FRAME_OVERHEAD
456 mr r4,r14 453 mr r4,r14
@@ -465,7 +462,7 @@ kernel_dbg_exc:
465 462
466/* Debug exception as a debug interrupt*/ 463/* Debug exception as a debug interrupt*/
467 START_EXCEPTION(debug_debug); 464 START_EXCEPTION(debug_debug);
468 DBG_EXCEPTION_PROLOG(0xd00, PROLOG_ADDITION_2REGS) 465 DBG_EXCEPTION_PROLOG(0xd08, PROLOG_ADDITION_2REGS)
469 466
470 /* 467 /*
471 * If there is a single step or branch-taken exception in an 468 * If there is a single step or branch-taken exception in an
@@ -515,7 +512,7 @@ kernel_dbg_exc:
515 mfspr r15,SPRN_SPRG_DBG_SCRATCH 512 mfspr r15,SPRN_SPRG_DBG_SCRATCH
516 mtspr SPRN_SPRG_GEN_SCRATCH,r15 513 mtspr SPRN_SPRG_GEN_SCRATCH,r15
517 mfspr r14,SPRN_DBSR 514 mfspr r14,SPRN_DBSR
518 EXCEPTION_COMMON(0xd00, PACA_EXDBG, INTS_DISABLE_ALL) 515 EXCEPTION_COMMON(0xd08, PACA_EXDBG, INTS_DISABLE)
519 std r14,_DSISR(r1) 516 std r14,_DSISR(r1)
520 addi r3,r1,STACK_FRAME_OVERHEAD 517 addi r3,r1,STACK_FRAME_OVERHEAD
521 mr r4,r14 518 mr r4,r14
@@ -525,21 +522,20 @@ kernel_dbg_exc:
525 bl .DebugException 522 bl .DebugException
526 b .ret_from_except 523 b .ret_from_except
527 524
528 MASKABLE_EXCEPTION(0x260, perfmon, .performance_monitor_exception, ACK_NONE) 525 START_EXCEPTION(perfmon);
529 526 NORMAL_EXCEPTION_PROLOG(0x260, PROLOG_ADDITION_NONE)
530/* Doorbell interrupt */ 527 EXCEPTION_COMMON(0x260, PACA_EXGEN, INTS_DISABLE)
531 START_EXCEPTION(doorbell)
532 NORMAL_EXCEPTION_PROLOG(0x2070, PROLOG_ADDITION_DOORBELL)
533 EXCEPTION_COMMON(0x2070, PACA_EXGEN, INTS_DISABLE_ALL)
534 CHECK_NAPPING()
535 addi r3,r1,STACK_FRAME_OVERHEAD 528 addi r3,r1,STACK_FRAME_OVERHEAD
536 bl .doorbell_exception 529 bl .performance_monitor_exception
537 b .ret_from_except_lite 530 b .ret_from_except_lite
538 531
532/* Doorbell interrupt */
533 MASKABLE_EXCEPTION(0x280, doorbell, .doorbell_exception, ACK_NONE)
534
539/* Doorbell critical Interrupt */ 535/* Doorbell critical Interrupt */
540 START_EXCEPTION(doorbell_crit); 536 START_EXCEPTION(doorbell_crit);
541 CRIT_EXCEPTION_PROLOG(0x2080, PROLOG_ADDITION_NONE) 537 CRIT_EXCEPTION_PROLOG(0x2a0, PROLOG_ADDITION_NONE)
542// EXCEPTION_COMMON(0x2080, PACA_EXCRIT, INTS_DISABLE_ALL) 538// EXCEPTION_COMMON(0x2a0, PACA_EXCRIT, INTS_DISABLE)
543// bl special_reg_save_crit 539// bl special_reg_save_crit
544// CHECK_NAPPING(); 540// CHECK_NAPPING();
545// addi r3,r1,STACK_FRAME_OVERHEAD 541// addi r3,r1,STACK_FRAME_OVERHEAD
@@ -547,36 +543,114 @@ kernel_dbg_exc:
547// b ret_from_crit_except 543// b ret_from_crit_except
548 b . 544 b .
549 545
546/* Guest Doorbell */
550 MASKABLE_EXCEPTION(0x2c0, guest_doorbell, .unknown_exception, ACK_NONE) 547 MASKABLE_EXCEPTION(0x2c0, guest_doorbell, .unknown_exception, ACK_NONE)
551 MASKABLE_EXCEPTION(0x2e0, guest_doorbell_crit, .unknown_exception, ACK_NONE)
552 MASKABLE_EXCEPTION(0x310, hypercall, .unknown_exception, ACK_NONE)
553 MASKABLE_EXCEPTION(0x320, ehpriv, .unknown_exception, ACK_NONE)
554 548
549/* Guest Doorbell critical Interrupt */
550 START_EXCEPTION(guest_doorbell_crit);
551 CRIT_EXCEPTION_PROLOG(0x2e0, PROLOG_ADDITION_NONE)
552// EXCEPTION_COMMON(0x2e0, PACA_EXCRIT, INTS_DISABLE)
553// bl special_reg_save_crit
554// CHECK_NAPPING();
555// addi r3,r1,STACK_FRAME_OVERHEAD
556// bl .guest_doorbell_critical_exception
557// b ret_from_crit_except
558 b .
559
560/* Hypervisor call */
561 START_EXCEPTION(hypercall);
562 NORMAL_EXCEPTION_PROLOG(0x310, PROLOG_ADDITION_NONE)
563 EXCEPTION_COMMON(0x310, PACA_EXGEN, INTS_KEEP)
564 addi r3,r1,STACK_FRAME_OVERHEAD
565 bl .save_nvgprs
566 INTS_RESTORE_HARD
567 bl .unknown_exception
568 b .ret_from_except
569
570/* Embedded Hypervisor priviledged */
571 START_EXCEPTION(ehpriv);
572 NORMAL_EXCEPTION_PROLOG(0x320, PROLOG_ADDITION_NONE)
573 EXCEPTION_COMMON(0x320, PACA_EXGEN, INTS_KEEP)
574 addi r3,r1,STACK_FRAME_OVERHEAD
575 bl .save_nvgprs
576 INTS_RESTORE_HARD
577 bl .unknown_exception
578 b .ret_from_except
555 579
556/* 580/*
557 * An interrupt came in while soft-disabled; clear EE in SRR1, 581 * An interrupt came in while soft-disabled; We mark paca->irq_happened
558 * clear paca->hard_enabled and return. 582 * accordingly and if the interrupt is level sensitive, we hard disable
559 */ 583 */
560masked_doorbell_book3e:
561 mtcr r10
562 /* Resend the doorbell to fire again when ints enabled */
563 mfspr r10,SPRN_PIR
564 PPC_MSGSND(r10)
565 b masked_interrupt_book3e_common
566 584
567masked_interrupt_book3e: 585masked_interrupt_book3e_0x500:
586 /* XXX When adding support for EPR, use PACA_IRQ_EE_EDGE */
587 li r11,PACA_IRQ_EE
588 b masked_interrupt_book3e_full_mask
589
590masked_interrupt_book3e_0x900:
591 ACK_DEC(r11);
592 li r11,PACA_IRQ_DEC
593 b masked_interrupt_book3e_no_mask
594masked_interrupt_book3e_0x980:
595 ACK_FIT(r11);
596 li r11,PACA_IRQ_DEC
597 b masked_interrupt_book3e_no_mask
598masked_interrupt_book3e_0x280:
599masked_interrupt_book3e_0x2c0:
600 li r11,PACA_IRQ_DBELL
601 b masked_interrupt_book3e_no_mask
602
603masked_interrupt_book3e_no_mask:
604 mtcr r10
605 lbz r10,PACAIRQHAPPENED(r13)
606 or r10,r10,r11
607 stb r10,PACAIRQHAPPENED(r13)
608 b 1f
609masked_interrupt_book3e_full_mask:
568 mtcr r10 610 mtcr r10
569masked_interrupt_book3e_common: 611 lbz r10,PACAIRQHAPPENED(r13)
570 stb r11,PACAHARDIRQEN(r13) 612 or r10,r10,r11
613 stb r10,PACAIRQHAPPENED(r13)
571 mfspr r10,SPRN_SRR1 614 mfspr r10,SPRN_SRR1
572 rldicl r11,r10,48,1 /* clear MSR_EE */ 615 rldicl r11,r10,48,1 /* clear MSR_EE */
573 rotldi r10,r11,16 616 rotldi r10,r11,16
574 mtspr SPRN_SRR1,r10 617 mtspr SPRN_SRR1,r10
575 ld r10,PACA_EXGEN+EX_R10(r13); /* restore registers */ 6181: ld r10,PACA_EXGEN+EX_R10(r13);
576 ld r11,PACA_EXGEN+EX_R11(r13); 619 ld r11,PACA_EXGEN+EX_R11(r13);
577 mfspr r13,SPRN_SPRG_GEN_SCRATCH; 620 mfspr r13,SPRN_SPRG_GEN_SCRATCH;
578 rfi 621 rfi
579 b . 622 b .
623/*
624 * Called from arch_local_irq_enable when an interrupt needs
625 * to be resent. r3 contains either 0x500,0x900,0x260 or 0x280
626 * to indicate the kind of interrupt. MSR:EE is already off.
627 * We generate a stackframe like if a real interrupt had happened.
628 *
629 * Note: While MSR:EE is off, we need to make sure that _MSR
630 * in the generated frame has EE set to 1 or the exception
631 * handler will not properly re-enable them.
632 */
633_GLOBAL(__replay_interrupt)
634 /* We are going to jump to the exception common code which
635 * will retrieve various register values from the PACA which
636 * we don't give a damn about.
637 */
638 mflr r10
639 mfmsr r11
640 mfcr r4
641 mtspr SPRN_SPRG_GEN_SCRATCH,r13;
642 std r1,PACA_EXGEN+EX_R1(r13);
643 stw r4,PACA_EXGEN+EX_CR(r13);
644 ori r11,r11,MSR_EE
645 subi r1,r1,INT_FRAME_SIZE;
646 cmpwi cr0,r3,0x500
647 beq exc_0x500_common
648 cmpwi cr0,r3,0x900
649 beq exc_0x900_common
650 cmpwi cr0,r3,0x280
651 beq exc_0x280_common
652 blr
653
580 654
581/* 655/*
582 * This is called from 0x300 and 0x400 handlers after the prologs with 656 * This is called from 0x300 and 0x400 handlers after the prologs with
@@ -591,7 +665,6 @@ storage_fault_common:
591 mr r5,r15 665 mr r5,r15
592 ld r14,PACA_EXGEN+EX_R14(r13) 666 ld r14,PACA_EXGEN+EX_R14(r13)
593 ld r15,PACA_EXGEN+EX_R15(r13) 667 ld r15,PACA_EXGEN+EX_R15(r13)
594 INTS_RESTORE_HARD
595 bl .do_page_fault 668 bl .do_page_fault
596 cmpdi r3,0 669 cmpdi r3,0
597 bne- 1f 670 bne- 1f
@@ -680,6 +753,8 @@ BAD_STACK_TRAMPOLINE(0x000)
680BAD_STACK_TRAMPOLINE(0x100) 753BAD_STACK_TRAMPOLINE(0x100)
681BAD_STACK_TRAMPOLINE(0x200) 754BAD_STACK_TRAMPOLINE(0x200)
682BAD_STACK_TRAMPOLINE(0x260) 755BAD_STACK_TRAMPOLINE(0x260)
756BAD_STACK_TRAMPOLINE(0x280)
757BAD_STACK_TRAMPOLINE(0x2a0)
683BAD_STACK_TRAMPOLINE(0x2c0) 758BAD_STACK_TRAMPOLINE(0x2c0)
684BAD_STACK_TRAMPOLINE(0x2e0) 759BAD_STACK_TRAMPOLINE(0x2e0)
685BAD_STACK_TRAMPOLINE(0x300) 760BAD_STACK_TRAMPOLINE(0x300)
@@ -697,11 +772,10 @@ BAD_STACK_TRAMPOLINE(0xa00)
697BAD_STACK_TRAMPOLINE(0xb00) 772BAD_STACK_TRAMPOLINE(0xb00)
698BAD_STACK_TRAMPOLINE(0xc00) 773BAD_STACK_TRAMPOLINE(0xc00)
699BAD_STACK_TRAMPOLINE(0xd00) 774BAD_STACK_TRAMPOLINE(0xd00)
775BAD_STACK_TRAMPOLINE(0xd08)
700BAD_STACK_TRAMPOLINE(0xe00) 776BAD_STACK_TRAMPOLINE(0xe00)
701BAD_STACK_TRAMPOLINE(0xf00) 777BAD_STACK_TRAMPOLINE(0xf00)
702BAD_STACK_TRAMPOLINE(0xf20) 778BAD_STACK_TRAMPOLINE(0xf20)
703BAD_STACK_TRAMPOLINE(0x2070)
704BAD_STACK_TRAMPOLINE(0x2080)
705 779
706 .globl bad_stack_book3e 780 .globl bad_stack_book3e
707bad_stack_book3e: 781bad_stack_book3e:
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index d4be7bb3dbdf..cb705fdbb458 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -12,6 +12,7 @@
12 * 12 *
13 */ 13 */
14 14
15#include <asm/hw_irq.h>
15#include <asm/exception-64s.h> 16#include <asm/exception-64s.h>
16#include <asm/ptrace.h> 17#include <asm/ptrace.h>
17 18
@@ -19,7 +20,7 @@
19 * We layout physical memory as follows: 20 * We layout physical memory as follows:
20 * 0x0000 - 0x00ff : Secondary processor spin code 21 * 0x0000 - 0x00ff : Secondary processor spin code
21 * 0x0100 - 0x2fff : pSeries Interrupt prologs 22 * 0x0100 - 0x2fff : pSeries Interrupt prologs
22 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs 23 * 0x3000 - 0x5fff : interrupt support common interrupt prologs
23 * 0x6000 - 0x6fff : Initial (CPU0) segment table 24 * 0x6000 - 0x6fff : Initial (CPU0) segment table
24 * 0x7000 - 0x7fff : FWNMI data area 25 * 0x7000 - 0x7fff : FWNMI data area
25 * 0x8000 - : Early init and support code 26 * 0x8000 - : Early init and support code
@@ -100,14 +101,14 @@ data_access_not_stab:
100END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB) 101END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
101#endif 102#endif
102 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD, 103 EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
103 KVMTEST_PR, 0x300) 104 KVMTEST, 0x300)
104 105
105 . = 0x380 106 . = 0x380
106 .globl data_access_slb_pSeries 107 .globl data_access_slb_pSeries
107data_access_slb_pSeries: 108data_access_slb_pSeries:
108 HMT_MEDIUM 109 HMT_MEDIUM
109 SET_SCRATCH0(r13) 110 SET_SCRATCH0(r13)
110 EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x380) 111 EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
111 std r3,PACA_EXSLB+EX_R3(r13) 112 std r3,PACA_EXSLB+EX_R3(r13)
112 mfspr r3,SPRN_DAR 113 mfspr r3,SPRN_DAR
113#ifdef __DISABLED__ 114#ifdef __DISABLED__
@@ -329,8 +330,8 @@ do_stab_bolted_pSeries:
329 EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD) 330 EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD)
330#endif /* CONFIG_POWER4_ONLY */ 331#endif /* CONFIG_POWER4_ONLY */
331 332
332 KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x300) 333 KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
333 KVM_HANDLER_PR_SKIP(PACA_EXSLB, EXC_STD, 0x380) 334 KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
334 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400) 335 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
335 KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480) 336 KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
336 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900) 337 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
@@ -356,34 +357,60 @@ do_stab_bolted_pSeries:
356 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40) 357 KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
357 358
358/* 359/*
359 * An interrupt came in while soft-disabled; clear EE in SRR1, 360 * An interrupt came in while soft-disabled. We set paca->irq_happened,
360 * clear paca->hard_enabled and return. 361 * then, if it was a decrementer interrupt, we bump the dec to max and
362 * and return, else we hard disable and return. This is called with
363 * r10 containing the value to OR to the paca field.
361 */ 364 */
362masked_interrupt: 365#define MASKED_INTERRUPT(_H) \
363 stb r10,PACAHARDIRQEN(r13) 366masked_##_H##interrupt: \
364 mtcrf 0x80,r9 367 std r11,PACA_EXGEN+EX_R11(r13); \
365 ld r9,PACA_EXGEN+EX_R9(r13) 368 lbz r11,PACAIRQHAPPENED(r13); \
366 mfspr r10,SPRN_SRR1 369 or r11,r11,r10; \
367 rldicl r10,r10,48,1 /* clear MSR_EE */ 370 stb r11,PACAIRQHAPPENED(r13); \
368 rotldi r10,r10,16 371 andi. r10,r10,PACA_IRQ_DEC; \
369 mtspr SPRN_SRR1,r10 372 beq 1f; \
370 ld r10,PACA_EXGEN+EX_R10(r13) 373 lis r10,0x7fff; \
371 GET_SCRATCH0(r13) 374 ori r10,r10,0xffff; \
372 rfid 375 mtspr SPRN_DEC,r10; \
376 b 2f; \
3771: mfspr r10,SPRN_##_H##SRR1; \
378 rldicl r10,r10,48,1; /* clear MSR_EE */ \
379 rotldi r10,r10,16; \
380 mtspr SPRN_##_H##SRR1,r10; \
3812: mtcrf 0x80,r9; \
382 ld r9,PACA_EXGEN+EX_R9(r13); \
383 ld r10,PACA_EXGEN+EX_R10(r13); \
384 ld r11,PACA_EXGEN+EX_R11(r13); \
385 GET_SCRATCH0(r13); \
386 ##_H##rfid; \
373 b . 387 b .
388
389 MASKED_INTERRUPT()
390 MASKED_INTERRUPT(H)
374 391
375masked_Hinterrupt: 392/*
376 stb r10,PACAHARDIRQEN(r13) 393 * Called from arch_local_irq_enable when an interrupt needs
377 mtcrf 0x80,r9 394 * to be resent. r3 contains 0x500 or 0x900 to indicate which
378 ld r9,PACA_EXGEN+EX_R9(r13) 395 * kind of interrupt. MSR:EE is already off. We generate a
379 mfspr r10,SPRN_HSRR1 396 * stackframe like if a real interrupt had happened.
380 rldicl r10,r10,48,1 /* clear MSR_EE */ 397 *
381 rotldi r10,r10,16 398 * Note: While MSR:EE is off, we need to make sure that _MSR
382 mtspr SPRN_HSRR1,r10 399 * in the generated frame has EE set to 1 or the exception
383 ld r10,PACA_EXGEN+EX_R10(r13) 400 * handler will not properly re-enable them.
384 GET_SCRATCH0(r13) 401 */
385 hrfid 402_GLOBAL(__replay_interrupt)
386 b . 403 /* We are going to jump to the exception common code which
404 * will retrieve various register values from the PACA which
405 * we don't give a damn about, so we don't bother storing them.
406 */
407 mfmsr r12
408 mflr r11
409 mfcr r9
410 ori r12,r12,MSR_EE
411 andi. r3,r3,0x0800
412 bne decrementer_common
413 b hardware_interrupt_common
387 414
388#ifdef CONFIG_PPC_PSERIES 415#ifdef CONFIG_PPC_PSERIES
389/* 416/*
@@ -458,14 +485,15 @@ machine_check_common:
458 bl .machine_check_exception 485 bl .machine_check_exception
459 b .ret_from_except 486 b .ret_from_except
460 487
461 STD_EXCEPTION_COMMON_LITE(0x900, decrementer, .timer_interrupt) 488 STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
489 STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, .timer_interrupt)
462 STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception) 490 STD_EXCEPTION_COMMON(0xa00, trap_0a, .unknown_exception)
463 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) 491 STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception)
464 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) 492 STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception)
465 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) 493 STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception)
466 STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception) 494 STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception)
467 STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception) 495 STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception)
468 STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception) 496 STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, .performance_monitor_exception)
469 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) 497 STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception)
470#ifdef CONFIG_ALTIVEC 498#ifdef CONFIG_ALTIVEC
471 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception) 499 STD_EXCEPTION_COMMON(0x1700, altivec_assist, .altivec_assist_exception)
@@ -482,6 +510,9 @@ machine_check_common:
482system_call_entry: 510system_call_entry:
483 b system_call_common 511 b system_call_common
484 512
513ppc64_runlatch_on_trampoline:
514 b .__ppc64_runlatch_on
515
485/* 516/*
486 * Here we have detected that the kernel stack pointer is bad. 517 * Here we have detected that the kernel stack pointer is bad.
487 * R9 contains the saved CR, r13 points to the paca, 518 * R9 contains the saved CR, r13 points to the paca,
@@ -555,6 +586,8 @@ data_access_common:
555 mfspr r10,SPRN_DSISR 586 mfspr r10,SPRN_DSISR
556 stw r10,PACA_EXGEN+EX_DSISR(r13) 587 stw r10,PACA_EXGEN+EX_DSISR(r13)
557 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN) 588 EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
589 DISABLE_INTS
590 ld r12,_MSR(r1)
558 ld r3,PACA_EXGEN+EX_DAR(r13) 591 ld r3,PACA_EXGEN+EX_DAR(r13)
559 lwz r4,PACA_EXGEN+EX_DSISR(r13) 592 lwz r4,PACA_EXGEN+EX_DSISR(r13)
560 li r5,0x300 593 li r5,0x300
@@ -569,6 +602,7 @@ h_data_storage_common:
569 stw r10,PACA_EXGEN+EX_DSISR(r13) 602 stw r10,PACA_EXGEN+EX_DSISR(r13)
570 EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN) 603 EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
571 bl .save_nvgprs 604 bl .save_nvgprs
605 DISABLE_INTS
572 addi r3,r1,STACK_FRAME_OVERHEAD 606 addi r3,r1,STACK_FRAME_OVERHEAD
573 bl .unknown_exception 607 bl .unknown_exception
574 b .ret_from_except 608 b .ret_from_except
@@ -577,6 +611,8 @@ h_data_storage_common:
577 .globl instruction_access_common 611 .globl instruction_access_common
578instruction_access_common: 612instruction_access_common:
579 EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN) 613 EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
614 DISABLE_INTS
615 ld r12,_MSR(r1)
580 ld r3,_NIP(r1) 616 ld r3,_NIP(r1)
581 andis. r4,r12,0x5820 617 andis. r4,r12,0x5820
582 li r5,0x400 618 li r5,0x400
@@ -672,12 +708,6 @@ _GLOBAL(slb_miss_realmode)
672 ld r10,PACA_EXSLB+EX_LR(r13) 708 ld r10,PACA_EXSLB+EX_LR(r13)
673 ld r3,PACA_EXSLB+EX_R3(r13) 709 ld r3,PACA_EXSLB+EX_R3(r13)
674 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */ 710 lwz r9,PACA_EXSLB+EX_CCR(r13) /* get saved CR */
675#ifdef CONFIG_PPC_ISERIES
676BEGIN_FW_FTR_SECTION
677 ld r11,PACALPPACAPTR(r13)
678 ld r11,LPPACASRR0(r11) /* get SRR0 value */
679END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
680#endif /* CONFIG_PPC_ISERIES */
681 711
682 mtlr r10 712 mtlr r10
683 713
@@ -690,12 +720,6 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
690 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */ 720 mtcrf 0x01,r9 /* slb_allocate uses cr0 and cr7 */
691.machine pop 721.machine pop
692 722
693#ifdef CONFIG_PPC_ISERIES
694BEGIN_FW_FTR_SECTION
695 mtspr SPRN_SRR0,r11
696 mtspr SPRN_SRR1,r12
697END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
698#endif /* CONFIG_PPC_ISERIES */
699 ld r9,PACA_EXSLB+EX_R9(r13) 723 ld r9,PACA_EXSLB+EX_R9(r13)
700 ld r10,PACA_EXSLB+EX_R10(r13) 724 ld r10,PACA_EXSLB+EX_R10(r13)
701 ld r11,PACA_EXSLB+EX_R11(r13) 725 ld r11,PACA_EXSLB+EX_R11(r13)
@@ -704,13 +728,7 @@ END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
704 rfid 728 rfid
705 b . /* prevent speculative execution */ 729 b . /* prevent speculative execution */
706 730
7072: 7312: mfspr r11,SPRN_SRR0
708#ifdef CONFIG_PPC_ISERIES
709BEGIN_FW_FTR_SECTION
710 b unrecov_slb
711END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
712#endif /* CONFIG_PPC_ISERIES */
713 mfspr r11,SPRN_SRR0
714 ld r10,PACAKBASE(r13) 732 ld r10,PACAKBASE(r13)
715 LOAD_HANDLER(r10,unrecov_slb) 733 LOAD_HANDLER(r10,unrecov_slb)
716 mtspr SPRN_SRR0,r10 734 mtspr SPRN_SRR0,r10
@@ -727,20 +745,6 @@ unrecov_slb:
727 bl .unrecoverable_exception 745 bl .unrecoverable_exception
728 b 1b 746 b 1b
729 747
730 .align 7
731 .globl hardware_interrupt_common
732 .globl hardware_interrupt_entry
733hardware_interrupt_common:
734 EXCEPTION_PROLOG_COMMON(0x500, PACA_EXGEN)
735 FINISH_NAP
736hardware_interrupt_entry:
737 DISABLE_INTS
738BEGIN_FTR_SECTION
739 bl .ppc64_runlatch_on
740END_FTR_SECTION_IFSET(CPU_FTR_CTRL)
741 addi r3,r1,STACK_FRAME_OVERHEAD
742 bl .do_IRQ
743 b .ret_from_except_lite
744 748
745#ifdef CONFIG_PPC_970_NAP 749#ifdef CONFIG_PPC_970_NAP
746power4_fixup_nap: 750power4_fixup_nap:
@@ -774,8 +778,8 @@ alignment_common:
774program_check_common: 778program_check_common:
775 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN) 779 EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
776 bl .save_nvgprs 780 bl .save_nvgprs
781 DISABLE_INTS
777 addi r3,r1,STACK_FRAME_OVERHEAD 782 addi r3,r1,STACK_FRAME_OVERHEAD
778 ENABLE_INTS
779 bl .program_check_exception 783 bl .program_check_exception
780 b .ret_from_except 784 b .ret_from_except
781 785
@@ -785,8 +789,8 @@ fp_unavailable_common:
785 EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN) 789 EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
786 bne 1f /* if from user, just load it up */ 790 bne 1f /* if from user, just load it up */
787 bl .save_nvgprs 791 bl .save_nvgprs
792 DISABLE_INTS
788 addi r3,r1,STACK_FRAME_OVERHEAD 793 addi r3,r1,STACK_FRAME_OVERHEAD
789 ENABLE_INTS
790 bl .kernel_fp_unavailable_exception 794 bl .kernel_fp_unavailable_exception
791 BUG_OPCODE 795 BUG_OPCODE
7921: bl .load_up_fpu 7961: bl .load_up_fpu
@@ -805,8 +809,8 @@ BEGIN_FTR_SECTION
805END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 809END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
806#endif 810#endif
807 bl .save_nvgprs 811 bl .save_nvgprs
812 DISABLE_INTS
808 addi r3,r1,STACK_FRAME_OVERHEAD 813 addi r3,r1,STACK_FRAME_OVERHEAD
809 ENABLE_INTS
810 bl .altivec_unavailable_exception 814 bl .altivec_unavailable_exception
811 b .ret_from_except 815 b .ret_from_except
812 816
@@ -816,13 +820,14 @@ vsx_unavailable_common:
816 EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN) 820 EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
817#ifdef CONFIG_VSX 821#ifdef CONFIG_VSX
818BEGIN_FTR_SECTION 822BEGIN_FTR_SECTION
819 bne .load_up_vsx 823 beq 1f
824 b .load_up_vsx
8201: 8251:
821END_FTR_SECTION_IFSET(CPU_FTR_VSX) 826END_FTR_SECTION_IFSET(CPU_FTR_VSX)
822#endif 827#endif
823 bl .save_nvgprs 828 bl .save_nvgprs
829 DISABLE_INTS
824 addi r3,r1,STACK_FRAME_OVERHEAD 830 addi r3,r1,STACK_FRAME_OVERHEAD
825 ENABLE_INTS
826 bl .vsx_unavailable_exception 831 bl .vsx_unavailable_exception
827 b .ret_from_except 832 b .ret_from_except
828 833
@@ -831,66 +836,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX)
831__end_handlers: 836__end_handlers:
832 837
833/* 838/*
834 * Return from an exception with minimal checks.
835 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
836 * If interrupts have been enabled, or anything has been
837 * done that might have changed the scheduling status of
838 * any task or sent any task a signal, you should use
839 * ret_from_except or ret_from_except_lite instead of this.
840 */
841fast_exc_return_irq: /* restores irq state too */
842 ld r3,SOFTE(r1)
843 TRACE_AND_RESTORE_IRQ(r3);
844 ld r12,_MSR(r1)
845 rldicl r4,r12,49,63 /* get MSR_EE to LSB */
846 stb r4,PACAHARDIRQEN(r13) /* restore paca->hard_enabled */
847 b 1f
848
849 .globl fast_exception_return
850fast_exception_return:
851 ld r12,_MSR(r1)
8521: ld r11,_NIP(r1)
853 andi. r3,r12,MSR_RI /* check if RI is set */
854 beq- unrecov_fer
855
856#ifdef CONFIG_VIRT_CPU_ACCOUNTING
857 andi. r3,r12,MSR_PR
858 beq 2f
859 ACCOUNT_CPU_USER_EXIT(r3, r4)
8602:
861#endif
862
863 ld r3,_CCR(r1)
864 ld r4,_LINK(r1)
865 ld r5,_CTR(r1)
866 ld r6,_XER(r1)
867 mtcr r3
868 mtlr r4
869 mtctr r5
870 mtxer r6
871 REST_GPR(0, r1)
872 REST_8GPRS(2, r1)
873
874 mfmsr r10
875 rldicl r10,r10,48,1 /* clear EE */
876 rldicr r10,r10,16,61 /* clear RI (LE is 0 already) */
877 mtmsrd r10,1
878
879 mtspr SPRN_SRR1,r12
880 mtspr SPRN_SRR0,r11
881 REST_4GPRS(10, r1)
882 ld r1,GPR1(r1)
883 rfid
884 b . /* prevent speculative execution */
885
886unrecov_fer:
887 bl .save_nvgprs
8881: addi r3,r1,STACK_FRAME_OVERHEAD
889 bl .unrecoverable_exception
890 b 1b
891
892
893/*
894 * Hash table stuff 839 * Hash table stuff
895 */ 840 */
896 .align 7 841 .align 7
@@ -912,28 +857,6 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
912 lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ 857 lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */
913 andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */ 858 andis. r0,r0,NMI_MASK@h /* (i.e. an irq when soft-disabled) */
914 bne 77f /* then don't call hash_page now */ 859 bne 77f /* then don't call hash_page now */
915
916 /*
917 * On iSeries, we soft-disable interrupts here, then
918 * hard-enable interrupts so that the hash_page code can spin on
919 * the hash_table_lock without problems on a shared processor.
920 */
921 DISABLE_INTS
922
923 /*
924 * Currently, trace_hardirqs_off() will be called by DISABLE_INTS
925 * and will clobber volatile registers when irq tracing is enabled
926 * so we need to reload them. It may be possible to be smarter here
927 * and move the irq tracing elsewhere but let's keep it simple for
928 * now
929 */
930#ifdef CONFIG_TRACE_IRQFLAGS
931 ld r3,_DAR(r1)
932 ld r4,_DSISR(r1)
933 ld r5,_TRAP(r1)
934 ld r12,_MSR(r1)
935 clrrdi r5,r5,4
936#endif /* CONFIG_TRACE_IRQFLAGS */
937 /* 860 /*
938 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are 861 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
939 * accessing a userspace segment (even from the kernel). We assume 862 * accessing a userspace segment (even from the kernel). We assume
@@ -951,62 +874,25 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
951 * r4 contains the required access permissions 874 * r4 contains the required access permissions
952 * r5 contains the trap number 875 * r5 contains the trap number
953 * 876 *
954 * at return r3 = 0 for success 877 * at return r3 = 0 for success, 1 for page fault, negative for error
955 */ 878 */
956 bl .hash_page /* build HPTE if possible */ 879 bl .hash_page /* build HPTE if possible */
957 cmpdi r3,0 /* see if hash_page succeeded */ 880 cmpdi r3,0 /* see if hash_page succeeded */
958 881
959BEGIN_FW_FTR_SECTION 882 /* Success */
960 /*
961 * If we had interrupts soft-enabled at the point where the
962 * DSI/ISI occurred, and an interrupt came in during hash_page,
963 * handle it now.
964 * We jump to ret_from_except_lite rather than fast_exception_return
965 * because ret_from_except_lite will check for and handle pending
966 * interrupts if necessary.
967 */
968 beq 13f
969END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
970
971BEGIN_FW_FTR_SECTION
972 /*
973 * Here we have interrupts hard-disabled, so it is sufficient
974 * to restore paca->{soft,hard}_enable and get out.
975 */
976 beq fast_exc_return_irq /* Return from exception on success */ 883 beq fast_exc_return_irq /* Return from exception on success */
977END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
978 884
979 /* For a hash failure, we don't bother re-enabling interrupts */ 885 /* Error */
980 ble- 12f 886 blt- 13f
981
982 /*
983 * hash_page couldn't handle it, set soft interrupt enable back
984 * to what it was before the trap. Note that .arch_local_irq_restore
985 * handles any interrupts pending at this point.
986 */
987 ld r3,SOFTE(r1)
988 TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f)
989 bl .arch_local_irq_restore
990 b 11f
991
992/* We have a data breakpoint exception - handle it */
993handle_dabr_fault:
994 bl .save_nvgprs
995 ld r4,_DAR(r1)
996 ld r5,_DSISR(r1)
997 addi r3,r1,STACK_FRAME_OVERHEAD
998 bl .do_dabr
999 b .ret_from_except_lite
1000 887
1001/* Here we have a page fault that hash_page can't handle. */ 888/* Here we have a page fault that hash_page can't handle. */
1002handle_page_fault: 889handle_page_fault:
1003 ENABLE_INTS
100411: ld r4,_DAR(r1) 89011: ld r4,_DAR(r1)
1005 ld r5,_DSISR(r1) 891 ld r5,_DSISR(r1)
1006 addi r3,r1,STACK_FRAME_OVERHEAD 892 addi r3,r1,STACK_FRAME_OVERHEAD
1007 bl .do_page_fault 893 bl .do_page_fault
1008 cmpdi r3,0 894 cmpdi r3,0
1009 beq+ 13f 895 beq+ 12f
1010 bl .save_nvgprs 896 bl .save_nvgprs
1011 mr r5,r3 897 mr r5,r3
1012 addi r3,r1,STACK_FRAME_OVERHEAD 898 addi r3,r1,STACK_FRAME_OVERHEAD
@@ -1014,12 +900,20 @@ handle_page_fault:
1014 bl .bad_page_fault 900 bl .bad_page_fault
1015 b .ret_from_except 901 b .ret_from_except
1016 902
101713: b .ret_from_except_lite 903/* We have a data breakpoint exception - handle it */
904handle_dabr_fault:
905 bl .save_nvgprs
906 ld r4,_DAR(r1)
907 ld r5,_DSISR(r1)
908 addi r3,r1,STACK_FRAME_OVERHEAD
909 bl .do_dabr
91012: b .ret_from_except_lite
911
1018 912
1019/* We have a page fault that hash_page could handle but HV refused 913/* We have a page fault that hash_page could handle but HV refused
1020 * the PTE insertion 914 * the PTE insertion
1021 */ 915 */
102212: bl .save_nvgprs 91613: bl .save_nvgprs
1023 mr r5,r3 917 mr r5,r3
1024 addi r3,r1,STACK_FRAME_OVERHEAD 918 addi r3,r1,STACK_FRAME_OVERHEAD
1025 ld r4,_DAR(r1) 919 ld r4,_DAR(r1)
@@ -1141,51 +1035,19 @@ _GLOBAL(do_stab_bolted)
1141 .= 0x7000 1035 .= 0x7000
1142 .globl fwnmi_data_area 1036 .globl fwnmi_data_area
1143fwnmi_data_area: 1037fwnmi_data_area:
1144#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
1145 1038
1146 /* iSeries does not use the FWNMI stuff, so it is safe to put
1147 * this here, even if we later allow kernels that will boot on
1148 * both pSeries and iSeries */
1149#ifdef CONFIG_PPC_ISERIES
1150 . = LPARMAP_PHYS
1151 .globl xLparMap
1152xLparMap:
1153 .quad HvEsidsToMap /* xNumberEsids */
1154 .quad HvRangesToMap /* xNumberRanges */
1155 .quad STAB0_PAGE /* xSegmentTableOffs */
1156 .zero 40 /* xRsvd */
1157 /* xEsids (HvEsidsToMap entries of 2 quads) */
1158 .quad PAGE_OFFSET_ESID /* xKernelEsid */
1159 .quad PAGE_OFFSET_VSID /* xKernelVsid */
1160 .quad VMALLOC_START_ESID /* xKernelEsid */
1161 .quad VMALLOC_START_VSID /* xKernelVsid */
1162 /* xRanges (HvRangesToMap entries of 3 quads) */
1163 .quad HvPagesToMap /* xPages */
1164 .quad 0 /* xOffset */
1165 .quad PAGE_OFFSET_VSID << (SID_SHIFT - HW_PAGE_SHIFT) /* xVPN */
1166
1167#endif /* CONFIG_PPC_ISERIES */
1168
1169#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
1170 /* pseries and powernv need to keep the whole page from 1039 /* pseries and powernv need to keep the whole page from
1171 * 0x7000 to 0x8000 free for use by the firmware 1040 * 0x7000 to 0x8000 free for use by the firmware
1172 */ 1041 */
1173 . = 0x8000 1042 . = 0x8000
1174#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */ 1043#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */
1175 1044
1176/* 1045/* Space for CPU0's segment table */
1177 * Space for CPU0's segment table. 1046 .balign 4096
1178 *
1179 * On iSeries, the hypervisor must fill in at least one entry before
1180 * we get control (with relocate on). The address is given to the hv
1181 * as a page number (see xLparMap above), so this must be at a
1182 * fixed address (the linker can't compute (u64)&initial_stab >>
1183 * PAGE_SHIFT).
1184 */
1185 . = STAB0_OFFSET /* 0x8000 */
1186 .globl initial_stab 1047 .globl initial_stab
1187initial_stab: 1048initial_stab:
1188 .space 4096 1049 .space 4096
1050
1189#ifdef CONFIG_PPC_POWERNV 1051#ifdef CONFIG_PPC_POWERNV
1190_GLOBAL(opal_mc_secondary_handler) 1052_GLOBAL(opal_mc_secondary_handler)
1191 HMT_MEDIUM 1053 HMT_MEDIUM
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
new file mode 100644
index 000000000000..cfe7a38708c3
--- /dev/null
+++ b/arch/powerpc/kernel/fadump.c
@@ -0,0 +1,1315 @@
1/*
2 * Firmware Assisted dump: A robust mechanism to get reliable kernel crash
3 * dump with assistance from firmware. This approach does not use kexec,
4 * instead firmware assists in booting the kdump kernel while preserving
5 * memory contents. The most of the code implementation has been adapted
6 * from phyp assisted dump implementation written by Linas Vepstas and
7 * Manish Ahuja
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 *
23 * Copyright 2011 IBM Corporation
24 * Author: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com>
25 */
26
27#undef DEBUG
28#define pr_fmt(fmt) "fadump: " fmt
29
30#include <linux/string.h>
31#include <linux/memblock.h>
32#include <linux/delay.h>
33#include <linux/debugfs.h>
34#include <linux/seq_file.h>
35#include <linux/crash_dump.h>
36#include <linux/kobject.h>
37#include <linux/sysfs.h>
38
39#include <asm/page.h>
40#include <asm/prom.h>
41#include <asm/rtas.h>
42#include <asm/fadump.h>
43
/* Global fadump configuration/state, populated from the device tree. */
static struct fw_dump fw_dump;
/* Memory layout handed to firmware when registering for a dump. */
static struct fadump_mem_struct fdm;
/* Points at the 'ibm,kernel-dump' DT property when a dump is active, else NULL. */
static const struct fadump_mem_struct *fdm_active;

static DEFINE_MUTEX(fadump_mutex);
struct fad_crash_memory_ranges crash_memory_ranges[INIT_CRASHMEM_RANGES];
int crash_mem_ranges;	/* number of valid entries in crash_memory_ranges */
51
/*
 * Scan the flattened device tree for the RTAS properties that describe
 * Firmware Assisted dump support.  Invoked once per node during early
 * boot; only the depth-1 "rtas" node is of interest.
 *
 * Side effects: fills in fw_dump.fadump_supported, the RTAS token,
 * fw_dump.dump_active / fdm_active, and the per-section sizes.
 *
 * Returns 1 once the rtas node has been processed, 0 to keep scanning.
 */
int __init early_init_dt_scan_fw_dump(unsigned long node,
			const char *uname, int depth, void *data)
{
	__be32 *sections;
	int i, num_sections;
	unsigned long size;
	const int *token;

	if (depth != 1 || strcmp(uname, "rtas") != 0)
		return 0;

	/*
	 * Check if Firmware Assisted dump is supported. if yes, check
	 * if dump has been initiated on last reboot.
	 */
	token = of_get_flat_dt_prop(node, "ibm,configure-kernel-dump", NULL);
	if (!token)
		return 0;

	fw_dump.fadump_supported = 1;
	fw_dump.ibm_configure_kernel_dump = *token;

	/*
	 * The 'ibm,kernel-dump' rtas node is present only if there is
	 * dump data waiting for us.
	 */
	fdm_active = of_get_flat_dt_prop(node, "ibm,kernel-dump", NULL);
	if (fdm_active)
		fw_dump.dump_active = 1;

	/* Get the sizes required to store dump data for the firmware provided
	 * dump sections.
	 * For each dump section type supported, a 32bit cell which defines
	 * the ID of a supported section followed by two 32 bit cells which
	 * gives the size of the section in bytes.
	 */
	sections = of_get_flat_dt_prop(node, "ibm,configure-kernel-dump-sizes",
					&size);

	if (!sections)
		return 0;

	/* Each entry is one 32-bit type cell plus a 64-bit size (2 cells). */
	num_sections = size / (3 * sizeof(u32));

	for (i = 0; i < num_sections; i++, sections += 3) {
		u32 type = (u32)of_read_number(sections, 1);

		switch (type) {
		case FADUMP_CPU_STATE_DATA:
			fw_dump.cpu_state_data_size =
					of_read_ulong(&sections[1], 2);
			break;
		case FADUMP_HPTE_REGION:
			fw_dump.hpte_region_size =
					of_read_ulong(&sections[1], 2);
			break;
		}
	}
	return 1;
}
113
114int is_fadump_active(void)
115{
116 return fw_dump.dump_active;
117}
118
/* Print firmware assisted dump configurations for debugging purpose. */
static void fadump_show_config(void)
{
	pr_debug("Support for firmware-assisted dump (fadump): %s\n",
			(fw_dump.fadump_supported ? "present" : "no support"));

	/* Nothing more to report when the platform lacks fadump support. */
	if (!fw_dump.fadump_supported)
		return;

	pr_debug("Fadump enabled : %s\n",
			(fw_dump.fadump_enabled ? "yes" : "no"));
	pr_debug("Dump Active : %s\n",
			(fw_dump.dump_active ? "yes" : "no"));
	pr_debug("Dump section sizes:\n");
	pr_debug(" CPU state data size: %lx\n", fw_dump.cpu_state_data_size);
	pr_debug(" HPTE region size : %lx\n", fw_dump.hpte_region_size);
	pr_debug("Boot memory size : %lx\n", fw_dump.boot_memory_size);
}
137
/*
 * Fill in the fadump_mem_struct that is later passed to firmware when
 * registering for a dump.  @addr is the physical address (page-aligned
 * here) where firmware should deposit dump data; the three sections
 * (cpu state, hpte region, RMA/boot memory) are laid out back to back
 * starting at it.
 *
 * Returns the first address past the laid-out destination area, or 0 if
 * @fdm is NULL.
 */
static unsigned long init_fadump_mem_struct(struct fadump_mem_struct *fdm,
				unsigned long addr)
{
	if (!fdm)
		return 0;

	memset(fdm, 0, sizeof(struct fadump_mem_struct));
	addr = addr & PAGE_MASK;

	fdm->header.dump_format_version = 0x00000001;
	fdm->header.dump_num_sections = 3;
	fdm->header.dump_status_flag = 0;
	fdm->header.offset_first_dump_section =
		(u32)offsetof(struct fadump_mem_struct, cpu_state_data);

	/*
	 * Fields for disk dump option.
	 * We are not using disk dump option, hence set these fields to 0.
	 */
	fdm->header.dd_block_size = 0;
	fdm->header.dd_block_offset = 0;
	fdm->header.dd_num_blocks = 0;
	fdm->header.dd_offset_disk_path = 0;

	/* set 0 to disable an automatic dump-reboot. */
	fdm->header.max_time_auto = 0;

	/* Kernel dump sections */
	/* cpu state data section. */
	fdm->cpu_state_data.request_flag = FADUMP_REQUEST_FLAG;
	fdm->cpu_state_data.source_data_type = FADUMP_CPU_STATE_DATA;
	fdm->cpu_state_data.source_address = 0;
	fdm->cpu_state_data.source_len = fw_dump.cpu_state_data_size;
	fdm->cpu_state_data.destination_address = addr;
	addr += fw_dump.cpu_state_data_size;

	/* hpte region section */
	fdm->hpte_region.request_flag = FADUMP_REQUEST_FLAG;
	fdm->hpte_region.source_data_type = FADUMP_HPTE_REGION;
	fdm->hpte_region.source_address = 0;
	fdm->hpte_region.source_len = fw_dump.hpte_region_size;
	fdm->hpte_region.destination_address = addr;
	addr += fw_dump.hpte_region_size;

	/* RMA region section */
	fdm->rmr_region.request_flag = FADUMP_REQUEST_FLAG;
	fdm->rmr_region.source_data_type = FADUMP_REAL_MODE_REGION;
	fdm->rmr_region.source_address = RMA_START;
	fdm->rmr_region.source_len = fw_dump.boot_memory_size;
	fdm->rmr_region.destination_address = addr;
	addr += fw_dump.boot_memory_size;

	return addr;
}
192
193/**
194 * fadump_calculate_reserve_size(): reserve variable boot area 5% of System RAM
195 *
196 * Function to find the largest memory size we need to reserve during early
197 * boot process. This will be the size of the memory that is required for a
198 * kernel to boot successfully.
199 *
200 * This function has been taken from phyp-assisted dump feature implementation.
201 *
202 * returns larger of 256MB or 5% rounded down to multiples of 256MB.
203 *
204 * TODO: Come up with better approach to find out more accurate memory size
205 * that is required for a kernel to boot successfully.
206 *
207 */
208static inline unsigned long fadump_calculate_reserve_size(void)
209{
210 unsigned long size;
211
212 /*
213 * Check if the size is specified through fadump_reserve_mem= cmdline
214 * option. If yes, then use that.
215 */
216 if (fw_dump.reserve_bootvar)
217 return fw_dump.reserve_bootvar;
218
219 /* divide by 20 to get 5% of value */
220 size = memblock_end_of_DRAM() / 20;
221
222 /* round it down in multiples of 256 */
223 size = size & ~0x0FFFFFFFUL;
224
225 /* Truncate to memory_limit. We don't want to over reserve the memory.*/
226 if (memory_limit && size > memory_limit)
227 size = memory_limit;
228
229 return (size > MIN_BOOT_MEM ? size : MIN_BOOT_MEM);
230}
231
232/*
233 * Calculate the total memory size required to be reserved for
234 * firmware-assisted dump registration.
235 */
236static unsigned long get_fadump_area_size(void)
237{
238 unsigned long size = 0;
239
240 size += fw_dump.cpu_state_data_size;
241 size += fw_dump.hpte_region_size;
242 size += fw_dump.boot_memory_size;
243 size += sizeof(struct fadump_crash_info_header);
244 size += sizeof(struct elfhdr); /* ELF core header.*/
245 size += sizeof(struct elf_phdr); /* place holder for cpu notes */
246 /* Program headers for crash memory regions. */
247 size += sizeof(struct elf_phdr) * (memblock_num_regions(memory) + 2);
248
249 size = PAGE_ALIGN(size);
250 return size;
251}
252
/*
 * Reserve the memory fadump needs, early in boot via memblock.
 *
 * In a normal boot this reserves get_fadump_area_size() bytes at the top
 * of memory; after a crash (dump active) it instead reserves everything
 * above boot_memory_size so the captured image is not overwritten before
 * userspace saves it.
 *
 * Returns 1 when memory was reserved, 0 when fadump is disabled or
 * unsupported.
 */
int __init fadump_reserve_mem(void)
{
	unsigned long base, size, memory_boundary;

	if (!fw_dump.fadump_enabled)
		return 0;

	if (!fw_dump.fadump_supported) {
		printk(KERN_INFO "Firmware-assisted dump is not supported on"
				" this hardware\n");
		fw_dump.fadump_enabled = 0;
		return 0;
	}
	/*
	 * Initialize boot memory size
	 * If dump is active then we have already calculated the size during
	 * first kernel.
	 */
	if (fdm_active)
		fw_dump.boot_memory_size = fdm_active->rmr_region.source_len;
	else
		fw_dump.boot_memory_size = fadump_calculate_reserve_size();

	/*
	 * Calculate the memory boundary.
	 * If memory_limit is less than actual memory boundary then reserve
	 * the memory for fadump beyond the memory_limit and adjust the
	 * memory_limit accordingly, so that the running kernel can run with
	 * specified memory_limit.
	 */
	if (memory_limit && memory_limit < memblock_end_of_DRAM()) {
		size = get_fadump_area_size();
		if ((memory_limit + size) < memblock_end_of_DRAM())
			memory_limit += size;
		else
			memory_limit = memblock_end_of_DRAM();
		printk(KERN_INFO "Adjusted memory_limit for firmware-assisted"
				" dump, now %#016llx\n",
				(unsigned long long)memory_limit);
	}
	if (memory_limit)
		memory_boundary = memory_limit;
	else
		memory_boundary = memblock_end_of_DRAM();

	if (fw_dump.dump_active) {
		printk(KERN_INFO "Firmware-assisted dump is active.\n");
		/*
		 * If last boot has crashed then reserve all the memory
		 * above boot_memory_size so that we don't touch it until
		 * dump is written to disk by userspace tool. This memory
		 * will be released for general use once the dump is saved.
		 */
		base = fw_dump.boot_memory_size;
		size = memory_boundary - base;
		memblock_reserve(base, size);
		printk(KERN_INFO "Reserved %ldMB of memory at %ldMB "
				"for saving crash dump\n",
				(unsigned long)(size >> 20),
				(unsigned long)(base >> 20));

		/* Crash info header sits right after the dumped RMA region. */
		fw_dump.fadumphdr_addr =
				fdm_active->rmr_region.destination_address +
				fdm_active->rmr_region.source_len;
		pr_debug("fadumphdr_addr = %p\n",
				(void *) fw_dump.fadumphdr_addr);
	} else {
		/* Reserve the memory at the top of memory. */
		size = get_fadump_area_size();
		base = memory_boundary - size;
		memblock_reserve(base, size);
		printk(KERN_INFO "Reserved %ldMB of memory at %ldMB "
				"for firmware-assisted dump\n",
				(unsigned long)(size >> 20),
				(unsigned long)(base >> 20));
	}
	fw_dump.reserve_dump_area_start = base;
	fw_dump.reserve_dump_area_size = size;
	return 1;
}
333
334/* Look for fadump= cmdline option. */
335static int __init early_fadump_param(char *p)
336{
337 if (!p)
338 return 1;
339
340 if (strncmp(p, "on", 2) == 0)
341 fw_dump.fadump_enabled = 1;
342 else if (strncmp(p, "off", 3) == 0)
343 fw_dump.fadump_enabled = 0;
344
345 return 0;
346}
347early_param("fadump", early_fadump_param);
348
349/* Look for fadump_reserve_mem= cmdline option */
350static int __init early_fadump_reserve_mem(char *p)
351{
352 if (p)
353 fw_dump.reserve_bootvar = memparse(p, &p);
354 return 0;
355}
356early_param("fadump_reserve_mem", early_fadump_reserve_mem);
357
358static void register_fw_dump(struct fadump_mem_struct *fdm)
359{
360 int rc;
361 unsigned int wait_time;
362
363 pr_debug("Registering for firmware-assisted kernel dump...\n");
364
365 /* TODO: Add upper time limit for the delay */
366 do {
367 rc = rtas_call(fw_dump.ibm_configure_kernel_dump, 3, 1, NULL,
368 FADUMP_REGISTER, fdm,
369 sizeof(struct fadump_mem_struct));
370
371 wait_time = rtas_busy_delay_time(rc);
372 if (wait_time)
373 mdelay(wait_time);
374
375 } while (wait_time);
376
377 switch (rc) {
378 case -1:
379 printk(KERN_ERR "Failed to register firmware-assisted kernel"
380 " dump. Hardware Error(%d).\n", rc);
381 break;
382 case -3:
383 printk(KERN_ERR "Failed to register firmware-assisted kernel"
384 " dump. Parameter Error(%d).\n", rc);
385 break;
386 case -9:
387 printk(KERN_ERR "firmware-assisted kernel dump is already "
388 " registered.");
389 fw_dump.dump_registered = 1;
390 break;
391 case 0:
392 printk(KERN_INFO "firmware-assisted kernel dump registration"
393 " is successful\n");
394 fw_dump.dump_registered = 1;
395 break;
396 }
397}
398
/*
 * Crash-time entry point: record the crashing cpu, its registers and the
 * online cpu mask in the fadump crash info header, then invoke the
 * ibm,os-term RTAS call so firmware captures the dump.  No-op unless a
 * dump was successfully registered.
 */
void crash_fadump(struct pt_regs *regs, const char *str)
{
	struct fadump_crash_info_header *fdh = NULL;

	if (!fw_dump.dump_registered || !fw_dump.fadumphdr_addr)
		return;

	fdh = __va(fw_dump.fadumphdr_addr);
	crashing_cpu = smp_processor_id();
	fdh->crashing_cpu = crashing_cpu;
	crash_save_vmcoreinfo();

	/* Prefer the exact trap registers; otherwise snapshot our own. */
	if (regs)
		fdh->regs = *regs;
	else
		ppc_save_regs(&fdh->regs);

	fdh->cpu_online_mask = *cpu_online_mask;

	/* Call ibm,os-term rtas call to trigger firmware assisted dump */
	rtas_os_term((char *)str);
}
421
#define GPR_MASK 0xffffff0000000000
/*
 * Decode a register save area id of the form "GPRnn" into a GPR number.
 * The id is an 8-byte ASCII identifier packed into a u64; after masking
 * off the "GPR" prefix, the two digit characters are extracted and parsed.
 *
 * NOTE(review): assumes the digits always sit in the same two byte
 * positions (i.e. single-digit GPR ids are padded by firmware) -- confirm
 * against the PAPR register save area layout.
 *
 * Returns the GPR index 0-31, or -1 when the id is not a GPR entry.
 */
static inline int fadump_gpr_index(u64 id)
{
	int i = -1;
	char str[3];

	if ((id & GPR_MASK) == REG_ID("GPR")) {
		/* get the digits at the end */
		id &= ~GPR_MASK;
		id >>= 24;
		str[2] = '\0';
		str[1] = id & 0xff;
		str[0] = (id >> 8) & 0xff;
		sscanf(str, "%d", &i);
		if (i > 31)
			i = -1;
	}
	return i;
}
441
/*
 * Store one firmware-provided (id, value) register pair into the matching
 * field of @regs.  Unrecognized ids are silently ignored.
 */
static inline void fadump_set_regval(struct pt_regs *regs, u64 reg_id,
					u64 reg_val)
{
	int i;

	/* GPRs first; fadump_gpr_index() returns -1 for non-GPR ids. */
	i = fadump_gpr_index(reg_id);
	if (i >= 0)
		regs->gpr[i] = (unsigned long)reg_val;
	else if (reg_id == REG_ID("NIA"))
		regs->nip = (unsigned long)reg_val;
	else if (reg_id == REG_ID("MSR"))
		regs->msr = (unsigned long)reg_val;
	else if (reg_id == REG_ID("CTR"))
		regs->ctr = (unsigned long)reg_val;
	else if (reg_id == REG_ID("LR"))
		regs->link = (unsigned long)reg_val;
	else if (reg_id == REG_ID("XER"))
		regs->xer = (unsigned long)reg_val;
	else if (reg_id == REG_ID("CR"))
		regs->ccr = (unsigned long)reg_val;
	else if (reg_id == REG_ID("DAR"))
		regs->dar = (unsigned long)reg_val;
	else if (reg_id == REG_ID("DSISR"))
		regs->dsisr = (unsigned long)reg_val;
}
467
468static struct fadump_reg_entry*
469fadump_read_registers(struct fadump_reg_entry *reg_entry, struct pt_regs *regs)
470{
471 memset(regs, 0, sizeof(struct pt_regs));
472
473 while (reg_entry->reg_id != REG_ID("CPUEND")) {
474 fadump_set_regval(regs, reg_entry->reg_id,
475 reg_entry->reg_value);
476 reg_entry++;
477 }
478 reg_entry++;
479 return reg_entry;
480}
481
482static u32 *fadump_append_elf_note(u32 *buf, char *name, unsigned type,
483 void *data, size_t data_len)
484{
485 struct elf_note note;
486
487 note.n_namesz = strlen(name) + 1;
488 note.n_descsz = data_len;
489 note.n_type = type;
490 memcpy(buf, &note, sizeof(note));
491 buf += (sizeof(note) + 3)/4;
492 memcpy(buf, name, note.n_namesz);
493 buf += (note.n_namesz + 3)/4;
494 memcpy(buf, data, note.n_descsz);
495 buf += (note.n_descsz + 3)/4;
496
497 return buf;
498}
499
500static void fadump_final_note(u32 *buf)
501{
502 struct elf_note note;
503
504 note.n_namesz = 0;
505 note.n_descsz = 0;
506 note.n_type = 0;
507 memcpy(buf, &note, sizeof(note));
508}
509
/*
 * Convert a captured pt_regs into an NT_PRSTATUS ELF note at @buf.
 * Returns the buffer position after the appended note.
 */
static u32 *fadump_regs_to_elf_notes(u32 *buf, struct pt_regs *regs)
{
	struct elf_prstatus prstatus;

	memset(&prstatus, 0, sizeof(prstatus));
	/*
	 * FIXME: How do i get PID? Do I really need it?
	 * prstatus.pr_pid = ????
	 */
	elf_core_copy_kernel_regs(&prstatus.pr_reg, regs);
	buf = fadump_append_elf_note(buf, KEXEC_CORE_NOTE_NAME, NT_PRSTATUS,
				&prstatus, sizeof(prstatus));
	return buf;
}
524
525static void fadump_update_elfcore_header(char *bufp)
526{
527 struct elfhdr *elf;
528 struct elf_phdr *phdr;
529
530 elf = (struct elfhdr *)bufp;
531 bufp += sizeof(struct elfhdr);
532
533 /* First note is a place holder for cpu notes info. */
534 phdr = (struct elf_phdr *)bufp;
535
536 if (phdr->p_type == PT_NOTE) {
537 phdr->p_paddr = fw_dump.cpu_notes_buf;
538 phdr->p_offset = phdr->p_paddr;
539 phdr->p_filesz = fw_dump.cpu_notes_buf_size;
540 phdr->p_memsz = fw_dump.cpu_notes_buf_size;
541 }
542 return;
543}
544
545static void *fadump_cpu_notes_buf_alloc(unsigned long size)
546{
547 void *vaddr;
548 struct page *page;
549 unsigned long order, count, i;
550
551 order = get_order(size);
552 vaddr = (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, order);
553 if (!vaddr)
554 return NULL;
555
556 count = 1 << order;
557 page = virt_to_page(vaddr);
558 for (i = 0; i < count; i++)
559 SetPageReserved(page + i);
560 return vaddr;
561}
562
563static void fadump_cpu_notes_buf_free(unsigned long vaddr, unsigned long size)
564{
565 struct page *page;
566 unsigned long order, count, i;
567
568 order = get_order(size);
569 count = 1 << order;
570 page = virt_to_page(vaddr);
571 for (i = 0; i < count; i++)
572 ClearPageReserved(page + i);
573 __free_pages(page, order);
574}
575
/*
 * Read CPU state dump data and convert it into ELF notes.
 * The CPU dump starts with magic number "REGSAVE". NumCpusOffset should be
 * used to access the data to allow for additional fields to be added without
 * affecting compatibility. Each list of registers for a CPU starts with
 * "CPUSTRT" and ends with "CPUEND". Each register entry is of 16 bytes,
 * 8 Byte ASCII identifier and 8 Byte register value. The register entry
 * with identifier "CPUSTRT" and "CPUEND" contains 4 byte cpu id as part
 * of register value. For more details refer to PAPR document.
 *
 * Only for the crashing cpu we ignore the CPU dump data and get exact
 * state from fadump crash info structure populated by first kernel at the
 * time of crash.
 */
static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm)
{
	struct fadump_reg_save_area_header *reg_header;
	struct fadump_reg_entry *reg_entry;
	struct fadump_crash_info_header *fdh = NULL;
	void *vaddr;
	unsigned long addr;
	u32 num_cpus, *note_buf;
	struct pt_regs regs;
	int i, rc = 0, cpu = 0;

	if (!fdm->cpu_state_data.bytes_dumped)
		return -EINVAL;

	addr = fdm->cpu_state_data.destination_address;
	vaddr = __va(addr);

	/* Validate the "REGSAVE" magic before trusting the area. */
	reg_header = vaddr;
	if (reg_header->magic_number != REGSAVE_AREA_MAGIC) {
		printk(KERN_ERR "Unable to read register save area.\n");
		return -ENOENT;
	}
	pr_debug("--------CPU State Data------------\n");
	pr_debug("Magic Number: %llx\n", reg_header->magic_number);
	pr_debug("NumCpuOffset: %x\n", reg_header->num_cpu_offset);

	/* The cpu count sits at num_cpu_offset; register entries follow it. */
	vaddr += reg_header->num_cpu_offset;
	num_cpus = *((u32 *)(vaddr));
	pr_debug("NumCpus : %u\n", num_cpus);
	vaddr += sizeof(u32);
	reg_entry = (struct fadump_reg_entry *)vaddr;

	/* Allocate buffer to hold cpu crash notes. */
	fw_dump.cpu_notes_buf_size = num_cpus * sizeof(note_buf_t);
	fw_dump.cpu_notes_buf_size = PAGE_ALIGN(fw_dump.cpu_notes_buf_size);
	note_buf = fadump_cpu_notes_buf_alloc(fw_dump.cpu_notes_buf_size);
	if (!note_buf) {
		printk(KERN_ERR "Failed to allocate 0x%lx bytes for "
			"cpu notes buffer\n", fw_dump.cpu_notes_buf_size);
		return -ENOMEM;
	}
	fw_dump.cpu_notes_buf = __pa(note_buf);

	pr_debug("Allocated buffer for cpu notes of size %ld at %p\n",
			(num_cpus * sizeof(note_buf_t)), note_buf);

	/*
	 * NOTE(review): fdh stays NULL when fadumphdr_addr is unset, yet it
	 * is dereferenced unconditionally below (cpu_online_mask check and
	 * elfcorehdr_addr at the end).  The only caller, process_fadump(),
	 * bails out early when fadumphdr_addr is 0, so this holds today --
	 * but the later 'fdh &&' guard is inconsistent with that; confirm.
	 */
	if (fw_dump.fadumphdr_addr)
		fdh = __va(fw_dump.fadumphdr_addr);

	for (i = 0; i < num_cpus; i++) {
		if (reg_entry->reg_id != REG_ID("CPUSTRT")) {
			printk(KERN_ERR "Unable to read CPU state data\n");
			rc = -ENOENT;
			goto error_out;
		}
		/* Lower 4 bytes of reg_value contains logical cpu id */
		cpu = reg_entry->reg_value & FADUMP_CPU_ID_MASK;
		if (!cpumask_test_cpu(cpu, &fdh->cpu_online_mask)) {
			SKIP_TO_NEXT_CPU(reg_entry);
			continue;
		}
		pr_debug("Reading register data for cpu %d...\n", cpu);
		if (fdh && fdh->crashing_cpu == cpu) {
			/* Crashing cpu: use the exact regs saved at crash time. */
			regs = fdh->regs;
			note_buf = fadump_regs_to_elf_notes(note_buf, &regs);
			SKIP_TO_NEXT_CPU(reg_entry);
		} else {
			reg_entry++;
			reg_entry = fadump_read_registers(reg_entry, &regs);
			note_buf = fadump_regs_to_elf_notes(note_buf, &regs);
		}
	}
	fadump_final_note(note_buf);

	pr_debug("Updating elfcore header (%llx) with cpu notes\n",
			fdh->elfcorehdr_addr);
	fadump_update_elfcore_header((char *)__va(fdh->elfcorehdr_addr));
	return 0;

error_out:
	fadump_cpu_notes_buf_free((unsigned long)__va(fw_dump.cpu_notes_buf),
					fw_dump.cpu_notes_buf_size);
	fw_dump.cpu_notes_buf = 0;
	fw_dump.cpu_notes_buf_size = 0;
	return rc;

}
677
/*
 * Validate and process the dump data stored by firmware before exporting
 * it through '/proc/vmcore'.
 *
 * Checks the firmware status/error flags and the crash info header magic,
 * builds the per-cpu ELF notes, and finally publishes the ELF core header
 * address via elfcorehdr_addr.  Returns 0 on success or a negative errno.
 */
static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
{
	struct fadump_crash_info_header *fdh;
	int rc = 0;

	if (!fdm_active || !fw_dump.fadumphdr_addr)
		return -EINVAL;

	/* Check if the dump data is valid. */
	if ((fdm_active->header.dump_status_flag == FADUMP_ERROR_FLAG) ||
			(fdm_active->cpu_state_data.error_flags != 0) ||
			(fdm_active->rmr_region.error_flags != 0)) {
		printk(KERN_ERR "Dump taken by platform is not valid\n");
		return -EINVAL;
	}
	if ((fdm_active->rmr_region.bytes_dumped !=
			fdm_active->rmr_region.source_len) ||
			!fdm_active->cpu_state_data.bytes_dumped) {
		printk(KERN_ERR "Dump taken by platform is incomplete\n");
		return -EINVAL;
	}

	/* Validate the fadump crash info header */
	fdh = __va(fw_dump.fadumphdr_addr);
	if (fdh->magic_number != FADUMP_CRASH_INFO_MAGIC) {
		printk(KERN_ERR "Crash info header is not valid.\n");
		return -EINVAL;
	}

	rc = fadump_build_cpu_notes(fdm_active);
	if (rc)
		return rc;

	/*
	 * We are done validating dump info and elfcore header is now ready
	 * to be exported. set elfcorehdr_addr so that vmcore module will
	 * export the elfcore header through '/proc/vmcore'.
	 */
	elfcorehdr_addr = fdh->elfcorehdr_addr;

	return 0;
}
724
725static inline void fadump_add_crash_memory(unsigned long long base,
726 unsigned long long end)
727{
728 if (base == end)
729 return;
730
731 pr_debug("crash_memory_range[%d] [%#016llx-%#016llx], %#llx bytes\n",
732 crash_mem_ranges, base, end - 1, (end - base));
733 crash_memory_ranges[crash_mem_ranges].base = base;
734 crash_memory_ranges[crash_mem_ranges].size = end - base;
735 crash_mem_ranges++;
736}
737
738static void fadump_exclude_reserved_area(unsigned long long start,
739 unsigned long long end)
740{
741 unsigned long long ra_start, ra_end;
742
743 ra_start = fw_dump.reserve_dump_area_start;
744 ra_end = ra_start + fw_dump.reserve_dump_area_size;
745
746 if ((ra_start < end) && (ra_end > start)) {
747 if ((start < ra_start) && (end > ra_end)) {
748 fadump_add_crash_memory(start, ra_start);
749 fadump_add_crash_memory(ra_end, end);
750 } else if (start < ra_start) {
751 fadump_add_crash_memory(start, ra_start);
752 } else if (ra_end < end) {
753 fadump_add_crash_memory(ra_end, end);
754 }
755 } else
756 fadump_add_crash_memory(start, end);
757}
758
759static int fadump_init_elfcore_header(char *bufp)
760{
761 struct elfhdr *elf;
762
763 elf = (struct elfhdr *) bufp;
764 bufp += sizeof(struct elfhdr);
765 memcpy(elf->e_ident, ELFMAG, SELFMAG);
766 elf->e_ident[EI_CLASS] = ELF_CLASS;
767 elf->e_ident[EI_DATA] = ELF_DATA;
768 elf->e_ident[EI_VERSION] = EV_CURRENT;
769 elf->e_ident[EI_OSABI] = ELF_OSABI;
770 memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
771 elf->e_type = ET_CORE;
772 elf->e_machine = ELF_ARCH;
773 elf->e_version = EV_CURRENT;
774 elf->e_entry = 0;
775 elf->e_phoff = sizeof(struct elfhdr);
776 elf->e_shoff = 0;
777 elf->e_flags = ELF_CORE_EFLAGS;
778 elf->e_ehsize = sizeof(struct elfhdr);
779 elf->e_phentsize = sizeof(struct elf_phdr);
780 elf->e_phnum = 0;
781 elf->e_shentsize = 0;
782 elf->e_shnum = 0;
783 elf->e_shstrndx = 0;
784
785 return 0;
786}
787
788/*
789 * Traverse through memblock structure and setup crash memory ranges. These
790 * ranges will be used create PT_LOAD program headers in elfcore header.
791 */
792static void fadump_setup_crash_memory_ranges(void)
793{
794 struct memblock_region *reg;
795 unsigned long long start, end;
796
797 pr_debug("Setup crash memory ranges.\n");
798 crash_mem_ranges = 0;
799 /*
800 * add the first memory chunk (RMA_START through boot_memory_size) as
801 * a separate memory chunk. The reason is, at the time crash firmware
802 * will move the content of this memory chunk to different location
803 * specified during fadump registration. We need to create a separate
804 * program header for this chunk with the correct offset.
805 */
806 fadump_add_crash_memory(RMA_START, fw_dump.boot_memory_size);
807
808 for_each_memblock(memory, reg) {
809 start = (unsigned long long)reg->base;
810 end = start + (unsigned long long)reg->size;
811 if (start == RMA_START && end >= fw_dump.boot_memory_size)
812 start = fw_dump.boot_memory_size;
813
814 /* add this range excluding the reserved dump area. */
815 fadump_exclude_reserved_area(start, end);
816 }
817}
818
819/*
820 * If the given physical address falls within the boot memory region then
821 * return the relocated address that points to the dump region reserved
822 * for saving initial boot memory contents.
823 */
824static inline unsigned long fadump_relocate(unsigned long paddr)
825{
826 if (paddr > RMA_START && paddr < fw_dump.boot_memory_size)
827 return fdm.rmr_region.destination_address + paddr;
828 else
829 return paddr;
830}
831
/*
 * Build the ELF core headers (ELF header plus program headers) at bufp.
 *
 * Layout produced: one PT_NOTE placeholder for per-cpu register notes
 * (populated by the second kernel after a crash), one PT_NOTE for
 * vmcoreinfo, then one PT_LOAD per crash memory range recorded by
 * fadump_setup_crash_memory_ranges(). Always returns 0.
 */
static int fadump_create_elfcore_headers(char *bufp)
{
	struct elfhdr *elf;
	struct elf_phdr *phdr;
	int i;

	fadump_init_elfcore_header(bufp);
	elf = (struct elfhdr *)bufp;
	bufp += sizeof(struct elfhdr);

	/*
	 * setup ELF PT_NOTE, place holder for cpu notes info. The notes info
	 * will be populated during second kernel boot after crash. Hence
	 * this PT_NOTE will always be the first elf note.
	 *
	 * NOTE: Any new ELF note addition should be placed after this note.
	 */
	phdr = (struct elf_phdr *)bufp;
	bufp += sizeof(struct elf_phdr);
	phdr->p_type = PT_NOTE;
	phdr->p_flags = 0;
	phdr->p_vaddr = 0;
	phdr->p_align = 0;

	/* All zero for now; fadump_update_elfcore_header() fills these in. */
	phdr->p_offset = 0;
	phdr->p_paddr = 0;
	phdr->p_filesz = 0;
	phdr->p_memsz = 0;

	(elf->e_phnum)++;

	/* setup ELF PT_NOTE for vmcoreinfo */
	phdr = (struct elf_phdr *)bufp;
	bufp += sizeof(struct elf_phdr);
	phdr->p_type = PT_NOTE;
	phdr->p_flags = 0;
	phdr->p_vaddr = 0;
	phdr->p_align = 0;

	/* The vmcoreinfo note may sit in relocated boot memory; fix up paddr. */
	phdr->p_paddr = fadump_relocate(paddr_vmcoreinfo_note());
	phdr->p_offset = phdr->p_paddr;
	phdr->p_memsz = vmcoreinfo_max_size;
	phdr->p_filesz = vmcoreinfo_max_size;

	/* Increment number of program headers. */
	(elf->e_phnum)++;

	/* setup PT_LOAD sections. */

	for (i = 0; i < crash_mem_ranges; i++) {
		unsigned long long mbase, msize;
		mbase = crash_memory_ranges[i].base;
		msize = crash_memory_ranges[i].size;

		/* Skip empty ranges. */
		if (!msize)
			continue;

		phdr = (struct elf_phdr *)bufp;
		bufp += sizeof(struct elf_phdr);
		phdr->p_type = PT_LOAD;
		phdr->p_flags = PF_R|PF_W|PF_X;
		phdr->p_offset = mbase;

		if (mbase == RMA_START) {
			/*
			 * The entire RMA region will be moved by firmware
			 * to the specified destination_address. Hence set
			 * the correct offset.
			 */
			phdr->p_offset = fdm.rmr_region.destination_address;
		}

		phdr->p_paddr = mbase;
		phdr->p_vaddr = (unsigned long)__va(mbase);
		phdr->p_filesz = msize;
		phdr->p_memsz = msize;
		phdr->p_align = 0;

		/* Increment number of program headers. */
		(elf->e_phnum)++;
	}
	return 0;
}
915
916static unsigned long init_fadump_header(unsigned long addr)
917{
918 struct fadump_crash_info_header *fdh;
919
920 if (!addr)
921 return 0;
922
923 fw_dump.fadumphdr_addr = addr;
924 fdh = __va(addr);
925 addr += sizeof(struct fadump_crash_info_header);
926
927 memset(fdh, 0, sizeof(struct fadump_crash_info_header));
928 fdh->magic_number = FADUMP_CRASH_INFO_MAGIC;
929 fdh->elfcorehdr_addr = addr;
930 /* We will set the crashing cpu id in crash_fadump() during crash. */
931 fdh->crashing_cpu = CPU_UNKNOWN;
932
933 return addr;
934}
935
/*
 * Prepare the crash memory ranges, crash info header and ELF core headers,
 * then register the dump with firmware via register_fw_dump().
 */
static void register_fadump(void)
{
	unsigned long addr;
	void *vaddr;

	/*
	 * If no memory is reserved then we can not register for firmware-
	 * assisted dump.
	 */
	if (!fw_dump.reserve_dump_area_size)
		return;

	fadump_setup_crash_memory_ranges();

	/* The crash info header lives just past the relocated RMA region. */
	addr = fdm.rmr_region.destination_address + fdm.rmr_region.source_len;
	/* Initialize fadump crash info header. */
	addr = init_fadump_header(addr);
	vaddr = __va(addr);

	pr_debug("Creating ELF core headers at %#016lx\n", addr);
	fadump_create_elfcore_headers(vaddr);

	/* register the future kernel dump with firmware. */
	register_fw_dump(&fdm);
}
961
962static int fadump_unregister_dump(struct fadump_mem_struct *fdm)
963{
964 int rc = 0;
965 unsigned int wait_time;
966
967 pr_debug("Un-register firmware-assisted dump\n");
968
969 /* TODO: Add upper time limit for the delay */
970 do {
971 rc = rtas_call(fw_dump.ibm_configure_kernel_dump, 3, 1, NULL,
972 FADUMP_UNREGISTER, fdm,
973 sizeof(struct fadump_mem_struct));
974
975 wait_time = rtas_busy_delay_time(rc);
976 if (wait_time)
977 mdelay(wait_time);
978 } while (wait_time);
979
980 if (rc) {
981 printk(KERN_ERR "Failed to un-register firmware-assisted dump."
982 " unexpected error(%d).\n", rc);
983 return rc;
984 }
985 fw_dump.dump_registered = 0;
986 return 0;
987}
988
989static int fadump_invalidate_dump(struct fadump_mem_struct *fdm)
990{
991 int rc = 0;
992 unsigned int wait_time;
993
994 pr_debug("Invalidating firmware-assisted dump registration\n");
995
996 /* TODO: Add upper time limit for the delay */
997 do {
998 rc = rtas_call(fw_dump.ibm_configure_kernel_dump, 3, 1, NULL,
999 FADUMP_INVALIDATE, fdm,
1000 sizeof(struct fadump_mem_struct));
1001
1002 wait_time = rtas_busy_delay_time(rc);
1003 if (wait_time)
1004 mdelay(wait_time);
1005 } while (wait_time);
1006
1007 if (rc) {
1008 printk(KERN_ERR "Failed to invalidate firmware-assisted dump "
1009 "rgistration. unexpected error(%d).\n", rc);
1010 return rc;
1011 }
1012 fw_dump.dump_active = 0;
1013 fdm_active = NULL;
1014 return 0;
1015}
1016
1017void fadump_cleanup(void)
1018{
1019 /* Invalidate the registration only if dump is active. */
1020 if (fw_dump.dump_active) {
1021 init_fadump_mem_struct(&fdm,
1022 fdm_active->cpu_state_data.destination_address);
1023 fadump_invalidate_dump(&fdm);
1024 }
1025}
1026
1027/*
1028 * Release the memory that was reserved in early boot to preserve the memory
1029 * contents. The released memory will be available for general use.
1030 */
1031static void fadump_release_memory(unsigned long begin, unsigned long end)
1032{
1033 unsigned long addr;
1034 unsigned long ra_start, ra_end;
1035
1036 ra_start = fw_dump.reserve_dump_area_start;
1037 ra_end = ra_start + fw_dump.reserve_dump_area_size;
1038
1039 for (addr = begin; addr < end; addr += PAGE_SIZE) {
1040 /*
1041 * exclude the dump reserve area. Will reuse it for next
1042 * fadump registration.
1043 */
1044 if (addr <= ra_end && ((addr + PAGE_SIZE) > ra_start))
1045 continue;
1046
1047 ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT));
1048 init_page_count(pfn_to_page(addr >> PAGE_SHIFT));
1049 free_page((unsigned long)__va(addr));
1050 totalram_pages++;
1051 }
1052}
1053
/*
 * Invalidate the active dump registration with firmware and release the
 * previously reserved memory for general use, keeping just enough memory
 * reserved (starting at the old destination address) for re-registration.
 */
static void fadump_invalidate_release_mem(void)
{
	unsigned long reserved_area_start, reserved_area_end;
	unsigned long destination_address;

	mutex_lock(&fadump_mutex);
	if (!fw_dump.dump_active) {
		mutex_unlock(&fadump_mutex);
		return;
	}

	/* Capture the destination before fadump_cleanup() invalidates it. */
	destination_address = fdm_active->cpu_state_data.destination_address;
	fadump_cleanup();
	mutex_unlock(&fadump_mutex);

	/*
	 * Save the current reserved memory bounds we will require them
	 * later for releasing the memory for general use.
	 */
	reserved_area_start = fw_dump.reserve_dump_area_start;
	reserved_area_end = reserved_area_start +
			fw_dump.reserve_dump_area_size;
	/*
	 * Setup reserve_dump_area_start and its size so that we can
	 * reuse this reserved memory for Re-registration.
	 */
	fw_dump.reserve_dump_area_start = destination_address;
	fw_dump.reserve_dump_area_size = get_fadump_area_size();

	fadump_release_memory(reserved_area_start, reserved_area_end);
	if (fw_dump.cpu_notes_buf) {
		/* Drop the cpu notes buffer built while processing the dump. */
		fadump_cpu_notes_buf_free(
				(unsigned long)__va(fw_dump.cpu_notes_buf),
				fw_dump.cpu_notes_buf_size);
		fw_dump.cpu_notes_buf = 0;
		fw_dump.cpu_notes_buf_size = 0;
	}
	/* Initialize the kernel dump memory structure for FAD registration. */
	init_fadump_mem_struct(&fdm, fw_dump.reserve_dump_area_start);
}
1094
1095static ssize_t fadump_release_memory_store(struct kobject *kobj,
1096 struct kobj_attribute *attr,
1097 const char *buf, size_t count)
1098{
1099 if (!fw_dump.dump_active)
1100 return -EPERM;
1101
1102 if (buf[0] == '1') {
1103 /*
1104 * Take away the '/proc/vmcore'. We are releasing the dump
1105 * memory, hence it will not be valid anymore.
1106 */
1107 vmcore_cleanup();
1108 fadump_invalidate_release_mem();
1109
1110 } else
1111 return -EINVAL;
1112 return count;
1113}
1114
/* sysfs show for 'fadump_enabled': report fw_dump.fadump_enabled. */
static ssize_t fadump_enabled_show(struct kobject *kobj,
					struct kobj_attribute *attr,
					char *buf)
{
	return sprintf(buf, "%d\n", fw_dump.fadump_enabled);
}
1121
/* sysfs show for 'fadump_registered': report fw_dump.dump_registered. */
static ssize_t fadump_register_show(struct kobject *kobj,
					struct kobj_attribute *attr,
					char *buf)
{
	return sprintf(buf, "%d\n", fw_dump.dump_registered);
}
1128
1129static ssize_t fadump_register_store(struct kobject *kobj,
1130 struct kobj_attribute *attr,
1131 const char *buf, size_t count)
1132{
1133 int ret = 0;
1134
1135 if (!fw_dump.fadump_enabled || fdm_active)
1136 return -EPERM;
1137
1138 mutex_lock(&fadump_mutex);
1139
1140 switch (buf[0]) {
1141 case '0':
1142 if (fw_dump.dump_registered == 0) {
1143 ret = -EINVAL;
1144 goto unlock_out;
1145 }
1146 /* Un-register Firmware-assisted dump */
1147 fadump_unregister_dump(&fdm);
1148 break;
1149 case '1':
1150 if (fw_dump.dump_registered == 1) {
1151 ret = -EINVAL;
1152 goto unlock_out;
1153 }
1154 /* Register Firmware-assisted dump */
1155 register_fadump();
1156 break;
1157 default:
1158 ret = -EINVAL;
1159 break;
1160 }
1161
1162unlock_out:
1163 mutex_unlock(&fadump_mutex);
1164 return ret < 0 ? ret : count;
1165}
1166
/*
 * debugfs 'fadump_region' show: print the CPU state, HPTE and real-memory
 * (DUMP) region layout together with the bytes dumped so far, plus the
 * reserved region when a dump is active.
 *
 * Locking: fadump_mutex is held across the seq_printf calls only when the
 * active dump structure (fdm_active) is being shown; otherwise the mutex
 * is dropped early and the prepared 'fdm' is shown instead.
 */
static int fadump_region_show(struct seq_file *m, void *private)
{
	const struct fadump_mem_struct *fdm_ptr;

	if (!fw_dump.fadump_enabled)
		return 0;

	mutex_lock(&fadump_mutex);
	if (fdm_active)
		fdm_ptr = fdm_active;
	else {
		/* No dump active: show the registration structure instead. */
		mutex_unlock(&fadump_mutex);
		fdm_ptr = &fdm;
	}

	seq_printf(m,
			"CPU : [%#016llx-%#016llx] %#llx bytes, "
			"Dumped: %#llx\n",
			fdm_ptr->cpu_state_data.destination_address,
			fdm_ptr->cpu_state_data.destination_address +
			fdm_ptr->cpu_state_data.source_len - 1,
			fdm_ptr->cpu_state_data.source_len,
			fdm_ptr->cpu_state_data.bytes_dumped);
	seq_printf(m,
			"HPTE: [%#016llx-%#016llx] %#llx bytes, "
			"Dumped: %#llx\n",
			fdm_ptr->hpte_region.destination_address,
			fdm_ptr->hpte_region.destination_address +
			fdm_ptr->hpte_region.source_len - 1,
			fdm_ptr->hpte_region.source_len,
			fdm_ptr->hpte_region.bytes_dumped);
	seq_printf(m,
			"DUMP: [%#016llx-%#016llx] %#llx bytes, "
			"Dumped: %#llx\n",
			fdm_ptr->rmr_region.destination_address,
			fdm_ptr->rmr_region.destination_address +
			fdm_ptr->rmr_region.source_len - 1,
			fdm_ptr->rmr_region.source_len,
			fdm_ptr->rmr_region.bytes_dumped);

	if (!fdm_active ||
		(fw_dump.reserve_dump_area_start ==
		fdm_ptr->cpu_state_data.destination_address))
		goto out;

	/* Dump is active. Show reserved memory region. */
	seq_printf(m,
			"    : [%#016llx-%#016llx] %#llx bytes, "
			"Dumped: %#llx\n",
			(unsigned long long)fw_dump.reserve_dump_area_start,
			fdm_ptr->cpu_state_data.destination_address - 1,
			fdm_ptr->cpu_state_data.destination_address -
			fw_dump.reserve_dump_area_start,
			fdm_ptr->cpu_state_data.destination_address -
			fw_dump.reserve_dump_area_start);
out:
	/* Only drop the mutex if the fdm_active path above kept it held. */
	if (fdm_active)
		mutex_unlock(&fadump_mutex);
	return 0;
}
1227
/* sysfs attributes exposed under /sys/kernel/. */
/* Write-only knob to release dump memory (valid while a dump is active). */
static struct kobj_attribute fadump_release_attr = __ATTR(fadump_release_mem,
						0200, NULL,
						fadump_release_memory_store);
/* Read-only: whether firmware-assisted dump is enabled this boot. */
static struct kobj_attribute fadump_attr = __ATTR(fadump_enabled,
						0444, fadump_enabled_show,
						NULL);
/* Read-write: register ('1') or un-register ('0') the dump with firmware. */
static struct kobj_attribute fadump_register_attr = __ATTR(fadump_registered,
						0644, fadump_register_show,
						fadump_register_store);
1237
/* debugfs open: wire fadump_region_show() into a seq_file single_open. */
static int fadump_region_open(struct inode *inode, struct file *file)
{
	return single_open(file, fadump_region_show, inode->i_private);
}
1242
/* File operations for the debugfs 'fadump_region' file. */
static const struct file_operations fadump_region_fops = {
	.open = fadump_region_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1249
1250static void fadump_init_files(void)
1251{
1252 struct dentry *debugfs_file;
1253 int rc = 0;
1254
1255 rc = sysfs_create_file(kernel_kobj, &fadump_attr.attr);
1256 if (rc)
1257 printk(KERN_ERR "fadump: unable to create sysfs file"
1258 " fadump_enabled (%d)\n", rc);
1259
1260 rc = sysfs_create_file(kernel_kobj, &fadump_register_attr.attr);
1261 if (rc)
1262 printk(KERN_ERR "fadump: unable to create sysfs file"
1263 " fadump_registered (%d)\n", rc);
1264
1265 debugfs_file = debugfs_create_file("fadump_region", 0444,
1266 powerpc_debugfs_root, NULL,
1267 &fadump_region_fops);
1268 if (!debugfs_file)
1269 printk(KERN_ERR "fadump: unable to create debugfs file"
1270 " fadump_region\n");
1271
1272 if (fw_dump.dump_active) {
1273 rc = sysfs_create_file(kernel_kobj, &fadump_release_attr.attr);
1274 if (rc)
1275 printk(KERN_ERR "fadump: unable to create sysfs file"
1276 " fadump_release_mem (%d)\n", rc);
1277 }
1278 return;
1279}
1280
1281/*
1282 * Prepare for firmware-assisted dump.
1283 */
1284int __init setup_fadump(void)
1285{
1286 if (!fw_dump.fadump_enabled)
1287 return 0;
1288
1289 if (!fw_dump.fadump_supported) {
1290 printk(KERN_ERR "Firmware-assisted dump is not supported on"
1291 " this hardware\n");
1292 return 0;
1293 }
1294
1295 fadump_show_config();
1296 /*
1297 * If dump data is available then see if it is valid and prepare for
1298 * saving it to the disk.
1299 */
1300 if (fw_dump.dump_active) {
1301 /*
1302 * if dump process fails then invalidate the registration
1303 * and release memory before proceeding for re-registration.
1304 */
1305 if (process_fadump(fdm_active) < 0)
1306 fadump_invalidate_release_mem();
1307 }
1308 /* Initialize the kernel dump memory structure for FAD registration. */
1309 else if (fw_dump.reserve_dump_area_size)
1310 init_fadump_mem_struct(&fdm, fw_dump.reserve_dump_area_start);
1311 fadump_init_files();
1312
1313 return 1;
1314}
1315subsys_initcall(setup_fadump);
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 0654dba2c1f1..dc0488b6f6e1 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -395,7 +395,7 @@ DataAccess:
395 bl hash_page 395 bl hash_page
3961: lwz r5,_DSISR(r11) /* get DSISR value */ 3961: lwz r5,_DSISR(r11) /* get DSISR value */
397 mfspr r4,SPRN_DAR 397 mfspr r4,SPRN_DAR
398 EXC_XFER_EE_LITE(0x300, handle_page_fault) 398 EXC_XFER_LITE(0x300, handle_page_fault)
399 399
400 400
401/* Instruction access exception. */ 401/* Instruction access exception. */
@@ -410,7 +410,7 @@ InstructionAccess:
410 bl hash_page 410 bl hash_page
4111: mr r4,r12 4111: mr r4,r12
412 mr r5,r9 412 mr r5,r9
413 EXC_XFER_EE_LITE(0x400, handle_page_fault) 413 EXC_XFER_LITE(0x400, handle_page_fault)
414 414
415/* External interrupt */ 415/* External interrupt */
416 EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) 416 EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 872a6af83bad..4989661b710b 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -394,7 +394,7 @@ label:
394 NORMAL_EXCEPTION_PROLOG 394 NORMAL_EXCEPTION_PROLOG
395 mr r4,r12 /* Pass SRR0 as arg2 */ 395 mr r4,r12 /* Pass SRR0 as arg2 */
396 li r5,0 /* Pass zero as arg3 */ 396 li r5,0 /* Pass zero as arg3 */
397 EXC_XFER_EE_LITE(0x400, handle_page_fault) 397 EXC_XFER_LITE(0x400, handle_page_fault)
398 398
399/* 0x0500 - External Interrupt Exception */ 399/* 0x0500 - External Interrupt Exception */
400 EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) 400 EXCEPTION(0x0500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
@@ -747,7 +747,7 @@ DataAccess:
747 mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ 747 mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
748 stw r5,_ESR(r11) 748 stw r5,_ESR(r11)
749 mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ 749 mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
750 EXC_XFER_EE_LITE(0x300, handle_page_fault) 750 EXC_XFER_LITE(0x300, handle_page_fault)
751 751
752/* Other PowerPC processors, namely those derived from the 6xx-series 752/* Other PowerPC processors, namely those derived from the 6xx-series
753 * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved. 753 * have vectors from 0x2100 through 0x2F00 defined, but marked as reserved.
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 06c7251c1bf7..58bddee8e1e8 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -32,13 +32,13 @@
32#include <asm/cputable.h> 32#include <asm/cputable.h>
33#include <asm/setup.h> 33#include <asm/setup.h>
34#include <asm/hvcall.h> 34#include <asm/hvcall.h>
35#include <asm/iseries/lpar_map.h>
36#include <asm/thread_info.h> 35#include <asm/thread_info.h>
37#include <asm/firmware.h> 36#include <asm/firmware.h>
38#include <asm/page_64.h> 37#include <asm/page_64.h>
39#include <asm/irqflags.h> 38#include <asm/irqflags.h>
40#include <asm/kvm_book3s_asm.h> 39#include <asm/kvm_book3s_asm.h>
41#include <asm/ptrace.h> 40#include <asm/ptrace.h>
41#include <asm/hw_irq.h>
42 42
43/* The physical memory is laid out such that the secondary processor 43/* The physical memory is laid out such that the secondary processor
44 * spin code sits at 0x0000...0x00ff. On server, the vectors follow 44 * spin code sits at 0x0000...0x00ff. On server, the vectors follow
@@ -57,10 +57,6 @@
57 * entry in r9 for debugging purposes 57 * entry in r9 for debugging purposes
58 * 2. Secondary processors enter at 0x60 with PIR in gpr3 58 * 2. Secondary processors enter at 0x60 with PIR in gpr3
59 * 59 *
60 * For iSeries:
61 * 1. The MMU is on (as it always is for iSeries)
62 * 2. The kernel is entered at system_reset_iSeries
63 *
64 * For Book3E processors: 60 * For Book3E processors:
65 * 1. The MMU is on running in AS0 in a state defined in ePAPR 61 * 1. The MMU is on running in AS0 in a state defined in ePAPR
66 * 2. The kernel is entered at __start 62 * 2. The kernel is entered at __start
@@ -93,15 +89,6 @@ __secondary_hold_spinloop:
93__secondary_hold_acknowledge: 89__secondary_hold_acknowledge:
94 .llong 0x0 90 .llong 0x0
95 91
96#ifdef CONFIG_PPC_ISERIES
97 /*
98 * At offset 0x20, there is a pointer to iSeries LPAR data.
99 * This is required by the hypervisor
100 */
101 . = 0x20
102 .llong hvReleaseData-KERNELBASE
103#endif /* CONFIG_PPC_ISERIES */
104
105#ifdef CONFIG_RELOCATABLE 92#ifdef CONFIG_RELOCATABLE
106 /* This flag is set to 1 by a loader if the kernel should run 93 /* This flag is set to 1 by a loader if the kernel should run
107 * at the loaded address instead of the linked address. This 94 * at the loaded address instead of the linked address. This
@@ -564,7 +551,8 @@ _GLOBAL(pmac_secondary_start)
564 */ 551 */
565 li r0,0 552 li r0,0
566 stb r0,PACASOFTIRQEN(r13) 553 stb r0,PACASOFTIRQEN(r13)
567 stb r0,PACAHARDIRQEN(r13) 554 li r0,PACA_IRQ_HARD_DIS
555 stb r0,PACAIRQHAPPENED(r13)
568 556
569 /* Create a temp kernel stack for use before relocation is on. */ 557 /* Create a temp kernel stack for use before relocation is on. */
570 ld r1,PACAEMERGSP(r13) 558 ld r1,PACAEMERGSP(r13)
@@ -582,7 +570,7 @@ _GLOBAL(pmac_secondary_start)
582 * 1. Processor number 570 * 1. Processor number
583 * 2. Segment table pointer (virtual address) 571 * 2. Segment table pointer (virtual address)
584 * On entry the following are set: 572 * On entry the following are set:
585 * r1 = stack pointer. vaddr for iSeries, raddr (temp stack) for pSeries 573 * r1 = stack pointer (real addr of temp stack)
586 * r24 = cpu# (in Linux terms) 574 * r24 = cpu# (in Linux terms)
587 * r13 = paca virtual address 575 * r13 = paca virtual address
588 * SPRG_PACA = paca virtual address 576 * SPRG_PACA = paca virtual address
@@ -595,7 +583,7 @@ __secondary_start:
595 /* Set thread priority to MEDIUM */ 583 /* Set thread priority to MEDIUM */
596 HMT_MEDIUM 584 HMT_MEDIUM
597 585
598 /* Initialize the kernel stack. Just a repeat for iSeries. */ 586 /* Initialize the kernel stack */
599 LOAD_REG_ADDR(r3, current_set) 587 LOAD_REG_ADDR(r3, current_set)
600 sldi r28,r24,3 /* get current_set[cpu#] */ 588 sldi r28,r24,3 /* get current_set[cpu#] */
601 ldx r14,r3,r28 589 ldx r14,r3,r28
@@ -615,20 +603,16 @@ __secondary_start:
615 li r7,0 603 li r7,0
616 mtlr r7 604 mtlr r7
617 605
606 /* Mark interrupts soft and hard disabled (they might be enabled
607 * in the PACA when doing hotplug)
608 */
609 stb r7,PACASOFTIRQEN(r13)
610 li r0,PACA_IRQ_HARD_DIS
611 stb r0,PACAIRQHAPPENED(r13)
612
618 /* enable MMU and jump to start_secondary */ 613 /* enable MMU and jump to start_secondary */
619 LOAD_REG_ADDR(r3, .start_secondary_prolog) 614 LOAD_REG_ADDR(r3, .start_secondary_prolog)
620 LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) 615 LOAD_REG_IMMEDIATE(r4, MSR_KERNEL)
621#ifdef CONFIG_PPC_ISERIES
622BEGIN_FW_FTR_SECTION
623 ori r4,r4,MSR_EE
624 li r8,1
625 stb r8,PACAHARDIRQEN(r13)
626END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
627#endif
628BEGIN_FW_FTR_SECTION
629 stb r7,PACAHARDIRQEN(r13)
630END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES)
631 stb r7,PACASOFTIRQEN(r13)
632 616
633 mtspr SPRN_SRR0,r3 617 mtspr SPRN_SRR0,r3
634 mtspr SPRN_SRR1,r4 618 mtspr SPRN_SRR1,r4
@@ -771,22 +755,18 @@ _INIT_GLOBAL(start_here_common)
771 /* Load the TOC (virtual address) */ 755 /* Load the TOC (virtual address) */
772 ld r2,PACATOC(r13) 756 ld r2,PACATOC(r13)
773 757
758 /* Do more system initializations in virtual mode */
774 bl .setup_system 759 bl .setup_system
775 760
776 /* Load up the kernel context */ 761 /* Mark interrupts soft and hard disabled (they might be enabled
7775: 762 * in the PACA when doing hotplug)
778 li r5,0 763 */
779 stb r5,PACASOFTIRQEN(r13) /* Soft Disabled */ 764 li r0,0
780#ifdef CONFIG_PPC_ISERIES 765 stb r0,PACASOFTIRQEN(r13)
781BEGIN_FW_FTR_SECTION 766 li r0,PACA_IRQ_HARD_DIS
782 mfmsr r5 767 stb r0,PACAIRQHAPPENED(r13)
783 ori r5,r5,MSR_EE /* Hard Enabled on iSeries*/
784 mtmsrd r5
785 li r5,1
786END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
787#endif
788 stb r5,PACAHARDIRQEN(r13) /* Hard Disabled on others */
789 768
769 /* Generic kernel entry */
790 bl .start_kernel 770 bl .start_kernel
791 771
792 /* Not reached */ 772 /* Not reached */
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index b68cb173ba2c..b2a5860accfb 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -220,7 +220,7 @@ DataAccess:
220 mfspr r4,SPRN_DAR 220 mfspr r4,SPRN_DAR
221 li r10,0x00f0 221 li r10,0x00f0
222 mtspr SPRN_DAR,r10 /* Tag DAR, to be used in DTLB Error */ 222 mtspr SPRN_DAR,r10 /* Tag DAR, to be used in DTLB Error */
223 EXC_XFER_EE_LITE(0x300, handle_page_fault) 223 EXC_XFER_LITE(0x300, handle_page_fault)
224 224
225/* Instruction access exception. 225/* Instruction access exception.
226 * This is "never generated" by the MPC8xx. We jump to it for other 226 * This is "never generated" by the MPC8xx. We jump to it for other
@@ -231,7 +231,7 @@ InstructionAccess:
231 EXCEPTION_PROLOG 231 EXCEPTION_PROLOG
232 mr r4,r12 232 mr r4,r12
233 mr r5,r9 233 mr r5,r9
234 EXC_XFER_EE_LITE(0x400, handle_page_fault) 234 EXC_XFER_LITE(0x400, handle_page_fault)
235 235
236/* External interrupt */ 236/* External interrupt */
237 EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) 237 EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index fc921bf62e15..0e4175388f47 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -359,7 +359,7 @@ label:
359 mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \ 359 mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \
360 stw r5,_ESR(r11); \ 360 stw r5,_ESR(r11); \
361 mfspr r4,SPRN_DEAR; /* Grab the DEAR */ \ 361 mfspr r4,SPRN_DEAR; /* Grab the DEAR */ \
362 EXC_XFER_EE_LITE(0x0300, handle_page_fault) 362 EXC_XFER_LITE(0x0300, handle_page_fault)
363 363
364#define INSTRUCTION_STORAGE_EXCEPTION \ 364#define INSTRUCTION_STORAGE_EXCEPTION \
365 START_EXCEPTION(InstructionStorage) \ 365 START_EXCEPTION(InstructionStorage) \
@@ -368,7 +368,7 @@ label:
368 stw r5,_ESR(r11); \ 368 stw r5,_ESR(r11); \
369 mr r4,r12; /* Pass SRR0 as arg2 */ \ 369 mr r4,r12; /* Pass SRR0 as arg2 */ \
370 li r5,0; /* Pass zero as arg3 */ \ 370 li r5,0; /* Pass zero as arg3 */ \
371 EXC_XFER_EE_LITE(0x0400, handle_page_fault) 371 EXC_XFER_LITE(0x0400, handle_page_fault)
372 372
373#define ALIGNMENT_EXCEPTION \ 373#define ALIGNMENT_EXCEPTION \
374 START_EXCEPTION(Alignment) \ 374 START_EXCEPTION(Alignment) \
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index d5d78c4ceef6..28e62598d0e8 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -319,7 +319,7 @@ interrupt_base:
319 mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ 319 mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
320 andis. r10,r5,(ESR_ILK|ESR_DLK)@h 320 andis. r10,r5,(ESR_ILK|ESR_DLK)@h
321 bne 1f 321 bne 1f
322 EXC_XFER_EE_LITE(0x0300, handle_page_fault) 322 EXC_XFER_LITE(0x0300, handle_page_fault)
3231: 3231:
324 addi r3,r1,STACK_FRAME_OVERHEAD 324 addi r3,r1,STACK_FRAME_OVERHEAD
325 EXC_XFER_EE_LITE(0x0300, CacheLockingException) 325 EXC_XFER_EE_LITE(0x0300, CacheLockingException)
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c
index d39ae606ff8d..79bb282e6501 100644
--- a/arch/powerpc/kernel/ibmebus.c
+++ b/arch/powerpc/kernel/ibmebus.c
@@ -713,7 +713,7 @@ static struct dev_pm_ops ibmebus_bus_dev_pm_ops = {
713 713
714struct bus_type ibmebus_bus_type = { 714struct bus_type ibmebus_bus_type = {
715 .name = "ibmebus", 715 .name = "ibmebus",
716 .uevent = of_device_uevent, 716 .uevent = of_device_uevent_modalias,
717 .bus_attrs = ibmebus_bus_attrs, 717 .bus_attrs = ibmebus_bus_attrs,
718 .match = ibmebus_bus_bus_match, 718 .match = ibmebus_bus_bus_match,
719 .probe = ibmebus_bus_device_probe, 719 .probe = ibmebus_bus_device_probe,
diff --git a/arch/powerpc/kernel/idle.c b/arch/powerpc/kernel/idle.c
index 0a48bf5db6c8..e8e821146f38 100644
--- a/arch/powerpc/kernel/idle.c
+++ b/arch/powerpc/kernel/idle.c
@@ -84,7 +84,11 @@ void cpu_idle(void)
84 84
85 start_critical_timings(); 85 start_critical_timings();
86 86
87 local_irq_enable(); 87 /* Some power_save functions return with
88 * interrupts enabled, some don't.
89 */
90 if (irqs_disabled())
91 local_irq_enable();
88 set_thread_flag(TIF_POLLING_NRFLAG); 92 set_thread_flag(TIF_POLLING_NRFLAG);
89 93
90 } else { 94 } else {
@@ -101,11 +105,11 @@ void cpu_idle(void)
101 ppc64_runlatch_on(); 105 ppc64_runlatch_on();
102 rcu_idle_exit(); 106 rcu_idle_exit();
103 tick_nohz_idle_exit(); 107 tick_nohz_idle_exit();
104 preempt_enable_no_resched(); 108 if (cpu_should_die()) {
105 if (cpu_should_die()) 109 sched_preempt_enable_no_resched();
106 cpu_die(); 110 cpu_die();
107 schedule(); 111 }
108 preempt_disable(); 112 schedule_preempt_disabled();
109 } 113 }
110} 114}
111 115
diff --git a/arch/powerpc/kernel/idle_book3e.S b/arch/powerpc/kernel/idle_book3e.S
index 16c002d6bdf1..ff007b59448d 100644
--- a/arch/powerpc/kernel/idle_book3e.S
+++ b/arch/powerpc/kernel/idle_book3e.S
@@ -29,43 +29,30 @@ _GLOBAL(book3e_idle)
29 wrteei 0 29 wrteei 0
30 30
31 /* Now check if an interrupt came in while we were soft disabled 31 /* Now check if an interrupt came in while we were soft disabled
32 * since we may otherwise lose it (doorbells etc...). We know 32 * since we may otherwise lose it (doorbells etc...).
33 * that since PACAHARDIRQEN will have been cleared in that case.
34 */ 33 */
35 lbz r3,PACAHARDIRQEN(r13) 34 lbz r3,PACAIRQHAPPENED(r13)
36 cmpwi cr0,r3,0 35 cmpwi cr0,r3,0
37 beqlr 36 bnelr
38 37
39 /* Now we are going to mark ourselves as soft and hard enables in 38 /* Now we are going to mark ourselves as soft and hard enabled in
40 * order to be able to take interrupts while asleep. We inform lockdep 39 * order to be able to take interrupts while asleep. We inform lockdep
41 * of that. We don't actually turn interrupts on just yet tho. 40 * of that. We don't actually turn interrupts on just yet tho.
42 */ 41 */
43#ifdef CONFIG_TRACE_IRQFLAGS 42#ifdef CONFIG_TRACE_IRQFLAGS
44 stdu r1,-128(r1) 43 stdu r1,-128(r1)
45 bl .trace_hardirqs_on 44 bl .trace_hardirqs_on
45 addi r1,r1,128
46#endif 46#endif
47 li r0,1 47 li r0,1
48 stb r0,PACASOFTIRQEN(r13) 48 stb r0,PACASOFTIRQEN(r13)
49 stb r0,PACAHARDIRQEN(r13)
50 49
51 /* Interrupts will make use return to LR, so get something we want 50 /* Interrupts will make use return to LR, so get something we want
52 * in there 51 * in there
53 */ 52 */
54 bl 1f 53 bl 1f
55 54
56 /* Hard disable interrupts again */ 55 /* And return (interrupts are on) */
57 wrteei 0
58
59 /* Mark them off again in the PACA as well */
60 li r0,0
61 stb r0,PACASOFTIRQEN(r13)
62 stb r0,PACAHARDIRQEN(r13)
63
64 /* Tell lockdep about it */
65#ifdef CONFIG_TRACE_IRQFLAGS
66 bl .trace_hardirqs_off
67 addi r1,r1,128
68#endif
69 ld r0,16(r1) 56 ld r0,16(r1)
70 mtlr r0 57 mtlr r0
71 blr 58 blr
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S
index ba3195478600..2c71b0fc9f91 100644
--- a/arch/powerpc/kernel/idle_power4.S
+++ b/arch/powerpc/kernel/idle_power4.S
@@ -14,6 +14,7 @@
14#include <asm/thread_info.h> 14#include <asm/thread_info.h>
15#include <asm/ppc_asm.h> 15#include <asm/ppc_asm.h>
16#include <asm/asm-offsets.h> 16#include <asm/asm-offsets.h>
17#include <asm/irqflags.h>
17 18
18#undef DEBUG 19#undef DEBUG
19 20
@@ -29,14 +30,31 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CAN_NAP)
29 cmpwi 0,r4,0 30 cmpwi 0,r4,0
30 beqlr 31 beqlr
31 32
32 /* Go to NAP now */ 33 /* Hard disable interrupts */
33 mfmsr r7 34 mfmsr r7
34 rldicl r0,r7,48,1 35 rldicl r0,r7,48,1
35 rotldi r0,r0,16 36 rotldi r0,r0,16
36 mtmsrd r0,1 /* hard-disable interrupts */ 37 mtmsrd r0,1
38
39 /* Check if something happened while soft-disabled */
40 lbz r0,PACAIRQHAPPENED(r13)
41 cmpwi cr0,r0,0
42 bnelr
43
44 /* Soft-enable interrupts */
45#ifdef CONFIG_TRACE_IRQFLAGS
46 mflr r0
47 std r0,16(r1)
48 stdu r1,-128(r1)
49 bl .trace_hardirqs_on
50 addi r1,r1,128
51 ld r0,16(r1)
52 mtlr r0
53 mfmsr r7
54#endif /* CONFIG_TRACE_IRQFLAGS */
55
37 li r0,1 56 li r0,1
38 stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */ 57 stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */
39 stb r0,PACAHARDIRQEN(r13)
40BEGIN_FTR_SECTION 58BEGIN_FTR_SECTION
41 DSSALL 59 DSSALL
42 sync 60 sync
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index fcdff198da4b..0cdc9a392839 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -1,5 +1,5 @@
1/* 1/*
2 * This file contains the power_save function for 970-family CPUs. 2 * This file contains the power_save function for Power7 CPUs.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License 5 * modify it under the terms of the GNU General Public License
@@ -15,6 +15,7 @@
15#include <asm/ppc_asm.h> 15#include <asm/ppc_asm.h>
16#include <asm/asm-offsets.h> 16#include <asm/asm-offsets.h>
17#include <asm/ppc-opcode.h> 17#include <asm/ppc-opcode.h>
18#include <asm/hw_irq.h>
18 19
19#undef DEBUG 20#undef DEBUG
20 21
@@ -51,9 +52,25 @@ _GLOBAL(power7_idle)
51 rldicl r9,r9,48,1 52 rldicl r9,r9,48,1
52 rotldi r9,r9,16 53 rotldi r9,r9,16
53 mtmsrd r9,1 /* hard-disable interrupts */ 54 mtmsrd r9,1 /* hard-disable interrupts */
55
56 /* Check if something happened while soft-disabled */
57 lbz r0,PACAIRQHAPPENED(r13)
58 cmpwi cr0,r0,0
59 beq 1f
60 addi r1,r1,INT_FRAME_SIZE
61 ld r0,16(r1)
62 mtlr r0
63 blr
64
651: /* We mark irqs hard disabled as this is the state we'll
66 * be in when returning and we need to tell arch_local_irq_restore()
67 * about it
68 */
69 li r0,PACA_IRQ_HARD_DIS
70 stb r0,PACAIRQHAPPENED(r13)
71
72 /* We haven't lost state ... yet */
54 li r0,0 73 li r0,0
55 stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */
56 stb r0,PACAHARDIRQEN(r13)
57 stb r0,PACA_NAPSTATELOST(r13) 74 stb r0,PACA_NAPSTATELOST(r13)
58 75
59 /* Continue saving state */ 76 /* Continue saving state */
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index 0cfcf98aafca..359f078571c7 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -39,6 +39,7 @@
39#include <asm/pci-bridge.h> 39#include <asm/pci-bridge.h>
40#include <asm/machdep.h> 40#include <asm/machdep.h>
41#include <asm/kdump.h> 41#include <asm/kdump.h>
42#include <asm/fadump.h>
42 43
43#define DBG(...) 44#define DBG(...)
44 45
@@ -445,7 +446,12 @@ void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
445 446
446static void iommu_table_clear(struct iommu_table *tbl) 447static void iommu_table_clear(struct iommu_table *tbl)
447{ 448{
448 if (!is_kdump_kernel()) { 449 /*
450 * In case of firmware assisted dump system goes through clean
451 * reboot process at the time of system crash. Hence it's safe to
452 * clear the TCE entries if firmware assisted dump is active.
453 */
454 if (!is_kdump_kernel() || is_fadump_active()) {
449 /* Clear the table in case firmware left allocations in it */ 455 /* Clear the table in case firmware left allocations in it */
450 ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size); 456 ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size);
451 return; 457 return;
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 701d4aceb4f4..cea2d9f3ae4e 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -93,20 +93,16 @@ extern int tau_interrupts(int);
93 93
94#ifdef CONFIG_PPC64 94#ifdef CONFIG_PPC64
95 95
96#ifndef CONFIG_SPARSE_IRQ
97EXPORT_SYMBOL(irq_desc);
98#endif
99
100int distribute_irqs = 1; 96int distribute_irqs = 1;
101 97
102static inline notrace unsigned long get_hard_enabled(void) 98static inline notrace unsigned long get_irq_happened(void)
103{ 99{
104 unsigned long enabled; 100 unsigned long happened;
105 101
106 __asm__ __volatile__("lbz %0,%1(13)" 102 __asm__ __volatile__("lbz %0,%1(13)"
107 : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled))); 103 : "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));
108 104
109 return enabled; 105 return happened;
110} 106}
111 107
112static inline notrace void set_soft_enabled(unsigned long enable) 108static inline notrace void set_soft_enabled(unsigned long enable)
@@ -115,84 +111,162 @@ static inline notrace void set_soft_enabled(unsigned long enable)
115 : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); 111 : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
116} 112}
117 113
118static inline notrace void decrementer_check_overflow(void) 114static inline notrace int decrementer_check_overflow(void)
119{ 115{
120 u64 now = get_tb_or_rtc(); 116 u64 now = get_tb_or_rtc();
121 u64 *next_tb = &__get_cpu_var(decrementers_next_tb); 117 u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
122 118
123 if (now >= *next_tb) 119 if (now >= *next_tb)
124 set_dec(1); 120 set_dec(1);
121 return now >= *next_tb;
125} 122}
126 123
127notrace void arch_local_irq_restore(unsigned long en) 124/* This is called whenever we are re-enabling interrupts
125 * and returns either 0 (nothing to do) or 500/900 if there's
126 * either an EE or a DEC to generate.
127 *
128 * This is called in two contexts: From arch_local_irq_restore()
129 * before soft-enabling interrupts, and from the exception exit
130 * path when returning from an interrupt from a soft-disabled to
131 * a soft enabled context. In both case we have interrupts hard
132 * disabled.
133 *
134 * We take care of only clearing the bits we handled in the
135 * PACA irq_happened field since we can only re-emit one at a
136 * time and we don't want to "lose" one.
137 */
138notrace unsigned int __check_irq_replay(void)
128{ 139{
129 /* 140 /*
130 * get_paca()->soft_enabled = en; 141 * We use local_paca rather than get_paca() to avoid all
131 * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1? 142 * the debug_smp_processor_id() business in this low level
132 * That was allowed before, and in such a case we do need to take care 143 * function
133 * that gcc will set soft_enabled directly via r13, not choose to use
134 * an intermediate register, lest we're preempted to a different cpu.
135 */ 144 */
136 set_soft_enabled(en); 145 unsigned char happened = local_paca->irq_happened;
137 if (!en)
138 return;
139 146
140#ifdef CONFIG_PPC_STD_MMU_64 147 /* Clear bit 0 which we wouldn't clear otherwise */
141 if (firmware_has_feature(FW_FEATURE_ISERIES)) { 148 local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
142 /* 149
143 * Do we need to disable preemption here? Not really: in the 150 /*
144 * unlikely event that we're preempted to a different cpu in 151 * Force the delivery of pending soft-disabled interrupts on PS3.
145 * between getting r13, loading its lppaca_ptr, and loading 152 * Any HV call will have this side effect.
146 * its any_int, we might call iseries_handle_interrupts without 153 */
147 * an interrupt pending on the new cpu, but that's no disaster, 154 if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
148 * is it? And the business of preempting us off the old cpu 155 u64 tmp, tmp2;
149 * would itself involve a local_irq_restore which handles the 156 lv1_get_version_info(&tmp, &tmp2);
150 * interrupt to that cpu.
151 *
152 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
153 * to avoid any preemption checking added into get_paca().
154 */
155 if (local_paca->lppaca_ptr->int_dword.any_int)
156 iseries_handle_interrupts();
157 } 157 }
158#endif /* CONFIG_PPC_STD_MMU_64 */
159 158
160 /* 159 /*
161 * if (get_paca()->hard_enabled) return; 160 * We may have missed a decrementer interrupt. We check the
162 * But again we need to take care that gcc gets hard_enabled directly 161 * decrementer itself rather than the paca irq_happened field
163 * via r13, not choose to use an intermediate register, lest we're 162 * in case we also had a rollover while hard disabled
164 * preempted to a different cpu in between the two instructions. 163 */
164 local_paca->irq_happened &= ~PACA_IRQ_DEC;
165 if (decrementer_check_overflow())
166 return 0x900;
167
168 /* Finally check if an external interrupt happened */
169 local_paca->irq_happened &= ~PACA_IRQ_EE;
170 if (happened & PACA_IRQ_EE)
171 return 0x500;
172
173#ifdef CONFIG_PPC_BOOK3E
174 /* Finally check if an EPR external interrupt happened
175 * this bit is typically set if we need to handle another
176 * "edge" interrupt from within the MPIC "EPR" handler
177 */
178 local_paca->irq_happened &= ~PACA_IRQ_EE_EDGE;
179 if (happened & PACA_IRQ_EE_EDGE)
180 return 0x500;
181
182 local_paca->irq_happened &= ~PACA_IRQ_DBELL;
183 if (happened & PACA_IRQ_DBELL)
184 return 0x280;
185#endif /* CONFIG_PPC_BOOK3E */
186
187 /* There should be nothing left ! */
188 BUG_ON(local_paca->irq_happened != 0);
189
190 return 0;
191}
192
193notrace void arch_local_irq_restore(unsigned long en)
194{
195 unsigned char irq_happened;
196 unsigned int replay;
197
198 /* Write the new soft-enabled value */
199 set_soft_enabled(en);
200 if (!en)
201 return;
202 /*
203 * From this point onward, we can take interrupts, preempt,
204 * etc... unless we got hard-disabled. We check if an event
205 * happened. If none happened, we know we can just return.
206 *
207 * We may have preempted before the check below, in which case
208 * we are checking the "new" CPU instead of the old one. This
209 * is only a problem if an event happened on the "old" CPU.
210 *
211 * External interrupt events will have caused interrupts to
212 * be hard-disabled, so there is no problem, we
213 * cannot have preempted.
165 */ 214 */
166 if (get_hard_enabled()) 215 irq_happened = get_irq_happened();
216 if (!irq_happened)
167 return; 217 return;
168 218
169 /* 219 /*
170 * Need to hard-enable interrupts here. Since currently disabled, 220 * We need to hard disable to get a trusted value from
171 * no need to take further asm precautions against preemption; but 221 * __check_irq_replay(). We also need to soft-disable
172 * use local_paca instead of get_paca() to avoid preemption checking. 222 * again to avoid warnings in there due to the use of
223 * per-cpu variables.
224 *
225 * We know that if the value in irq_happened is exactly 0x01
226 * then we are already hard disabled (there are other less
227 * common cases that we'll ignore for now), so we skip the
228 * (expensive) mtmsrd.
173 */ 229 */
174 local_paca->hard_enabled = en; 230 if (unlikely(irq_happened != PACA_IRQ_HARD_DIS))
231 __hard_irq_disable();
232 set_soft_enabled(0);
175 233
176 /* 234 /*
177 * Trigger the decrementer if we have a pending event. Some processors 235 * Check if anything needs to be re-emitted. We haven't
178 * only trigger on edge transitions of the sign bit. We might also 236 * soft-enabled yet to avoid warnings in decrementer_check_overflow
179 * have disabled interrupts long enough that the decrementer wrapped 237 * accessing per-cpu variables
180 * to positive.
181 */ 238 */
182 decrementer_check_overflow(); 239 replay = __check_irq_replay();
240
241 /* We can soft-enable now */
242 set_soft_enabled(1);
183 243
184 /* 244 /*
185 * Force the delivery of pending soft-disabled interrupts on PS3. 245 * And replay if we have to. This will return with interrupts
186 * Any HV call will have this side effect. 246 * hard-enabled.
187 */ 247 */
188 if (firmware_has_feature(FW_FEATURE_PS3_LV1)) { 248 if (replay) {
189 u64 tmp, tmp2; 249 __replay_interrupt(replay);
190 lv1_get_version_info(&tmp, &tmp2); 250 return;
191 } 251 }
192 252
253 /* Finally, let's ensure we are hard enabled */
193 __hard_irq_enable(); 254 __hard_irq_enable();
194} 255}
195EXPORT_SYMBOL(arch_local_irq_restore); 256EXPORT_SYMBOL(arch_local_irq_restore);
257
258/*
259 * This is specifically called by assembly code to re-enable interrupts
260 * if they are currently disabled. This is typically called before
261 * schedule() or do_signal() when returning to userspace. We do it
262 * in C to avoid the burden of dealing with lockdep etc...
263 */
264void restore_interrupts(void)
265{
266 if (irqs_disabled())
267 local_irq_enable();
268}
269
196#endif /* CONFIG_PPC64 */ 270#endif /* CONFIG_PPC64 */
197 271
198int arch_show_interrupts(struct seq_file *p, int prec) 272int arch_show_interrupts(struct seq_file *p, int prec)
@@ -360,25 +434,25 @@ void do_IRQ(struct pt_regs *regs)
360 434
361 check_stack_overflow(); 435 check_stack_overflow();
362 436
437 /*
438 * Query the platform PIC for the interrupt & ack it.
439 *
440 * This will typically lower the interrupt line to the CPU
441 */
363 irq = ppc_md.get_irq(); 442 irq = ppc_md.get_irq();
364 443
365 if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) 444 /* We can hard enable interrupts now */
445 may_hard_irq_enable();
446
447 /* And finally process it */
448 if (irq != NO_IRQ)
366 handle_one_irq(irq); 449 handle_one_irq(irq);
367 else if (irq != NO_IRQ_IGNORE) 450 else
368 __get_cpu_var(irq_stat).spurious_irqs++; 451 __get_cpu_var(irq_stat).spurious_irqs++;
369 452
370 irq_exit(); 453 irq_exit();
371 set_irq_regs(old_regs); 454 set_irq_regs(old_regs);
372 455
373#ifdef CONFIG_PPC_ISERIES
374 if (firmware_has_feature(FW_FEATURE_ISERIES) &&
375 get_lppaca()->int_dword.fields.decr_int) {
376 get_lppaca()->int_dword.fields.decr_int = 0;
377 /* Signal a fake decrementer interrupt */
378 timer_interrupt(regs);
379 }
380#endif
381
382 trace_irq_exit(regs); 456 trace_irq_exit(regs);
383} 457}
384 458
@@ -486,409 +560,19 @@ void do_softirq(void)
486 local_irq_restore(flags); 560 local_irq_restore(flags);
487} 561}
488 562
489
490/*
491 * IRQ controller and virtual interrupts
492 */
493
494/* The main irq map itself is an array of NR_IRQ entries containing the
495 * associate host and irq number. An entry with a host of NULL is free.
496 * An entry can be allocated if it's free, the allocator always then sets
497 * hwirq first to the host's invalid irq number and then fills ops.
498 */
499struct irq_map_entry {
500 irq_hw_number_t hwirq;
501 struct irq_host *host;
502};
503
504static LIST_HEAD(irq_hosts);
505static DEFINE_RAW_SPINLOCK(irq_big_lock);
506static DEFINE_MUTEX(revmap_trees_mutex);
507static struct irq_map_entry irq_map[NR_IRQS];
508static unsigned int irq_virq_count = NR_IRQS;
509static struct irq_host *irq_default_host;
510
511irq_hw_number_t irqd_to_hwirq(struct irq_data *d) 563irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
512{ 564{
513 return irq_map[d->irq].hwirq; 565 return d->hwirq;
514} 566}
515EXPORT_SYMBOL_GPL(irqd_to_hwirq); 567EXPORT_SYMBOL_GPL(irqd_to_hwirq);
516 568
517irq_hw_number_t virq_to_hw(unsigned int virq) 569irq_hw_number_t virq_to_hw(unsigned int virq)
518{ 570{
519 return irq_map[virq].hwirq; 571 struct irq_data *irq_data = irq_get_irq_data(virq);
572 return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
520} 573}
521EXPORT_SYMBOL_GPL(virq_to_hw); 574EXPORT_SYMBOL_GPL(virq_to_hw);
522 575
523bool virq_is_host(unsigned int virq, struct irq_host *host)
524{
525 return irq_map[virq].host == host;
526}
527EXPORT_SYMBOL_GPL(virq_is_host);
528
529static int default_irq_host_match(struct irq_host *h, struct device_node *np)
530{
531 return h->of_node != NULL && h->of_node == np;
532}
533
534struct irq_host *irq_alloc_host(struct device_node *of_node,
535 unsigned int revmap_type,
536 unsigned int revmap_arg,
537 struct irq_host_ops *ops,
538 irq_hw_number_t inval_irq)
539{
540 struct irq_host *host;
541 unsigned int size = sizeof(struct irq_host);
542 unsigned int i;
543 unsigned int *rmap;
544 unsigned long flags;
545
546 /* Allocate structure and revmap table if using linear mapping */
547 if (revmap_type == IRQ_HOST_MAP_LINEAR)
548 size += revmap_arg * sizeof(unsigned int);
549 host = kzalloc(size, GFP_KERNEL);
550 if (host == NULL)
551 return NULL;
552
553 /* Fill structure */
554 host->revmap_type = revmap_type;
555 host->inval_irq = inval_irq;
556 host->ops = ops;
557 host->of_node = of_node_get(of_node);
558
559 if (host->ops->match == NULL)
560 host->ops->match = default_irq_host_match;
561
562 raw_spin_lock_irqsave(&irq_big_lock, flags);
563
564 /* If it's a legacy controller, check for duplicates and
565 * mark it as allocated (we use irq 0 host pointer for that
566 */
567 if (revmap_type == IRQ_HOST_MAP_LEGACY) {
568 if (irq_map[0].host != NULL) {
569 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
570 of_node_put(host->of_node);
571 kfree(host);
572 return NULL;
573 }
574 irq_map[0].host = host;
575 }
576
577 list_add(&host->link, &irq_hosts);
578 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
579
580 /* Additional setups per revmap type */
581 switch(revmap_type) {
582 case IRQ_HOST_MAP_LEGACY:
583 /* 0 is always the invalid number for legacy */
584 host->inval_irq = 0;
585 /* setup us as the host for all legacy interrupts */
586 for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
587 irq_map[i].hwirq = i;
588 smp_wmb();
589 irq_map[i].host = host;
590 smp_wmb();
591
592 /* Legacy flags are left to default at this point,
593 * one can then use irq_create_mapping() to
594 * explicitly change them
595 */
596 ops->map(host, i, i);
597
598 /* Clear norequest flags */
599 irq_clear_status_flags(i, IRQ_NOREQUEST);
600 }
601 break;
602 case IRQ_HOST_MAP_LINEAR:
603 rmap = (unsigned int *)(host + 1);
604 for (i = 0; i < revmap_arg; i++)
605 rmap[i] = NO_IRQ;
606 host->revmap_data.linear.size = revmap_arg;
607 smp_wmb();
608 host->revmap_data.linear.revmap = rmap;
609 break;
610 case IRQ_HOST_MAP_TREE:
611 INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL);
612 break;
613 default:
614 break;
615 }
616
617 pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);
618
619 return host;
620}
621
622struct irq_host *irq_find_host(struct device_node *node)
623{
624 struct irq_host *h, *found = NULL;
625 unsigned long flags;
626
627 /* We might want to match the legacy controller last since
628 * it might potentially be set to match all interrupts in
629 * the absence of a device node. This isn't a problem so far
630 * yet though...
631 */
632 raw_spin_lock_irqsave(&irq_big_lock, flags);
633 list_for_each_entry(h, &irq_hosts, link)
634 if (h->ops->match(h, node)) {
635 found = h;
636 break;
637 }
638 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
639 return found;
640}
641EXPORT_SYMBOL_GPL(irq_find_host);
642
643void irq_set_default_host(struct irq_host *host)
644{
645 pr_debug("irq: Default host set to @0x%p\n", host);
646
647 irq_default_host = host;
648}
649
650void irq_set_virq_count(unsigned int count)
651{
652 pr_debug("irq: Trying to set virq count to %d\n", count);
653
654 BUG_ON(count < NUM_ISA_INTERRUPTS);
655 if (count < NR_IRQS)
656 irq_virq_count = count;
657}
658
659static int irq_setup_virq(struct irq_host *host, unsigned int virq,
660 irq_hw_number_t hwirq)
661{
662 int res;
663
664 res = irq_alloc_desc_at(virq, 0);
665 if (res != virq) {
666 pr_debug("irq: -> allocating desc failed\n");
667 goto error;
668 }
669
670 /* map it */
671 smp_wmb();
672 irq_map[virq].hwirq = hwirq;
673 smp_mb();
674
675 if (host->ops->map(host, virq, hwirq)) {
676 pr_debug("irq: -> mapping failed, freeing\n");
677 goto errdesc;
678 }
679
680 irq_clear_status_flags(virq, IRQ_NOREQUEST);
681
682 return 0;
683
684errdesc:
685 irq_free_descs(virq, 1);
686error:
687 irq_free_virt(virq, 1);
688 return -1;
689}
690
691unsigned int irq_create_direct_mapping(struct irq_host *host)
692{
693 unsigned int virq;
694
695 if (host == NULL)
696 host = irq_default_host;
697
698 BUG_ON(host == NULL);
699 WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);
700
701 virq = irq_alloc_virt(host, 1, 0);
702 if (virq == NO_IRQ) {
703 pr_debug("irq: create_direct virq allocation failed\n");
704 return NO_IRQ;
705 }
706
707 pr_debug("irq: create_direct obtained virq %d\n", virq);
708
709 if (irq_setup_virq(host, virq, virq))
710 return NO_IRQ;
711
712 return virq;
713}
714
715unsigned int irq_create_mapping(struct irq_host *host,
716 irq_hw_number_t hwirq)
717{
718 unsigned int virq, hint;
719
720 pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);
721
722 /* Look for default host if nececssary */
723 if (host == NULL)
724 host = irq_default_host;
725 if (host == NULL) {
726 printk(KERN_WARNING "irq_create_mapping called for"
727 " NULL host, hwirq=%lx\n", hwirq);
728 WARN_ON(1);
729 return NO_IRQ;
730 }
731 pr_debug("irq: -> using host @%p\n", host);
732
733 /* Check if mapping already exists */
734 virq = irq_find_mapping(host, hwirq);
735 if (virq != NO_IRQ) {
736 pr_debug("irq: -> existing mapping on virq %d\n", virq);
737 return virq;
738 }
739
740 /* Get a virtual interrupt number */
741 if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
742 /* Handle legacy */
743 virq = (unsigned int)hwirq;
744 if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
745 return NO_IRQ;
746 return virq;
747 } else {
748 /* Allocate a virtual interrupt number */
749 hint = hwirq % irq_virq_count;
750 virq = irq_alloc_virt(host, 1, hint);
751 if (virq == NO_IRQ) {
752 pr_debug("irq: -> virq allocation failed\n");
753 return NO_IRQ;
754 }
755 }
756
757 if (irq_setup_virq(host, virq, hwirq))
758 return NO_IRQ;
759
760 pr_debug("irq: irq %lu on host %s mapped to virtual irq %u\n",
761 hwirq, host->of_node ? host->of_node->full_name : "null", virq);
762
763 return virq;
764}
765EXPORT_SYMBOL_GPL(irq_create_mapping);
766
767unsigned int irq_create_of_mapping(struct device_node *controller,
768 const u32 *intspec, unsigned int intsize)
769{
770 struct irq_host *host;
771 irq_hw_number_t hwirq;
772 unsigned int type = IRQ_TYPE_NONE;
773 unsigned int virq;
774
775 if (controller == NULL)
776 host = irq_default_host;
777 else
778 host = irq_find_host(controller);
779 if (host == NULL) {
780 printk(KERN_WARNING "irq: no irq host found for %s !\n",
781 controller->full_name);
782 return NO_IRQ;
783 }
784
785 /* If host has no translation, then we assume interrupt line */
786 if (host->ops->xlate == NULL)
787 hwirq = intspec[0];
788 else {
789 if (host->ops->xlate(host, controller, intspec, intsize,
790 &hwirq, &type))
791 return NO_IRQ;
792 }
793
794 /* Create mapping */
795 virq = irq_create_mapping(host, hwirq);
796 if (virq == NO_IRQ)
797 return virq;
798
799 /* Set type if specified and different than the current one */
800 if (type != IRQ_TYPE_NONE &&
801 type != (irqd_get_trigger_type(irq_get_irq_data(virq))))
802 irq_set_irq_type(virq, type);
803 return virq;
804}
805EXPORT_SYMBOL_GPL(irq_create_of_mapping);
806
807void irq_dispose_mapping(unsigned int virq)
808{
809 struct irq_host *host;
810 irq_hw_number_t hwirq;
811
812 if (virq == NO_IRQ)
813 return;
814
815 host = irq_map[virq].host;
816 if (WARN_ON(host == NULL))
817 return;
818
819 /* Never unmap legacy interrupts */
820 if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
821 return;
822
823 irq_set_status_flags(virq, IRQ_NOREQUEST);
824
825 /* remove chip and handler */
826 irq_set_chip_and_handler(virq, NULL, NULL);
827
828 /* Make sure it's completed */
829 synchronize_irq(virq);
830
831 /* Tell the PIC about it */
832 if (host->ops->unmap)
833 host->ops->unmap(host, virq);
834 smp_mb();
835
836 /* Clear reverse map */
837 hwirq = irq_map[virq].hwirq;
838 switch(host->revmap_type) {
839 case IRQ_HOST_MAP_LINEAR:
840 if (hwirq < host->revmap_data.linear.size)
841 host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
842 break;
843 case IRQ_HOST_MAP_TREE:
844 mutex_lock(&revmap_trees_mutex);
845 radix_tree_delete(&host->revmap_data.tree, hwirq);
846 mutex_unlock(&revmap_trees_mutex);
847 break;
848 }
849
850 /* Destroy map */
851 smp_mb();
852 irq_map[virq].hwirq = host->inval_irq;
853
854 irq_free_descs(virq, 1);
855 /* Free it */
856 irq_free_virt(virq, 1);
857}
858EXPORT_SYMBOL_GPL(irq_dispose_mapping);
859
860unsigned int irq_find_mapping(struct irq_host *host,
861 irq_hw_number_t hwirq)
862{
863 unsigned int i;
864 unsigned int hint = hwirq % irq_virq_count;
865
866 /* Look for default host if nececssary */
867 if (host == NULL)
868 host = irq_default_host;
869 if (host == NULL)
870 return NO_IRQ;
871
872 /* legacy -> bail early */
873 if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
874 return hwirq;
875
876 /* Slow path does a linear search of the map */
877 if (hint < NUM_ISA_INTERRUPTS)
878 hint = NUM_ISA_INTERRUPTS;
879 i = hint;
880 do {
881 if (irq_map[i].host == host &&
882 irq_map[i].hwirq == hwirq)
883 return i;
884 i++;
885 if (i >= irq_virq_count)
886 i = NUM_ISA_INTERRUPTS;
887 } while(i != hint);
888 return NO_IRQ;
889}
890EXPORT_SYMBOL_GPL(irq_find_mapping);
891
892#ifdef CONFIG_SMP 576#ifdef CONFIG_SMP
893int irq_choose_cpu(const struct cpumask *mask) 577int irq_choose_cpu(const struct cpumask *mask)
894{ 578{
@@ -925,232 +609,11 @@ int irq_choose_cpu(const struct cpumask *mask)
925} 609}
926#endif 610#endif
927 611
928unsigned int irq_radix_revmap_lookup(struct irq_host *host,
929 irq_hw_number_t hwirq)
930{
931 struct irq_map_entry *ptr;
932 unsigned int virq;
933
934 if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE))
935 return irq_find_mapping(host, hwirq);
936
937 /*
938 * The ptr returned references the static global irq_map.
939 * but freeing an irq can delete nodes along the path to
940 * do the lookup via call_rcu.
941 */
942 rcu_read_lock();
943 ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);
944 rcu_read_unlock();
945
946 /*
947 * If found in radix tree, then fine.
948 * Else fallback to linear lookup - this should not happen in practice
949 * as it means that we failed to insert the node in the radix tree.
950 */
951 if (ptr)
952 virq = ptr - irq_map;
953 else
954 virq = irq_find_mapping(host, hwirq);
955
956 return virq;
957}
958
959void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
960 irq_hw_number_t hwirq)
961{
962 if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE))
963 return;
964
965 if (virq != NO_IRQ) {
966 mutex_lock(&revmap_trees_mutex);
967 radix_tree_insert(&host->revmap_data.tree, hwirq,
968 &irq_map[virq]);
969 mutex_unlock(&revmap_trees_mutex);
970 }
971}
972
973unsigned int irq_linear_revmap(struct irq_host *host,
974 irq_hw_number_t hwirq)
975{
976 unsigned int *revmap;
977
978 if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR))
979 return irq_find_mapping(host, hwirq);
980
981 /* Check revmap bounds */
982 if (unlikely(hwirq >= host->revmap_data.linear.size))
983 return irq_find_mapping(host, hwirq);
984
985 /* Check if revmap was allocated */
986 revmap = host->revmap_data.linear.revmap;
987 if (unlikely(revmap == NULL))
988 return irq_find_mapping(host, hwirq);
989
990 /* Fill up revmap with slow path if no mapping found */
991 if (unlikely(revmap[hwirq] == NO_IRQ))
992 revmap[hwirq] = irq_find_mapping(host, hwirq);
993
994 return revmap[hwirq];
995}
996
997unsigned int irq_alloc_virt(struct irq_host *host,
998 unsigned int count,
999 unsigned int hint)
1000{
1001 unsigned long flags;
1002 unsigned int i, j, found = NO_IRQ;
1003
1004 if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
1005 return NO_IRQ;
1006
1007 raw_spin_lock_irqsave(&irq_big_lock, flags);
1008
1009 /* Use hint for 1 interrupt if any */
1010 if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
1011 hint < irq_virq_count && irq_map[hint].host == NULL) {
1012 found = hint;
1013 goto hint_found;
1014 }
1015
1016 /* Look for count consecutive numbers in the allocatable
1017 * (non-legacy) space
1018 */
1019 for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
1020 if (irq_map[i].host != NULL)
1021 j = 0;
1022 else
1023 j++;
1024
1025 if (j == count) {
1026 found = i - count + 1;
1027 break;
1028 }
1029 }
1030 if (found == NO_IRQ) {
1031 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
1032 return NO_IRQ;
1033 }
1034 hint_found:
1035 for (i = found; i < (found + count); i++) {
1036 irq_map[i].hwirq = host->inval_irq;
1037 smp_wmb();
1038 irq_map[i].host = host;
1039 }
1040 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
1041 return found;
1042}
1043
1044void irq_free_virt(unsigned int virq, unsigned int count)
1045{
1046 unsigned long flags;
1047 unsigned int i;
1048
1049 WARN_ON (virq < NUM_ISA_INTERRUPTS);
1050 WARN_ON (count == 0 || (virq + count) > irq_virq_count);
1051
1052 if (virq < NUM_ISA_INTERRUPTS) {
1053 if (virq + count < NUM_ISA_INTERRUPTS)
1054 return;
1055 count =- NUM_ISA_INTERRUPTS - virq;
1056 virq = NUM_ISA_INTERRUPTS;
1057 }
1058
1059 if (count > irq_virq_count || virq > irq_virq_count - count) {
1060 if (virq > irq_virq_count)
1061 return;
1062 count = irq_virq_count - virq;
1063 }
1064
1065 raw_spin_lock_irqsave(&irq_big_lock, flags);
1066 for (i = virq; i < (virq + count); i++) {
1067 struct irq_host *host;
1068
1069 host = irq_map[i].host;
1070 irq_map[i].hwirq = host->inval_irq;
1071 smp_wmb();
1072 irq_map[i].host = NULL;
1073 }
1074 raw_spin_unlock_irqrestore(&irq_big_lock, flags);
1075}
1076
1077int arch_early_irq_init(void) 612int arch_early_irq_init(void)
1078{ 613{
1079 return 0; 614 return 0;
1080} 615}
1081 616
1082#ifdef CONFIG_VIRQ_DEBUG
1083static int virq_debug_show(struct seq_file *m, void *private)
1084{
1085 unsigned long flags;
1086 struct irq_desc *desc;
1087 const char *p;
1088 static const char none[] = "none";
1089 void *data;
1090 int i;
1091
1092 seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq",
1093 "chip name", "chip data", "host name");
1094
1095 for (i = 1; i < nr_irqs; i++) {
1096 desc = irq_to_desc(i);
1097 if (!desc)
1098 continue;
1099
1100 raw_spin_lock_irqsave(&desc->lock, flags);
1101
1102 if (desc->action && desc->action->handler) {
1103 struct irq_chip *chip;
1104
1105 seq_printf(m, "%5d ", i);
1106 seq_printf(m, "0x%05lx ", irq_map[i].hwirq);
1107
1108 chip = irq_desc_get_chip(desc);
1109 if (chip && chip->name)
1110 p = chip->name;
1111 else
1112 p = none;
1113 seq_printf(m, "%-15s ", p);
1114
1115 data = irq_desc_get_chip_data(desc);
1116 seq_printf(m, "0x%16p ", data);
1117
1118 if (irq_map[i].host && irq_map[i].host->of_node)
1119 p = irq_map[i].host->of_node->full_name;
1120 else
1121 p = none;
1122 seq_printf(m, "%s\n", p);
1123 }
1124
1125 raw_spin_unlock_irqrestore(&desc->lock, flags);
1126 }
1127
1128 return 0;
1129}
1130
1131static int virq_debug_open(struct inode *inode, struct file *file)
1132{
1133 return single_open(file, virq_debug_show, inode->i_private);
1134}
1135
1136static const struct file_operations virq_debug_fops = {
1137 .open = virq_debug_open,
1138 .read = seq_read,
1139 .llseek = seq_lseek,
1140 .release = single_release,
1141};
1142
1143static int __init irq_debugfs_init(void)
1144{
1145 if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
1146 NULL, &virq_debug_fops) == NULL)
1147 return -ENOMEM;
1148
1149 return 0;
1150}
1151__initcall(irq_debugfs_init);
1152#endif /* CONFIG_VIRQ_DEBUG */
1153
1154#ifdef CONFIG_PPC64 617#ifdef CONFIG_PPC64
1155static int __init setup_noirqdistrib(char *str) 618static int __init setup_noirqdistrib(char *str)
1156{ 619{
diff --git a/arch/powerpc/kernel/isa-bridge.c b/arch/powerpc/kernel/isa-bridge.c
index 479752901ec6..d45ec58703ce 100644
--- a/arch/powerpc/kernel/isa-bridge.c
+++ b/arch/powerpc/kernel/isa-bridge.c
@@ -29,7 +29,6 @@
29#include <asm/pci-bridge.h> 29#include <asm/pci-bridge.h>
30#include <asm/machdep.h> 30#include <asm/machdep.h>
31#include <asm/ppc-pci.h> 31#include <asm/ppc-pci.h>
32#include <asm/firmware.h>
33 32
34unsigned long isa_io_base; /* NULL if no ISA bus */ 33unsigned long isa_io_base; /* NULL if no ISA bus */
35EXPORT_SYMBOL(isa_io_base); 34EXPORT_SYMBOL(isa_io_base);
@@ -261,8 +260,6 @@ static struct notifier_block isa_bridge_notifier = {
261 */ 260 */
262static int __init isa_bridge_init(void) 261static int __init isa_bridge_init(void)
263{ 262{
264 if (firmware_has_feature(FW_FEATURE_ISERIES))
265 return 0;
266 bus_register_notifier(&pci_bus_type, &isa_bridge_notifier); 263 bus_register_notifier(&pci_bus_type, &isa_bridge_notifier);
267 return 0; 264 return 0;
268} 265}
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 2985338d0e10..62bdf2389669 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved. 2 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
3 * Copyright 2010-2011 Freescale Semiconductor, Inc.
3 * 4 *
4 * Authors: 5 * Authors:
5 * Alexander Graf <agraf@suse.de> 6 * Alexander Graf <agraf@suse.de>
@@ -29,6 +30,7 @@
29#include <asm/sections.h> 30#include <asm/sections.h>
30#include <asm/cacheflush.h> 31#include <asm/cacheflush.h>
31#include <asm/disassemble.h> 32#include <asm/disassemble.h>
33#include <asm/ppc-opcode.h>
32 34
33#define KVM_MAGIC_PAGE (-4096L) 35#define KVM_MAGIC_PAGE (-4096L)
34#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x) 36#define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x)
@@ -41,34 +43,30 @@
41#define KVM_INST_B 0x48000000 43#define KVM_INST_B 0x48000000
42#define KVM_INST_B_MASK 0x03ffffff 44#define KVM_INST_B_MASK 0x03ffffff
43#define KVM_INST_B_MAX 0x01ffffff 45#define KVM_INST_B_MAX 0x01ffffff
46#define KVM_INST_LI 0x38000000
44 47
45#define KVM_MASK_RT 0x03e00000 48#define KVM_MASK_RT 0x03e00000
46#define KVM_RT_30 0x03c00000 49#define KVM_RT_30 0x03c00000
47#define KVM_MASK_RB 0x0000f800 50#define KVM_MASK_RB 0x0000f800
48#define KVM_INST_MFMSR 0x7c0000a6 51#define KVM_INST_MFMSR 0x7c0000a6
49#define KVM_INST_MFSPR_SPRG0 0x7c1042a6 52
50#define KVM_INST_MFSPR_SPRG1 0x7c1142a6 53#define SPR_FROM 0
51#define KVM_INST_MFSPR_SPRG2 0x7c1242a6 54#define SPR_TO 0x100
52#define KVM_INST_MFSPR_SPRG3 0x7c1342a6 55
53#define KVM_INST_MFSPR_SRR0 0x7c1a02a6 56#define KVM_INST_SPR(sprn, moveto) (0x7c0002a6 | \
54#define KVM_INST_MFSPR_SRR1 0x7c1b02a6 57 (((sprn) & 0x1f) << 16) | \
55#define KVM_INST_MFSPR_DAR 0x7c1302a6 58 (((sprn) & 0x3e0) << 6) | \
56#define KVM_INST_MFSPR_DSISR 0x7c1202a6 59 (moveto))
57 60
58#define KVM_INST_MTSPR_SPRG0 0x7c1043a6 61#define KVM_INST_MFSPR(sprn) KVM_INST_SPR(sprn, SPR_FROM)
59#define KVM_INST_MTSPR_SPRG1 0x7c1143a6 62#define KVM_INST_MTSPR(sprn) KVM_INST_SPR(sprn, SPR_TO)
60#define KVM_INST_MTSPR_SPRG2 0x7c1243a6
61#define KVM_INST_MTSPR_SPRG3 0x7c1343a6
62#define KVM_INST_MTSPR_SRR0 0x7c1a03a6
63#define KVM_INST_MTSPR_SRR1 0x7c1b03a6
64#define KVM_INST_MTSPR_DAR 0x7c1303a6
65#define KVM_INST_MTSPR_DSISR 0x7c1203a6
66 63
67#define KVM_INST_TLBSYNC 0x7c00046c 64#define KVM_INST_TLBSYNC 0x7c00046c
68#define KVM_INST_MTMSRD_L0 0x7c000164 65#define KVM_INST_MTMSRD_L0 0x7c000164
69#define KVM_INST_MTMSRD_L1 0x7c010164 66#define KVM_INST_MTMSRD_L1 0x7c010164
70#define KVM_INST_MTMSR 0x7c000124 67#define KVM_INST_MTMSR 0x7c000124
71 68
69#define KVM_INST_WRTEE 0x7c000106
72#define KVM_INST_WRTEEI_0 0x7c000146 70#define KVM_INST_WRTEEI_0 0x7c000146
73#define KVM_INST_WRTEEI_1 0x7c008146 71#define KVM_INST_WRTEEI_1 0x7c008146
74 72
@@ -270,26 +268,27 @@ static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
270 268
271#ifdef CONFIG_BOOKE 269#ifdef CONFIG_BOOKE
272 270
273extern u32 kvm_emulate_wrteei_branch_offs; 271extern u32 kvm_emulate_wrtee_branch_offs;
274extern u32 kvm_emulate_wrteei_ee_offs; 272extern u32 kvm_emulate_wrtee_reg_offs;
275extern u32 kvm_emulate_wrteei_len; 273extern u32 kvm_emulate_wrtee_orig_ins_offs;
276extern u32 kvm_emulate_wrteei[]; 274extern u32 kvm_emulate_wrtee_len;
275extern u32 kvm_emulate_wrtee[];
277 276
278static void kvm_patch_ins_wrteei(u32 *inst) 277static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
279{ 278{
280 u32 *p; 279 u32 *p;
281 int distance_start; 280 int distance_start;
282 int distance_end; 281 int distance_end;
283 ulong next_inst; 282 ulong next_inst;
284 283
285 p = kvm_alloc(kvm_emulate_wrteei_len * 4); 284 p = kvm_alloc(kvm_emulate_wrtee_len * 4);
286 if (!p) 285 if (!p)
287 return; 286 return;
288 287
289 /* Find out where we are and put everything there */ 288 /* Find out where we are and put everything there */
290 distance_start = (ulong)p - (ulong)inst; 289 distance_start = (ulong)p - (ulong)inst;
291 next_inst = ((ulong)inst + 4); 290 next_inst = ((ulong)inst + 4);
292 distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_branch_offs]; 291 distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];
293 292
294 /* Make sure we only write valid b instructions */ 293 /* Make sure we only write valid b instructions */
295 if (distance_start > KVM_INST_B_MAX) { 294 if (distance_start > KVM_INST_B_MAX) {
@@ -298,10 +297,65 @@ static void kvm_patch_ins_wrteei(u32 *inst)
298 } 297 }
299 298
300 /* Modify the chunk to fit the invocation */ 299 /* Modify the chunk to fit the invocation */
301 memcpy(p, kvm_emulate_wrteei, kvm_emulate_wrteei_len * 4); 300 memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
302 p[kvm_emulate_wrteei_branch_offs] |= distance_end & KVM_INST_B_MASK; 301 p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;
303 p[kvm_emulate_wrteei_ee_offs] |= (*inst & MSR_EE); 302
304 flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_len * 4); 303 if (imm_one) {
304 p[kvm_emulate_wrtee_reg_offs] =
305 KVM_INST_LI | __PPC_RT(30) | MSR_EE;
306 } else {
307 /* Make clobbered registers work too */
308 switch (get_rt(rt)) {
309 case 30:
310 kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
311 magic_var(scratch2), KVM_RT_30);
312 break;
313 case 31:
314 kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
315 magic_var(scratch1), KVM_RT_30);
316 break;
317 default:
318 p[kvm_emulate_wrtee_reg_offs] |= rt;
319 break;
320 }
321 }
322
323 p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
324 flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);
325
326 /* Patch the invocation */
327 kvm_patch_ins_b(inst, distance_start);
328}
329
330extern u32 kvm_emulate_wrteei_0_branch_offs;
331extern u32 kvm_emulate_wrteei_0_len;
332extern u32 kvm_emulate_wrteei_0[];
333
334static void kvm_patch_ins_wrteei_0(u32 *inst)
335{
336 u32 *p;
337 int distance_start;
338 int distance_end;
339 ulong next_inst;
340
341 p = kvm_alloc(kvm_emulate_wrteei_0_len * 4);
342 if (!p)
343 return;
344
345 /* Find out where we are and put everything there */
346 distance_start = (ulong)p - (ulong)inst;
347 next_inst = ((ulong)inst + 4);
348 distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_0_branch_offs];
349
350 /* Make sure we only write valid b instructions */
351 if (distance_start > KVM_INST_B_MAX) {
352 kvm_patching_worked = false;
353 return;
354 }
355
356 memcpy(p, kvm_emulate_wrteei_0, kvm_emulate_wrteei_0_len * 4);
357 p[kvm_emulate_wrteei_0_branch_offs] |= distance_end & KVM_INST_B_MASK;
358 flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_0_len * 4);
305 359
306 /* Patch the invocation */ 360 /* Patch the invocation */
307 kvm_patch_ins_b(inst, distance_start); 361 kvm_patch_ins_b(inst, distance_start);
@@ -380,56 +434,191 @@ static void kvm_check_ins(u32 *inst, u32 features)
380 case KVM_INST_MFMSR: 434 case KVM_INST_MFMSR:
381 kvm_patch_ins_ld(inst, magic_var(msr), inst_rt); 435 kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
382 break; 436 break;
383 case KVM_INST_MFSPR_SPRG0: 437 case KVM_INST_MFSPR(SPRN_SPRG0):
384 kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt); 438 kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
385 break; 439 break;
386 case KVM_INST_MFSPR_SPRG1: 440 case KVM_INST_MFSPR(SPRN_SPRG1):
387 kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt); 441 kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
388 break; 442 break;
389 case KVM_INST_MFSPR_SPRG2: 443 case KVM_INST_MFSPR(SPRN_SPRG2):
390 kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt); 444 kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
391 break; 445 break;
392 case KVM_INST_MFSPR_SPRG3: 446 case KVM_INST_MFSPR(SPRN_SPRG3):
393 kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt); 447 kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
394 break; 448 break;
395 case KVM_INST_MFSPR_SRR0: 449 case KVM_INST_MFSPR(SPRN_SRR0):
396 kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt); 450 kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
397 break; 451 break;
398 case KVM_INST_MFSPR_SRR1: 452 case KVM_INST_MFSPR(SPRN_SRR1):
399 kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt); 453 kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
400 break; 454 break;
401 case KVM_INST_MFSPR_DAR: 455#ifdef CONFIG_BOOKE
456 case KVM_INST_MFSPR(SPRN_DEAR):
457#else
458 case KVM_INST_MFSPR(SPRN_DAR):
459#endif
402 kvm_patch_ins_ld(inst, magic_var(dar), inst_rt); 460 kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
403 break; 461 break;
404 case KVM_INST_MFSPR_DSISR: 462 case KVM_INST_MFSPR(SPRN_DSISR):
405 kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt); 463 kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
406 break; 464 break;
407 465
466#ifdef CONFIG_PPC_BOOK3E_MMU
467 case KVM_INST_MFSPR(SPRN_MAS0):
468 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
469 kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
470 break;
471 case KVM_INST_MFSPR(SPRN_MAS1):
472 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
473 kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
474 break;
475 case KVM_INST_MFSPR(SPRN_MAS2):
476 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
477 kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
478 break;
479 case KVM_INST_MFSPR(SPRN_MAS3):
480 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
481 kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
482 break;
483 case KVM_INST_MFSPR(SPRN_MAS4):
484 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
485 kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
486 break;
487 case KVM_INST_MFSPR(SPRN_MAS6):
488 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
489 kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
490 break;
491 case KVM_INST_MFSPR(SPRN_MAS7):
492 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
493 kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
494 break;
495#endif /* CONFIG_PPC_BOOK3E_MMU */
496
497 case KVM_INST_MFSPR(SPRN_SPRG4):
498#ifdef CONFIG_BOOKE
499 case KVM_INST_MFSPR(SPRN_SPRG4R):
500#endif
501 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
502 kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
503 break;
504 case KVM_INST_MFSPR(SPRN_SPRG5):
505#ifdef CONFIG_BOOKE
506 case KVM_INST_MFSPR(SPRN_SPRG5R):
507#endif
508 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
509 kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
510 break;
511 case KVM_INST_MFSPR(SPRN_SPRG6):
512#ifdef CONFIG_BOOKE
513 case KVM_INST_MFSPR(SPRN_SPRG6R):
514#endif
515 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
516 kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
517 break;
518 case KVM_INST_MFSPR(SPRN_SPRG7):
519#ifdef CONFIG_BOOKE
520 case KVM_INST_MFSPR(SPRN_SPRG7R):
521#endif
522 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
523 kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
524 break;
525
526#ifdef CONFIG_BOOKE
527 case KVM_INST_MFSPR(SPRN_ESR):
528 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
529 kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
530 break;
531#endif
532
533 case KVM_INST_MFSPR(SPRN_PIR):
534 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
535 kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
536 break;
537
538
408 /* Stores */ 539 /* Stores */
409 case KVM_INST_MTSPR_SPRG0: 540 case KVM_INST_MTSPR(SPRN_SPRG0):
410 kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt); 541 kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
411 break; 542 break;
412 case KVM_INST_MTSPR_SPRG1: 543 case KVM_INST_MTSPR(SPRN_SPRG1):
413 kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt); 544 kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
414 break; 545 break;
415 case KVM_INST_MTSPR_SPRG2: 546 case KVM_INST_MTSPR(SPRN_SPRG2):
416 kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt); 547 kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
417 break; 548 break;
418 case KVM_INST_MTSPR_SPRG3: 549 case KVM_INST_MTSPR(SPRN_SPRG3):
419 kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt); 550 kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
420 break; 551 break;
421 case KVM_INST_MTSPR_SRR0: 552 case KVM_INST_MTSPR(SPRN_SRR0):
422 kvm_patch_ins_std(inst, magic_var(srr0), inst_rt); 553 kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
423 break; 554 break;
424 case KVM_INST_MTSPR_SRR1: 555 case KVM_INST_MTSPR(SPRN_SRR1):
425 kvm_patch_ins_std(inst, magic_var(srr1), inst_rt); 556 kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
426 break; 557 break;
427 case KVM_INST_MTSPR_DAR: 558#ifdef CONFIG_BOOKE
559 case KVM_INST_MTSPR(SPRN_DEAR):
560#else
561 case KVM_INST_MTSPR(SPRN_DAR):
562#endif
428 kvm_patch_ins_std(inst, magic_var(dar), inst_rt); 563 kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
429 break; 564 break;
430 case KVM_INST_MTSPR_DSISR: 565 case KVM_INST_MTSPR(SPRN_DSISR):
431 kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt); 566 kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
432 break; 567 break;
568#ifdef CONFIG_PPC_BOOK3E_MMU
569 case KVM_INST_MTSPR(SPRN_MAS0):
570 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
571 kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
572 break;
573 case KVM_INST_MTSPR(SPRN_MAS1):
574 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
575 kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
576 break;
577 case KVM_INST_MTSPR(SPRN_MAS2):
578 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
579 kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
580 break;
581 case KVM_INST_MTSPR(SPRN_MAS3):
582 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
583 kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
584 break;
585 case KVM_INST_MTSPR(SPRN_MAS4):
586 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
587 kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
588 break;
589 case KVM_INST_MTSPR(SPRN_MAS6):
590 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
591 kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
592 break;
593 case KVM_INST_MTSPR(SPRN_MAS7):
594 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
595 kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
596 break;
597#endif /* CONFIG_PPC_BOOK3E_MMU */
598
599 case KVM_INST_MTSPR(SPRN_SPRG4):
600 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
601 kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
602 break;
603 case KVM_INST_MTSPR(SPRN_SPRG5):
604 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
605 kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
606 break;
607 case KVM_INST_MTSPR(SPRN_SPRG6):
608 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
609 kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
610 break;
611 case KVM_INST_MTSPR(SPRN_SPRG7):
612 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
613 kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
614 break;
615
616#ifdef CONFIG_BOOKE
617 case KVM_INST_MTSPR(SPRN_ESR):
618 if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
619 kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
620 break;
621#endif
433 622
434 /* Nops */ 623 /* Nops */
435 case KVM_INST_TLBSYNC: 624 case KVM_INST_TLBSYNC:
@@ -444,6 +633,11 @@ static void kvm_check_ins(u32 *inst, u32 features)
444 case KVM_INST_MTMSRD_L0: 633 case KVM_INST_MTMSRD_L0:
445 kvm_patch_ins_mtmsr(inst, inst_rt); 634 kvm_patch_ins_mtmsr(inst, inst_rt);
446 break; 635 break;
636#ifdef CONFIG_BOOKE
637 case KVM_INST_WRTEE:
638 kvm_patch_ins_wrtee(inst, inst_rt, 0);
639 break;
640#endif
447 } 641 }
448 642
449 switch (inst_no_rt & ~KVM_MASK_RB) { 643 switch (inst_no_rt & ~KVM_MASK_RB) {
@@ -461,13 +655,19 @@ static void kvm_check_ins(u32 *inst, u32 features)
461 switch (_inst) { 655 switch (_inst) {
462#ifdef CONFIG_BOOKE 656#ifdef CONFIG_BOOKE
463 case KVM_INST_WRTEEI_0: 657 case KVM_INST_WRTEEI_0:
658 kvm_patch_ins_wrteei_0(inst);
659 break;
660
464 case KVM_INST_WRTEEI_1: 661 case KVM_INST_WRTEEI_1:
465 kvm_patch_ins_wrteei(inst); 662 kvm_patch_ins_wrtee(inst, 0, 1);
466 break; 663 break;
467#endif 664#endif
468 } 665 }
469} 666}
470 667
668extern u32 kvm_template_start[];
669extern u32 kvm_template_end[];
670
471static void kvm_use_magic_page(void) 671static void kvm_use_magic_page(void)
472{ 672{
473 u32 *p; 673 u32 *p;
@@ -488,8 +688,23 @@ static void kvm_use_magic_page(void)
488 start = (void*)_stext; 688 start = (void*)_stext;
489 end = (void*)_etext; 689 end = (void*)_etext;
490 690
491 for (p = start; p < end; p++) 691 /*
692 * Being interrupted in the middle of patching would
693 * be bad for SPRG4-7, which KVM can't keep in sync
694 * with emulated accesses because reads don't trap.
695 */
696 local_irq_disable();
697
698 for (p = start; p < end; p++) {
699 /* Avoid patching the template code */
700 if (p >= kvm_template_start && p < kvm_template_end) {
701 p = kvm_template_end - 1;
702 continue;
703 }
492 kvm_check_ins(p, features); 704 kvm_check_ins(p, features);
705 }
706
707 local_irq_enable();
493 708
494 printk(KERN_INFO "KVM: Live patching for a fast VM %s\n", 709 printk(KERN_INFO "KVM: Live patching for a fast VM %s\n",
495 kvm_patching_worked ? "worked" : "failed"); 710 kvm_patching_worked ? "worked" : "failed");
diff --git a/arch/powerpc/kernel/kvm_emul.S b/arch/powerpc/kernel/kvm_emul.S
index f2b1b2523e61..e291cf3cf954 100644
--- a/arch/powerpc/kernel/kvm_emul.S
+++ b/arch/powerpc/kernel/kvm_emul.S
@@ -13,6 +13,7 @@
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 * 14 *
15 * Copyright SUSE Linux Products GmbH 2010 15 * Copyright SUSE Linux Products GmbH 2010
16 * Copyright 2010-2011 Freescale Semiconductor, Inc.
16 * 17 *
17 * Authors: Alexander Graf <agraf@suse.de> 18 * Authors: Alexander Graf <agraf@suse.de>
18 */ 19 */
@@ -65,6 +66,9 @@ kvm_hypercall_start:
65 shared->critical == r1 and r2 is always != r1 */ \ 66 shared->critical == r1 and r2 is always != r1 */ \
66 STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0); 67 STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0);
67 68
69.global kvm_template_start
70kvm_template_start:
71
68.global kvm_emulate_mtmsrd 72.global kvm_emulate_mtmsrd
69kvm_emulate_mtmsrd: 73kvm_emulate_mtmsrd:
70 74
@@ -167,6 +171,9 @@ maybe_stay_in_guest:
167kvm_emulate_mtmsr_reg2: 171kvm_emulate_mtmsr_reg2:
168 ori r30, r0, 0 172 ori r30, r0, 0
169 173
174 /* Put MSR into magic page because we don't call mtmsr */
175 STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
176
170 /* Check if we have to fetch an interrupt */ 177 /* Check if we have to fetch an interrupt */
171 lwz r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0) 178 lwz r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
172 cmpwi r31, 0 179 cmpwi r31, 0
@@ -174,15 +181,10 @@ kvm_emulate_mtmsr_reg2:
174 181
175 /* Check if we may trigger an interrupt */ 182 /* Check if we may trigger an interrupt */
176 andi. r31, r30, MSR_EE 183 andi. r31, r30, MSR_EE
177 beq no_mtmsr 184 bne do_mtmsr
178
179 b do_mtmsr
180 185
181no_mtmsr: 186no_mtmsr:
182 187
183 /* Put MSR into magic page because we don't call mtmsr */
184 STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
185
186 SCRATCH_RESTORE 188 SCRATCH_RESTORE
187 189
188 /* Go back to caller */ 190 /* Go back to caller */
@@ -210,24 +212,80 @@ kvm_emulate_mtmsr_orig_ins_offs:
210kvm_emulate_mtmsr_len: 212kvm_emulate_mtmsr_len:
211 .long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4 213 .long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4
212 214
215/* also used for wrteei 1 */
216.global kvm_emulate_wrtee
217kvm_emulate_wrtee:
218
219 SCRATCH_SAVE
220
221 /* Fetch old MSR in r31 */
222 LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
223
224 /* Insert new MSR[EE] */
225kvm_emulate_wrtee_reg:
226 ori r30, r0, 0
227 rlwimi r31, r30, 0, MSR_EE
228
229 /*
230 * If MSR[EE] is now set, check for a pending interrupt.
231 * We could skip this if MSR[EE] was already on, but that
232 * should be rare, so don't bother.
233 */
234 andi. r30, r30, MSR_EE
235
236 /* Put MSR into magic page because we don't call wrtee */
237 STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
238
239 beq no_wrtee
240
241 /* Check if we have to fetch an interrupt */
242 lwz r30, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0)
243 cmpwi r30, 0
244 bne do_wrtee
245
246no_wrtee:
247 SCRATCH_RESTORE
248
249 /* Go back to caller */
250kvm_emulate_wrtee_branch:
251 b .
252
253do_wrtee:
254 SCRATCH_RESTORE
213 255
256 /* Just fire off the wrtee if it's critical */
257kvm_emulate_wrtee_orig_ins:
258 wrtee r0
214 259
215.global kvm_emulate_wrteei 260 b kvm_emulate_wrtee_branch
216kvm_emulate_wrteei:
217 261
262kvm_emulate_wrtee_end:
263
264.global kvm_emulate_wrtee_branch_offs
265kvm_emulate_wrtee_branch_offs:
266 .long (kvm_emulate_wrtee_branch - kvm_emulate_wrtee) / 4
267
268.global kvm_emulate_wrtee_reg_offs
269kvm_emulate_wrtee_reg_offs:
270 .long (kvm_emulate_wrtee_reg - kvm_emulate_wrtee) / 4
271
272.global kvm_emulate_wrtee_orig_ins_offs
273kvm_emulate_wrtee_orig_ins_offs:
274 .long (kvm_emulate_wrtee_orig_ins - kvm_emulate_wrtee) / 4
275
276.global kvm_emulate_wrtee_len
277kvm_emulate_wrtee_len:
278 .long (kvm_emulate_wrtee_end - kvm_emulate_wrtee) / 4
279
280.global kvm_emulate_wrteei_0
281kvm_emulate_wrteei_0:
218 SCRATCH_SAVE 282 SCRATCH_SAVE
219 283
220 /* Fetch old MSR in r31 */ 284 /* Fetch old MSR in r31 */
221 LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) 285 LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
222 286
223 /* Remove MSR_EE from old MSR */ 287 /* Remove MSR_EE from old MSR */
224 li r30, 0 288 rlwinm r31, r31, 0, ~MSR_EE
225 ori r30, r30, MSR_EE
226 andc r31, r31, r30
227
228 /* OR new MSR_EE onto the old MSR */
229kvm_emulate_wrteei_ee:
230 ori r31, r31, 0
231 289
232 /* Write new MSR value back */ 290 /* Write new MSR value back */
233 STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) 291 STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0)
@@ -235,22 +293,17 @@ kvm_emulate_wrteei_ee:
235 SCRATCH_RESTORE 293 SCRATCH_RESTORE
236 294
237 /* Go back to caller */ 295 /* Go back to caller */
238kvm_emulate_wrteei_branch: 296kvm_emulate_wrteei_0_branch:
239 b . 297 b .
240kvm_emulate_wrteei_end: 298kvm_emulate_wrteei_0_end:
241
242.global kvm_emulate_wrteei_branch_offs
243kvm_emulate_wrteei_branch_offs:
244 .long (kvm_emulate_wrteei_branch - kvm_emulate_wrteei) / 4
245 299
246.global kvm_emulate_wrteei_ee_offs 300.global kvm_emulate_wrteei_0_branch_offs
247kvm_emulate_wrteei_ee_offs: 301kvm_emulate_wrteei_0_branch_offs:
248 .long (kvm_emulate_wrteei_ee - kvm_emulate_wrteei) / 4 302 .long (kvm_emulate_wrteei_0_branch - kvm_emulate_wrteei_0) / 4
249
250.global kvm_emulate_wrteei_len
251kvm_emulate_wrteei_len:
252 .long (kvm_emulate_wrteei_end - kvm_emulate_wrteei) / 4
253 303
304.global kvm_emulate_wrteei_0_len
305kvm_emulate_wrteei_0_len:
306 .long (kvm_emulate_wrteei_0_end - kvm_emulate_wrteei_0) / 4
254 307
255.global kvm_emulate_mtsrin 308.global kvm_emulate_mtsrin
256kvm_emulate_mtsrin: 309kvm_emulate_mtsrin:
@@ -300,3 +353,6 @@ kvm_emulate_mtsrin_orig_ins_offs:
300.global kvm_emulate_mtsrin_len 353.global kvm_emulate_mtsrin_len
301kvm_emulate_mtsrin_len: 354kvm_emulate_mtsrin_len:
302 .long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4 355 .long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4
356
357.global kvm_template_end
358kvm_template_end:
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c
index 3fea3689527e..bedd12e1cfbc 100644
--- a/arch/powerpc/kernel/legacy_serial.c
+++ b/arch/powerpc/kernel/legacy_serial.c
@@ -442,8 +442,10 @@ static void __init fixup_port_irq(int index,
442 442
443 port->irq = virq; 443 port->irq = virq;
444 444
445#ifdef CONFIG_SERIAL_8250_FSL
445 if (of_device_is_compatible(np, "fsl,ns16550")) 446 if (of_device_is_compatible(np, "fsl,ns16550"))
446 port->handle_irq = fsl8250_handle_irq; 447 port->handle_irq = fsl8250_handle_irq;
448#endif
447} 449}
448 450
449static void __init fixup_port_pio(int index, 451static void __init fixup_port_pio(int index,
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 578f35f18723..ac12bd80ad95 100644
--- a/arch/powerpc/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -26,7 +26,6 @@
26#include <linux/seq_file.h> 26#include <linux/seq_file.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <asm/uaccess.h> 28#include <asm/uaccess.h>
29#include <asm/iseries/hv_lp_config.h>
30#include <asm/lppaca.h> 29#include <asm/lppaca.h>
31#include <asm/hvcall.h> 30#include <asm/hvcall.h>
32#include <asm/firmware.h> 31#include <asm/firmware.h>
@@ -55,80 +54,14 @@ static unsigned long get_purr(void)
55 int cpu; 54 int cpu;
56 55
57 for_each_possible_cpu(cpu) { 56 for_each_possible_cpu(cpu) {
58 if (firmware_has_feature(FW_FEATURE_ISERIES)) 57 struct cpu_usage *cu;
59 sum_purr += lppaca_of(cpu).emulated_time_base;
60 else {
61 struct cpu_usage *cu;
62 58
63 cu = &per_cpu(cpu_usage_array, cpu); 59 cu = &per_cpu(cpu_usage_array, cpu);
64 sum_purr += cu->current_tb; 60 sum_purr += cu->current_tb;
65 }
66 } 61 }
67 return sum_purr; 62 return sum_purr;
68} 63}
69 64
70#ifdef CONFIG_PPC_ISERIES
71
72/*
73 * Methods used to fetch LPAR data when running on an iSeries platform.
74 */
75static int iseries_lparcfg_data(struct seq_file *m, void *v)
76{
77 unsigned long pool_id;
78 int shared, entitled_capacity, max_entitled_capacity;
79 int processors, max_processors;
80 unsigned long purr = get_purr();
81
82 shared = (int)(local_paca->lppaca_ptr->shared_proc);
83
84 seq_printf(m, "system_active_processors=%d\n",
85 (int)HvLpConfig_getSystemPhysicalProcessors());
86
87 seq_printf(m, "system_potential_processors=%d\n",
88 (int)HvLpConfig_getSystemPhysicalProcessors());
89
90 processors = (int)HvLpConfig_getPhysicalProcessors();
91 seq_printf(m, "partition_active_processors=%d\n", processors);
92
93 max_processors = (int)HvLpConfig_getMaxPhysicalProcessors();
94 seq_printf(m, "partition_potential_processors=%d\n", max_processors);
95
96 if (shared) {
97 entitled_capacity = HvLpConfig_getSharedProcUnits();
98 max_entitled_capacity = HvLpConfig_getMaxSharedProcUnits();
99 } else {
100 entitled_capacity = processors * 100;
101 max_entitled_capacity = max_processors * 100;
102 }
103 seq_printf(m, "partition_entitled_capacity=%d\n", entitled_capacity);
104
105 seq_printf(m, "partition_max_entitled_capacity=%d\n",
106 max_entitled_capacity);
107
108 if (shared) {
109 pool_id = HvLpConfig_getSharedPoolIndex();
110 seq_printf(m, "pool=%d\n", (int)pool_id);
111 seq_printf(m, "pool_capacity=%d\n",
112 (int)(HvLpConfig_getNumProcsInSharedPool(pool_id) *
113 100));
114 seq_printf(m, "purr=%ld\n", purr);
115 }
116
117 seq_printf(m, "shared_processor_mode=%d\n", shared);
118
119 return 0;
120}
121
122#else /* CONFIG_PPC_ISERIES */
123
124static int iseries_lparcfg_data(struct seq_file *m, void *v)
125{
126 return 0;
127}
128
129#endif /* CONFIG_PPC_ISERIES */
130
131#ifdef CONFIG_PPC_PSERIES
132/* 65/*
133 * Methods used to fetch LPAR data when running on a pSeries platform. 66 * Methods used to fetch LPAR data when running on a pSeries platform.
134 */ 67 */
@@ -648,8 +581,7 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
648 u8 new_weight, *new_weight_ptr = &new_weight; 581 u8 new_weight, *new_weight_ptr = &new_weight;
649 ssize_t retval; 582 ssize_t retval;
650 583
651 if (!firmware_has_feature(FW_FEATURE_SPLPAR) || 584 if (!firmware_has_feature(FW_FEATURE_SPLPAR))
652 firmware_has_feature(FW_FEATURE_ISERIES))
653 return -EINVAL; 585 return -EINVAL;
654 586
655 if (count > kbuf_sz) 587 if (count > kbuf_sz)
@@ -709,21 +641,6 @@ static ssize_t lparcfg_write(struct file *file, const char __user * buf,
709 return retval; 641 return retval;
710} 642}
711 643
712#else /* CONFIG_PPC_PSERIES */
713
714static int pseries_lparcfg_data(struct seq_file *m, void *v)
715{
716 return 0;
717}
718
719static ssize_t lparcfg_write(struct file *file, const char __user * buf,
720 size_t count, loff_t * off)
721{
722 return -EINVAL;
723}
724
725#endif /* CONFIG_PPC_PSERIES */
726
727static int lparcfg_data(struct seq_file *m, void *v) 644static int lparcfg_data(struct seq_file *m, void *v)
728{ 645{
729 struct device_node *rootdn; 646 struct device_node *rootdn;
@@ -738,19 +655,11 @@ static int lparcfg_data(struct seq_file *m, void *v)
738 rootdn = of_find_node_by_path("/"); 655 rootdn = of_find_node_by_path("/");
739 if (rootdn) { 656 if (rootdn) {
740 tmp = of_get_property(rootdn, "model", NULL); 657 tmp = of_get_property(rootdn, "model", NULL);
741 if (tmp) { 658 if (tmp)
742 model = tmp; 659 model = tmp;
743 /* Skip "IBM," - see platforms/iseries/dt.c */
744 if (firmware_has_feature(FW_FEATURE_ISERIES))
745 model += 4;
746 }
747 tmp = of_get_property(rootdn, "system-id", NULL); 660 tmp = of_get_property(rootdn, "system-id", NULL);
748 if (tmp) { 661 if (tmp)
749 system_id = tmp; 662 system_id = tmp;
750 /* Skip "IBM," - see platforms/iseries/dt.c */
751 if (firmware_has_feature(FW_FEATURE_ISERIES))
752 system_id += 4;
753 }
754 lp_index_ptr = of_get_property(rootdn, "ibm,partition-no", 663 lp_index_ptr = of_get_property(rootdn, "ibm,partition-no",
755 NULL); 664 NULL);
756 if (lp_index_ptr) 665 if (lp_index_ptr)
@@ -761,8 +670,6 @@ static int lparcfg_data(struct seq_file *m, void *v)
761 seq_printf(m, "system_type=%s\n", model); 670 seq_printf(m, "system_type=%s\n", model);
762 seq_printf(m, "partition_id=%d\n", (int)lp_index); 671 seq_printf(m, "partition_id=%d\n", (int)lp_index);
763 672
764 if (firmware_has_feature(FW_FEATURE_ISERIES))
765 return iseries_lparcfg_data(m, v);
766 return pseries_lparcfg_data(m, v); 673 return pseries_lparcfg_data(m, v);
767} 674}
768 675
@@ -786,8 +693,7 @@ static int __init lparcfg_init(void)
786 umode_t mode = S_IRUSR | S_IRGRP | S_IROTH; 693 umode_t mode = S_IRUSR | S_IRGRP | S_IROTH;
787 694
788 /* Allow writing if we have FW_FEATURE_SPLPAR */ 695 /* Allow writing if we have FW_FEATURE_SPLPAR */
789 if (firmware_has_feature(FW_FEATURE_SPLPAR) && 696 if (firmware_has_feature(FW_FEATURE_SPLPAR))
790 !firmware_has_feature(FW_FEATURE_ISERIES))
791 mode |= S_IWUSR; 697 mode |= S_IWUSR;
792 698
793 ent = proc_create("powerpc/lparcfg", mode, NULL, &lparcfg_fops); 699 ent = proc_create("powerpc/lparcfg", mode, NULL, &lparcfg_fops);
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index b69463ec2010..ba16874fe294 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -5,7 +5,6 @@
5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu) 5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
6 * and Paul Mackerras. 6 * and Paul Mackerras.
7 * 7 *
8 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
9 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com) 8 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
10 * 9 *
11 * setjmp/longjmp code by Paul Mackerras. 10 * setjmp/longjmp code by Paul Mackerras.
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c
index e1612dfb4a93..2049f2d00ffe 100644
--- a/arch/powerpc/kernel/of_platform.c
+++ b/arch/powerpc/kernel/of_platform.c
@@ -21,12 +21,13 @@
21#include <linux/of.h> 21#include <linux/of.h>
22#include <linux/of_device.h> 22#include <linux/of_device.h>
23#include <linux/of_platform.h> 23#include <linux/of_platform.h>
24#include <linux/atomic.h>
24 25
25#include <asm/errno.h> 26#include <asm/errno.h>
26#include <asm/topology.h> 27#include <asm/topology.h>
27#include <asm/pci-bridge.h> 28#include <asm/pci-bridge.h>
28#include <asm/ppc-pci.h> 29#include <asm/ppc-pci.h>
29#include <linux/atomic.h> 30#include <asm/eeh.h>
30 31
31#ifdef CONFIG_PPC_OF_PLATFORM_PCI 32#ifdef CONFIG_PPC_OF_PLATFORM_PCI
32 33
@@ -66,6 +67,9 @@ static int __devinit of_pci_phb_probe(struct platform_device *dev)
66 /* Init pci_dn data structures */ 67 /* Init pci_dn data structures */
67 pci_devs_phb_init_dynamic(phb); 68 pci_devs_phb_init_dynamic(phb);
68 69
70 /* Create EEH devices for the PHB */
71 eeh_dev_phb_init_dynamic(phb);
72
69 /* Register devices with EEH */ 73 /* Register devices with EEH */
70#ifdef CONFIG_EEH 74#ifdef CONFIG_EEH
71 if (dev->dev.of_node->child) 75 if (dev->dev.of_node->child)
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c
index 41456ff55e14..0bb1f98613ba 100644
--- a/arch/powerpc/kernel/paca.c
+++ b/arch/powerpc/kernel/paca.c
@@ -11,13 +11,10 @@
11#include <linux/export.h> 11#include <linux/export.h>
12#include <linux/memblock.h> 12#include <linux/memblock.h>
13 13
14#include <asm/firmware.h>
15#include <asm/lppaca.h> 14#include <asm/lppaca.h>
16#include <asm/paca.h> 15#include <asm/paca.h>
17#include <asm/sections.h> 16#include <asm/sections.h>
18#include <asm/pgtable.h> 17#include <asm/pgtable.h>
19#include <asm/iseries/lpar_map.h>
20#include <asm/iseries/hv_types.h>
21#include <asm/kexec.h> 18#include <asm/kexec.h>
22 19
23/* This symbol is provided by the linker - let it fill in the paca 20/* This symbol is provided by the linker - let it fill in the paca
@@ -30,8 +27,8 @@ extern unsigned long __toc_start;
30 * The structure which the hypervisor knows about - this structure 27 * The structure which the hypervisor knows about - this structure
31 * should not cross a page boundary. The vpa_init/register_vpa call 28 * should not cross a page boundary. The vpa_init/register_vpa call
32 * is now known to fail if the lppaca structure crosses a page 29 * is now known to fail if the lppaca structure crosses a page
33 * boundary. The lppaca is also used on legacy iSeries and POWER5 30 * boundary. The lppaca is also used on POWER5 pSeries boxes.
34 * pSeries boxes. The lppaca is 640 bytes long, and cannot readily 31 * The lppaca is 640 bytes long, and cannot readily
35 * change since the hypervisor knows its layout, so a 1kB alignment 32 * change since the hypervisor knows its layout, so a 1kB alignment
36 * will suffice to ensure that it doesn't cross a page boundary. 33 * will suffice to ensure that it doesn't cross a page boundary.
37 */ 34 */
@@ -183,12 +180,9 @@ void __init allocate_pacas(void)
183 /* 180 /*
184 * We can't take SLB misses on the paca, and we want to access them 181 * We can't take SLB misses on the paca, and we want to access them
185 * in real mode, so allocate them within the RMA and also within 182 * in real mode, so allocate them within the RMA and also within
186 * the first segment. On iSeries they must be within the area mapped 183 * the first segment.
187 * by the HV, which is HvPagesToMap * HVPAGESIZE bytes.
188 */ 184 */
189 limit = min(0x10000000ULL, ppc64_rma_size); 185 limit = min(0x10000000ULL, ppc64_rma_size);
190 if (firmware_has_feature(FW_FEATURE_ISERIES))
191 limit = min(limit, HvPagesToMap * HVPAGESIZE);
192 186
193 paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids); 187 paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids);
194 188
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index cce98d76e905..8e78e93c8185 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -38,7 +38,6 @@
38#include <asm/byteorder.h> 38#include <asm/byteorder.h>
39#include <asm/machdep.h> 39#include <asm/machdep.h>
40#include <asm/ppc-pci.h> 40#include <asm/ppc-pci.h>
41#include <asm/firmware.h>
42#include <asm/eeh.h> 41#include <asm/eeh.h>
43 42
44static DEFINE_SPINLOCK(hose_spinlock); 43static DEFINE_SPINLOCK(hose_spinlock);
@@ -50,9 +49,6 @@ static int global_phb_number; /* Global phb counter */
50/* ISA Memory physical address */ 49/* ISA Memory physical address */
51resource_size_t isa_mem_base; 50resource_size_t isa_mem_base;
52 51
53/* Default PCI flags is 0 on ppc32, modified at boot on ppc64 */
54unsigned int pci_flags = 0;
55
56 52
57static struct dma_map_ops *pci_dma_ops = &dma_direct_ops; 53static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
58 54
@@ -219,20 +215,6 @@ static int pci_read_irq_line(struct pci_dev *pci_dev)
219 struct of_irq oirq; 215 struct of_irq oirq;
220 unsigned int virq; 216 unsigned int virq;
221 217
222 /* The current device-tree that iSeries generates from the HV
223 * PCI informations doesn't contain proper interrupt routing,
224 * and all the fallback would do is print out crap, so we
225 * don't attempt to resolve the interrupts here at all, some
226 * iSeries specific fixup does it.
227 *
228 * In the long run, we will hopefully fix the generated device-tree
229 * instead.
230 */
231#ifdef CONFIG_PPC_ISERIES
232 if (firmware_has_feature(FW_FEATURE_ISERIES))
233 return -1;
234#endif
235
236 pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev)); 218 pr_debug("PCI: Try to map irq for %s...\n", pci_name(pci_dev));
237 219
238#ifdef DEBUG 220#ifdef DEBUG
@@ -849,60 +831,6 @@ int pci_proc_domain(struct pci_bus *bus)
849 return 1; 831 return 1;
850} 832}
851 833
852void pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
853 struct resource *res)
854{
855 resource_size_t offset = 0, mask = (resource_size_t)-1;
856 struct pci_controller *hose = pci_bus_to_host(dev->bus);
857
858 if (!hose)
859 return;
860 if (res->flags & IORESOURCE_IO) {
861 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
862 mask = 0xffffffffu;
863 } else if (res->flags & IORESOURCE_MEM)
864 offset = hose->pci_mem_offset;
865
866 region->start = (res->start - offset) & mask;
867 region->end = (res->end - offset) & mask;
868}
869EXPORT_SYMBOL(pcibios_resource_to_bus);
870
871void pcibios_bus_to_resource(struct pci_dev *dev, struct resource *res,
872 struct pci_bus_region *region)
873{
874 resource_size_t offset = 0, mask = (resource_size_t)-1;
875 struct pci_controller *hose = pci_bus_to_host(dev->bus);
876
877 if (!hose)
878 return;
879 if (res->flags & IORESOURCE_IO) {
880 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
881 mask = 0xffffffffu;
882 } else if (res->flags & IORESOURCE_MEM)
883 offset = hose->pci_mem_offset;
884 res->start = (region->start + offset) & mask;
885 res->end = (region->end + offset) & mask;
886}
887EXPORT_SYMBOL(pcibios_bus_to_resource);
888
889/* Fixup a bus resource into a linux resource */
890static void __devinit fixup_resource(struct resource *res, struct pci_dev *dev)
891{
892 struct pci_controller *hose = pci_bus_to_host(dev->bus);
893 resource_size_t offset = 0, mask = (resource_size_t)-1;
894
895 if (res->flags & IORESOURCE_IO) {
896 offset = (unsigned long)hose->io_base_virt - _IO_BASE;
897 mask = 0xffffffffu;
898 } else if (res->flags & IORESOURCE_MEM)
899 offset = hose->pci_mem_offset;
900
901 res->start = (res->start + offset) & mask;
902 res->end = (res->end + offset) & mask;
903}
904
905
906/* This header fixup will do the resource fixup for all devices as they are 834/* This header fixup will do the resource fixup for all devices as they are
907 * probed, but not for bridge ranges 835 * probed, but not for bridge ranges
908 */ 836 */
@@ -942,18 +870,11 @@ static void __devinit pcibios_fixup_resources(struct pci_dev *dev)
942 continue; 870 continue;
943 } 871 }
944 872
945 pr_debug("PCI:%s Resource %d %016llx-%016llx [%x] fixup...\n", 873 pr_debug("PCI:%s Resource %d %016llx-%016llx [%x]\n",
946 pci_name(dev), i, 874 pci_name(dev), i,
947 (unsigned long long)res->start,\ 875 (unsigned long long)res->start,\
948 (unsigned long long)res->end, 876 (unsigned long long)res->end,
949 (unsigned int)res->flags); 877 (unsigned int)res->flags);
950
951 fixup_resource(res, dev);
952
953 pr_debug("PCI:%s %016llx-%016llx\n",
954 pci_name(dev),
955 (unsigned long long)res->start,
956 (unsigned long long)res->end);
957 } 878 }
958 879
959 /* Call machine specific resource fixup */ 880 /* Call machine specific resource fixup */
@@ -1055,27 +976,18 @@ static void __devinit pcibios_fixup_bridge(struct pci_bus *bus)
1055 continue; 976 continue;
1056 } 977 }
1057 978
1058 pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x] fixup...\n", 979 pr_debug("PCI:%s Bus rsrc %d %016llx-%016llx [%x]\n",
1059 pci_name(dev), i, 980 pci_name(dev), i,
1060 (unsigned long long)res->start,\ 981 (unsigned long long)res->start,\
1061 (unsigned long long)res->end, 982 (unsigned long long)res->end,
1062 (unsigned int)res->flags); 983 (unsigned int)res->flags);
1063 984
1064 /* Perform fixup */
1065 fixup_resource(res, dev);
1066
1067 /* Try to detect uninitialized P2P bridge resources, 985 /* Try to detect uninitialized P2P bridge resources,
1068 * and clear them out so they get re-assigned later 986 * and clear them out so they get re-assigned later
1069 */ 987 */
1070 if (pcibios_uninitialized_bridge_resource(bus, res)) { 988 if (pcibios_uninitialized_bridge_resource(bus, res)) {
1071 res->flags = 0; 989 res->flags = 0;
1072 pr_debug("PCI:%s (unassigned)\n", pci_name(dev)); 990 pr_debug("PCI:%s (unassigned)\n", pci_name(dev));
1073 } else {
1074
1075 pr_debug("PCI:%s %016llx-%016llx\n",
1076 pci_name(dev),
1077 (unsigned long long)res->start,
1078 (unsigned long long)res->end);
1079 } 991 }
1080 } 992 }
1081} 993}
@@ -1565,6 +1477,11 @@ int pcibios_enable_device(struct pci_dev *dev, int mask)
1565 return pci_enable_resources(dev, mask); 1477 return pci_enable_resources(dev, mask);
1566} 1478}
1567 1479
1480resource_size_t pcibios_io_space_offset(struct pci_controller *hose)
1481{
1482 return (unsigned long) hose->io_base_virt - _IO_BASE;
1483}
1484
1568static void __devinit pcibios_setup_phb_resources(struct pci_controller *hose, struct list_head *resources) 1485static void __devinit pcibios_setup_phb_resources(struct pci_controller *hose, struct list_head *resources)
1569{ 1486{
1570 struct resource *res; 1487 struct resource *res;
@@ -1589,7 +1506,7 @@ static void __devinit pcibios_setup_phb_resources(struct pci_controller *hose, s
1589 (unsigned long long)res->start, 1506 (unsigned long long)res->start,
1590 (unsigned long long)res->end, 1507 (unsigned long long)res->end,
1591 (unsigned long)res->flags); 1508 (unsigned long)res->flags);
1592 pci_add_resource(resources, res); 1509 pci_add_resource_offset(resources, res, pcibios_io_space_offset(hose));
1593 1510
1594 /* Hookup PHB Memory resources */ 1511 /* Hookup PHB Memory resources */
1595 for (i = 0; i < 3; ++i) { 1512 for (i = 0; i < 3; ++i) {
@@ -1612,7 +1529,7 @@ static void __devinit pcibios_setup_phb_resources(struct pci_controller *hose, s
1612 (unsigned long long)res->start, 1529 (unsigned long long)res->start,
1613 (unsigned long long)res->end, 1530 (unsigned long long)res->end,
1614 (unsigned long)res->flags); 1531 (unsigned long)res->flags);
1615 pci_add_resource(resources, res); 1532 pci_add_resource_offset(resources, res, hose->pci_mem_offset);
1616 } 1533 }
1617 1534
1618 pr_debug("PCI: PHB MEM offset = %016llx\n", 1535 pr_debug("PCI: PHB MEM offset = %016llx\n",
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index fdd1a3d951dc..4b06ec5a502e 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -219,9 +219,9 @@ void __devinit pcibios_setup_phb_io_space(struct pci_controller *hose)
219 struct resource *res = &hose->io_resource; 219 struct resource *res = &hose->io_resource;
220 220
221 /* Fixup IO space offset */ 221 /* Fixup IO space offset */
222 io_offset = (unsigned long)hose->io_base_virt - isa_io_base; 222 io_offset = pcibios_io_space_offset(hose);
223 res->start = (res->start + io_offset) & 0xffffffffu; 223 res->start += io_offset;
224 res->end = (res->end + io_offset) & 0xffffffffu; 224 res->end += io_offset;
225} 225}
226 226
227static int __init pcibios_init(void) 227static int __init pcibios_init(void)
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 3318d39b7d4c..94a54f61d341 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -33,8 +33,6 @@
33#include <asm/machdep.h> 33#include <asm/machdep.h>
34#include <asm/ppc-pci.h> 34#include <asm/ppc-pci.h>
35 35
36unsigned long pci_probe_only = 1;
37
38/* pci_io_base -- the base address from which io bars are offsets. 36/* pci_io_base -- the base address from which io bars are offsets.
39 * This is the lowest I/O base address (so bar values are always positive), 37 * This is the lowest I/O base address (so bar values are always positive),
40 * and it *must* be the start of ISA space if an ISA bus exists because 38 * and it *must* be the start of ISA space if an ISA bus exists because
@@ -55,9 +53,6 @@ static int __init pcibios_init(void)
55 */ 53 */
56 ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot; 54 ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot;
57 55
58 if (pci_probe_only)
59 pci_add_flags(PCI_PROBE_ONLY);
60
61 /* On ppc64, we always enable PCI domains and we keep domain 0 56 /* On ppc64, we always enable PCI domains and we keep domain 0
62 * backward compatible in /proc for video cards 57 * backward compatible in /proc for video cards
63 */ 58 */
@@ -173,7 +168,7 @@ static int __devinit pcibios_map_phb_io_space(struct pci_controller *hose)
173 return -ENOMEM; 168 return -ENOMEM;
174 169
175 /* Fixup hose IO resource */ 170 /* Fixup hose IO resource */
176 io_virt_offset = (unsigned long)hose->io_base_virt - _IO_BASE; 171 io_virt_offset = pcibios_io_space_offset(hose);
177 hose->io_resource.start += io_virt_offset; 172 hose->io_resource.start += io_virt_offset;
178 hose->io_resource.end += io_virt_offset; 173 hose->io_resource.end += io_virt_offset;
179 174
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c
index b37d0b5a796e..89dde171a6fa 100644
--- a/arch/powerpc/kernel/pci_of_scan.c
+++ b/arch/powerpc/kernel/pci_of_scan.c
@@ -75,6 +75,7 @@ static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev)
75{ 75{
76 u64 base, size; 76 u64 base, size;
77 unsigned int flags; 77 unsigned int flags;
78 struct pci_bus_region region;
78 struct resource *res; 79 struct resource *res;
79 const u32 *addrs; 80 const u32 *addrs;
80 u32 i; 81 u32 i;
@@ -106,10 +107,11 @@ static void of_pci_parse_addrs(struct device_node *node, struct pci_dev *dev)
106 printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i); 107 printk(KERN_ERR "PCI: bad cfg reg num 0x%x\n", i);
107 continue; 108 continue;
108 } 109 }
109 res->start = base;
110 res->end = base + size - 1;
111 res->flags = flags; 110 res->flags = flags;
112 res->name = pci_name(dev); 111 res->name = pci_name(dev);
112 region.start = base;
113 region.end = base + size - 1;
114 pcibios_bus_to_resource(dev, res, &region);
113 } 115 }
114} 116}
115 117
@@ -209,6 +211,7 @@ void __devinit of_scan_pci_bridge(struct pci_dev *dev)
209 struct pci_bus *bus; 211 struct pci_bus *bus;
210 const u32 *busrange, *ranges; 212 const u32 *busrange, *ranges;
211 int len, i, mode; 213 int len, i, mode;
214 struct pci_bus_region region;
212 struct resource *res; 215 struct resource *res;
213 unsigned int flags; 216 unsigned int flags;
214 u64 size; 217 u64 size;
@@ -270,9 +273,10 @@ void __devinit of_scan_pci_bridge(struct pci_dev *dev)
270 res = bus->resource[i]; 273 res = bus->resource[i];
271 ++i; 274 ++i;
272 } 275 }
273 res->start = of_read_number(&ranges[1], 2);
274 res->end = res->start + size - 1;
275 res->flags = flags; 276 res->flags = flags;
277 region.start = of_read_number(&ranges[1], 2);
278 region.end = region.start + size - 1;
279 pcibios_bus_to_resource(dev, res, &region);
276 } 280 }
277 sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), 281 sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus),
278 bus->number); 282 bus->number);
diff --git a/arch/powerpc/kernel/pmc.c b/arch/powerpc/kernel/pmc.c
index a841a9d136a2..58eaa3ddf7b9 100644
--- a/arch/powerpc/kernel/pmc.c
+++ b/arch/powerpc/kernel/pmc.c
@@ -13,6 +13,7 @@
13 */ 13 */
14 14
15#include <linux/errno.h> 15#include <linux/errno.h>
16#include <linux/bug.h>
16#include <linux/spinlock.h> 17#include <linux/spinlock.h>
17#include <linux/export.h> 18#include <linux/export.h>
18 19
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index ebe5766781aa..e40707032ac3 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -566,12 +566,12 @@ static void show_instructions(struct pt_regs *regs)
566 */ 566 */
567 if (!__kernel_text_address(pc) || 567 if (!__kernel_text_address(pc) ||
568 __get_user(instr, (unsigned int __user *)pc)) { 568 __get_user(instr, (unsigned int __user *)pc)) {
569 printk("XXXXXXXX "); 569 printk(KERN_CONT "XXXXXXXX ");
570 } else { 570 } else {
571 if (regs->nip == pc) 571 if (regs->nip == pc)
572 printk("<%08x> ", instr); 572 printk(KERN_CONT "<%08x> ", instr);
573 else 573 else
574 printk("%08x ", instr); 574 printk(KERN_CONT "%08x ", instr);
575 } 575 }
576 576
577 pc += sizeof(int); 577 pc += sizeof(int);
@@ -647,6 +647,9 @@ void show_regs(struct pt_regs * regs)
647 printk("MSR: "REG" ", regs->msr); 647 printk("MSR: "REG" ", regs->msr);
648 printbits(regs->msr, msr_bits); 648 printbits(regs->msr, msr_bits);
649 printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer); 649 printk(" CR: %08lx XER: %08lx\n", regs->ccr, regs->xer);
650#ifdef CONFIG_PPC64
651 printk("SOFTE: %ld\n", regs->softe);
652#endif
650 trap = TRAP(regs); 653 trap = TRAP(regs);
651 if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR)) 654 if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
652 printk("CFAR: "REG"\n", regs->orig_gpr3); 655 printk("CFAR: "REG"\n", regs->orig_gpr3);
@@ -1220,34 +1223,32 @@ void dump_stack(void)
1220EXPORT_SYMBOL(dump_stack); 1223EXPORT_SYMBOL(dump_stack);
1221 1224
1222#ifdef CONFIG_PPC64 1225#ifdef CONFIG_PPC64
1223void ppc64_runlatch_on(void) 1226/* Called with hard IRQs off */
1227void __ppc64_runlatch_on(void)
1224{ 1228{
1229 struct thread_info *ti = current_thread_info();
1225 unsigned long ctrl; 1230 unsigned long ctrl;
1226 1231
1227 if (cpu_has_feature(CPU_FTR_CTRL) && !test_thread_flag(TIF_RUNLATCH)) { 1232 ctrl = mfspr(SPRN_CTRLF);
1228 HMT_medium(); 1233 ctrl |= CTRL_RUNLATCH;
1229 1234 mtspr(SPRN_CTRLT, ctrl);
1230 ctrl = mfspr(SPRN_CTRLF);
1231 ctrl |= CTRL_RUNLATCH;
1232 mtspr(SPRN_CTRLT, ctrl);
1233 1235
1234 set_thread_flag(TIF_RUNLATCH); 1236 ti->local_flags |= TLF_RUNLATCH;
1235 }
1236} 1237}
1237 1238
1239/* Called with hard IRQs off */
1238void __ppc64_runlatch_off(void) 1240void __ppc64_runlatch_off(void)
1239{ 1241{
1242 struct thread_info *ti = current_thread_info();
1240 unsigned long ctrl; 1243 unsigned long ctrl;
1241 1244
1242 HMT_medium(); 1245 ti->local_flags &= ~TLF_RUNLATCH;
1243
1244 clear_thread_flag(TIF_RUNLATCH);
1245 1246
1246 ctrl = mfspr(SPRN_CTRLF); 1247 ctrl = mfspr(SPRN_CTRLF);
1247 ctrl &= ~CTRL_RUNLATCH; 1248 ctrl &= ~CTRL_RUNLATCH;
1248 mtspr(SPRN_CTRLT, ctrl); 1249 mtspr(SPRN_CTRLT, ctrl);
1249} 1250}
1250#endif 1251#endif /* CONFIG_PPC64 */
1251 1252
1252#if THREAD_SHIFT < PAGE_SHIFT 1253#if THREAD_SHIFT < PAGE_SHIFT
1253 1254
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index abe405dab34d..89e850af3dd6 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -52,9 +52,9 @@
52#include <asm/machdep.h> 52#include <asm/machdep.h>
53#include <asm/pSeries_reconfig.h> 53#include <asm/pSeries_reconfig.h>
54#include <asm/pci-bridge.h> 54#include <asm/pci-bridge.h>
55#include <asm/phyp_dump.h>
56#include <asm/kexec.h> 55#include <asm/kexec.h>
57#include <asm/opal.h> 56#include <asm/opal.h>
57#include <asm/fadump.h>
58 58
59#include <mm/mmu_decl.h> 59#include <mm/mmu_decl.h>
60 60
@@ -615,86 +615,6 @@ static void __init early_reserve_mem(void)
615 } 615 }
616} 616}
617 617
618#ifdef CONFIG_PHYP_DUMP
619/**
620 * phyp_dump_calculate_reserve_size() - reserve variable boot area 5% or arg
621 *
622 * Function to find the largest size we need to reserve
623 * during early boot process.
624 *
625 * It either looks for boot param and returns that OR
626 * returns larger of 256 or 5% rounded down to multiples of 256MB.
627 *
628 */
629static inline unsigned long phyp_dump_calculate_reserve_size(void)
630{
631 unsigned long tmp;
632
633 if (phyp_dump_info->reserve_bootvar)
634 return phyp_dump_info->reserve_bootvar;
635
636 /* divide by 20 to get 5% of value */
637 tmp = memblock_end_of_DRAM();
638 do_div(tmp, 20);
639
640 /* round it down in multiples of 256 */
641 tmp = tmp & ~0x0FFFFFFFUL;
642
643 return (tmp > PHYP_DUMP_RMR_END ? tmp : PHYP_DUMP_RMR_END);
644}
645
646/**
647 * phyp_dump_reserve_mem() - reserve all not-yet-dumped mmemory
648 *
649 * This routine may reserve memory regions in the kernel only
650 * if the system is supported and a dump was taken in last
651 * boot instance or if the hardware is supported and the
652 * scratch area needs to be setup. In other instances it returns
653 * without reserving anything. The memory in case of dump being
654 * active is freed when the dump is collected (by userland tools).
655 */
656static void __init phyp_dump_reserve_mem(void)
657{
658 unsigned long base, size;
659 unsigned long variable_reserve_size;
660
661 if (!phyp_dump_info->phyp_dump_configured) {
662 printk(KERN_ERR "Phyp-dump not supported on this hardware\n");
663 return;
664 }
665
666 if (!phyp_dump_info->phyp_dump_at_boot) {
667 printk(KERN_INFO "Phyp-dump disabled at boot time\n");
668 return;
669 }
670
671 variable_reserve_size = phyp_dump_calculate_reserve_size();
672
673 if (phyp_dump_info->phyp_dump_is_active) {
674 /* Reserve *everything* above RMR.Area freed by userland tools*/
675 base = variable_reserve_size;
676 size = memblock_end_of_DRAM() - base;
677
678 /* XXX crashed_ram_end is wrong, since it may be beyond
679 * the memory_limit, it will need to be adjusted. */
680 memblock_reserve(base, size);
681
682 phyp_dump_info->init_reserve_start = base;
683 phyp_dump_info->init_reserve_size = size;
684 } else {
685 size = phyp_dump_info->cpu_state_size +
686 phyp_dump_info->hpte_region_size +
687 variable_reserve_size;
688 base = memblock_end_of_DRAM() - size;
689 memblock_reserve(base, size);
690 phyp_dump_info->init_reserve_start = base;
691 phyp_dump_info->init_reserve_size = size;
692 }
693}
694#else
695static inline void __init phyp_dump_reserve_mem(void) {}
696#endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */
697
698void __init early_init_devtree(void *params) 618void __init early_init_devtree(void *params)
699{ 619{
700 phys_addr_t limit; 620 phys_addr_t limit;
@@ -714,9 +634,9 @@ void __init early_init_devtree(void *params)
714 of_scan_flat_dt(early_init_dt_scan_opal, NULL); 634 of_scan_flat_dt(early_init_dt_scan_opal, NULL);
715#endif 635#endif
716 636
717#ifdef CONFIG_PHYP_DUMP 637#ifdef CONFIG_FA_DUMP
718 /* scan tree to see if dump occurred during last boot */ 638 /* scan tree to see if dump is active during last boot */
719 of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL); 639 of_scan_flat_dt(early_init_dt_scan_fw_dump, NULL);
720#endif 640#endif
721 641
722 /* Pre-initialize the cmd_line with the content of boot_commmand_line, 642 /* Pre-initialize the cmd_line with the content of boot_commmand_line,
@@ -750,9 +670,15 @@ void __init early_init_devtree(void *params)
750 if (PHYSICAL_START > MEMORY_START) 670 if (PHYSICAL_START > MEMORY_START)
751 memblock_reserve(MEMORY_START, 0x8000); 671 memblock_reserve(MEMORY_START, 0x8000);
752 reserve_kdump_trampoline(); 672 reserve_kdump_trampoline();
753 reserve_crashkernel(); 673#ifdef CONFIG_FA_DUMP
674 /*
675 * If we fail to reserve memory for firmware-assisted dump then
676 * fallback to kexec based kdump.
677 */
678 if (fadump_reserve_mem() == 0)
679#endif
680 reserve_crashkernel();
754 early_reserve_mem(); 681 early_reserve_mem();
755 phyp_dump_reserve_mem();
756 682
757 /* 683 /*
758 * Ensure that total memory size is page-aligned, because otherwise 684 * Ensure that total memory size is page-aligned, because otherwise
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index eca626ea3f23..ea4e311e09d2 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -48,14 +48,6 @@
48#include <linux/linux_logo.h> 48#include <linux/linux_logo.h>
49 49
50/* 50/*
51 * Properties whose value is longer than this get excluded from our
52 * copy of the device tree. This value does need to be big enough to
53 * ensure that we don't lose things like the interrupt-map property
54 * on a PCI-PCI bridge.
55 */
56#define MAX_PROPERTY_LENGTH (1UL * 1024 * 1024)
57
58/*
59 * Eventually bump that one up 51 * Eventually bump that one up
60 */ 52 */
61#define DEVTREE_CHUNK_SIZE 0x100000 53#define DEVTREE_CHUNK_SIZE 0x100000
@@ -455,7 +447,7 @@ static void __init __attribute__((noreturn)) prom_panic(const char *reason)
455 if (RELOC(of_platform) == PLATFORM_POWERMAC) 447 if (RELOC(of_platform) == PLATFORM_POWERMAC)
456 asm("trap\n"); 448 asm("trap\n");
457 449
458 /* ToDo: should put up an SRC here on p/iSeries */ 450 /* ToDo: should put up an SRC here on pSeries */
459 call_prom("exit", 0, 0); 451 call_prom("exit", 0, 0);
460 452
461 for (;;) /* should never get here */ 453 for (;;) /* should never get here */
@@ -2273,13 +2265,6 @@ static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
2273 /* sanity checks */ 2265 /* sanity checks */
2274 if (l == PROM_ERROR) 2266 if (l == PROM_ERROR)
2275 continue; 2267 continue;
2276 if (l > MAX_PROPERTY_LENGTH) {
2277 prom_printf("WARNING: ignoring large property ");
2278 /* It seems OF doesn't null-terminate the path :-( */
2279 prom_printf("[%s] ", path);
2280 prom_printf("%s length 0x%x\n", RELOC(pname), l);
2281 continue;
2282 }
2283 2268
2284 /* push property head */ 2269 /* push property head */
2285 dt_push_token(OF_DT_PROP, mem_start, mem_end); 2270 dt_push_token(OF_DT_PROP, mem_start, mem_end);
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 517b1d8f455b..4d1a7babe342 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -716,7 +716,6 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
716 int cpu; 716 int cpu;
717 717
718 slb_set_size(SLB_MIN_SIZE); 718 slb_set_size(SLB_MIN_SIZE);
719 stop_topology_update();
720 printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id()); 719 printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id());
721 720
722 while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) && 721 while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) &&
@@ -732,7 +731,6 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w
732 rc = atomic_read(&data->error); 731 rc = atomic_read(&data->error);
733 732
734 atomic_set(&data->error, rc); 733 atomic_set(&data->error, rc);
735 start_topology_update();
736 pSeries_coalesce_init(); 734 pSeries_coalesce_init();
737 735
738 if (wake_when_done) { 736 if (wake_when_done) {
@@ -846,6 +844,7 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
846 atomic_set(&data.error, 0); 844 atomic_set(&data.error, 0);
847 data.token = rtas_token("ibm,suspend-me"); 845 data.token = rtas_token("ibm,suspend-me");
848 data.complete = &done; 846 data.complete = &done;
847 stop_topology_update();
849 848
850 /* Call function on all CPUs. One of us will make the 849 /* Call function on all CPUs. One of us will make the
851 * rtas call 850 * rtas call
@@ -858,6 +857,8 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
858 if (atomic_read(&data.error) != 0) 857 if (atomic_read(&data.error) != 0)
859 printk(KERN_ERR "Error doing global join\n"); 858 printk(KERN_ERR "Error doing global join\n");
860 859
860 start_topology_update();
861
861 return atomic_read(&data.error); 862 return atomic_read(&data.error);
862} 863}
863#else /* CONFIG_PPC_PSERIES */ 864#else /* CONFIG_PPC_PSERIES */
@@ -867,6 +868,40 @@ int rtas_ibm_suspend_me(struct rtas_args *args)
867} 868}
868#endif 869#endif
869 870
871/**
872 * Find a specific pseries error log in an RTAS extended event log.
873 * @log: RTAS error/event log
874 * @section_id: two character section identifier
875 *
876 * Returns a pointer to the specified errorlog or NULL if not found.
877 */
878struct pseries_errorlog *get_pseries_errorlog(struct rtas_error_log *log,
879 uint16_t section_id)
880{
881 struct rtas_ext_event_log_v6 *ext_log =
882 (struct rtas_ext_event_log_v6 *)log->buffer;
883 struct pseries_errorlog *sect;
884 unsigned char *p, *log_end;
885
886 /* Check that we understand the format */
887 if (log->extended_log_length < sizeof(struct rtas_ext_event_log_v6) ||
888 ext_log->log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
889 ext_log->company_id != RTAS_V6EXT_COMPANY_ID_IBM)
890 return NULL;
891
892 log_end = log->buffer + log->extended_log_length;
893 p = ext_log->vendor_log;
894
895 while (p < log_end) {
896 sect = (struct pseries_errorlog *)p;
897 if (sect->id == section_id)
898 return sect;
899 p += sect->length;
900 }
901
902 return NULL;
903}
904
870asmlinkage int ppc_rtas(struct rtas_args __user *uargs) 905asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
871{ 906{
872 struct rtas_args args; 907 struct rtas_args args;
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 6cd8f0196b6d..179af906dcda 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -275,8 +275,11 @@ void __init find_and_init_phbs(void)
275 of_node_put(root); 275 of_node_put(root);
276 pci_devs_phb_init(); 276 pci_devs_phb_init();
277 277
278 /* Create EEH devices for all PHBs */
279 eeh_dev_phb_init();
280
278 /* 281 /*
279 * pci_probe_only and pci_assign_all_buses can be set via properties 282 * PCI_PROBE_ONLY and PCI_REASSIGN_ALL_BUS can be set via properties
280 * in chosen. 283 * in chosen.
281 */ 284 */
282 if (of_chosen) { 285 if (of_chosen) {
@@ -284,8 +287,12 @@ void __init find_and_init_phbs(void)
284 287
285 prop = of_get_property(of_chosen, 288 prop = of_get_property(of_chosen,
286 "linux,pci-probe-only", NULL); 289 "linux,pci-probe-only", NULL);
287 if (prop) 290 if (prop) {
288 pci_probe_only = *prop; 291 if (*prop)
292 pci_add_flags(PCI_PROBE_ONLY);
293 else
294 pci_clear_flags(PCI_PROBE_ONLY);
295 }
289 296
290#ifdef CONFIG_PPC32 /* Will be made generic soon */ 297#ifdef CONFIG_PPC32 /* Will be made generic soon */
291 prop = of_get_property(of_chosen, 298 prop = of_get_property(of_chosen,
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 77bb77da05c1..b0ebdeab9494 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -61,6 +61,7 @@
61#include <asm/xmon.h> 61#include <asm/xmon.h>
62#include <asm/cputhreads.h> 62#include <asm/cputhreads.h>
63#include <mm/mmu_decl.h> 63#include <mm/mmu_decl.h>
64#include <asm/fadump.h>
64 65
65#include "setup.h" 66#include "setup.h"
66 67
@@ -109,6 +110,14 @@ EXPORT_SYMBOL(ppc_do_canonicalize_irqs);
109/* also used by kexec */ 110/* also used by kexec */
110void machine_shutdown(void) 111void machine_shutdown(void)
111{ 112{
113#ifdef CONFIG_FA_DUMP
114 /*
115 * if fadump is active, cleanup the fadump registration before we
116 * shutdown.
117 */
118 fadump_cleanup();
119#endif
120
112 if (ppc_md.machine_shutdown) 121 if (ppc_md.machine_shutdown)
113 ppc_md.machine_shutdown(); 122 ppc_md.machine_shutdown();
114} 123}
@@ -639,6 +648,11 @@ EXPORT_SYMBOL(check_legacy_ioport);
639static int ppc_panic_event(struct notifier_block *this, 648static int ppc_panic_event(struct notifier_block *this,
640 unsigned long event, void *ptr) 649 unsigned long event, void *ptr)
641{ 650{
651 /*
652 * If firmware-assisted dump has been registered then trigger
653 * firmware-assisted dump and let firmware handle everything else.
654 */
655 crash_fadump(NULL, ptr);
642 ppc_md.panic(ptr); /* May not return */ 656 ppc_md.panic(ptr); /* May not return */
643 return NOTIFY_DONE; 657 return NOTIFY_DONE;
644} 658}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 4cb8f1e9d044..4721b0c8d7b7 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -598,7 +598,7 @@ void __init setup_arch(char **cmdline_p)
598 /* Initialize the MMU context management stuff */ 598 /* Initialize the MMU context management stuff */
599 mmu_context_init(); 599 mmu_context_init();
600 600
601 kvm_rma_init(); 601 kvm_linear_init();
602 602
603 ppc64_boot_msg(0x15, "Setup Done"); 603 ppc64_boot_msg(0x15, "Setup Done");
604} 604}
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 2300426e531a..7006b7f4267a 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -11,6 +11,7 @@
11 11
12#include <linux/tracehook.h> 12#include <linux/tracehook.h>
13#include <linux/signal.h> 13#include <linux/signal.h>
14#include <linux/key.h>
14#include <asm/hw_breakpoint.h> 15#include <asm/hw_breakpoint.h>
15#include <asm/uaccess.h> 16#include <asm/uaccess.h>
16#include <asm/unistd.h> 17#include <asm/unistd.h>
@@ -56,10 +57,7 @@ void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
56void restore_sigmask(sigset_t *set) 57void restore_sigmask(sigset_t *set)
57{ 58{
58 sigdelsetmask(set, ~_BLOCKABLE); 59 sigdelsetmask(set, ~_BLOCKABLE);
59 spin_lock_irq(&current->sighand->siglock); 60 set_current_blocked(set);
60 current->blocked = *set;
61 recalc_sigpending();
62 spin_unlock_irq(&current->sighand->siglock);
63} 61}
64 62
65static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka, 63static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
@@ -113,8 +111,9 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
113 } 111 }
114} 112}
115 113
116static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs) 114static int do_signal(struct pt_regs *regs)
117{ 115{
116 sigset_t *oldset;
118 siginfo_t info; 117 siginfo_t info;
119 int signr; 118 int signr;
120 struct k_sigaction ka; 119 struct k_sigaction ka;
@@ -123,7 +122,7 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
123 122
124 if (current_thread_info()->local_flags & _TLF_RESTORE_SIGMASK) 123 if (current_thread_info()->local_flags & _TLF_RESTORE_SIGMASK)
125 oldset = &current->saved_sigmask; 124 oldset = &current->saved_sigmask;
126 else if (!oldset) 125 else
127 oldset = &current->blocked; 126 oldset = &current->blocked;
128 127
129 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 128 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
@@ -167,13 +166,7 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
167 166
168 regs->trap = 0; 167 regs->trap = 0;
169 if (ret) { 168 if (ret) {
170 spin_lock_irq(&current->sighand->siglock); 169 block_sigmask(&ka, signr);
171 sigorsets(&current->blocked, &current->blocked,
172 &ka.sa.sa_mask);
173 if (!(ka.sa.sa_flags & SA_NODEFER))
174 sigaddset(&current->blocked, signr);
175 recalc_sigpending();
176 spin_unlock_irq(&current->sighand->siglock);
177 170
178 /* 171 /*
179 * A signal was successfully delivered; the saved sigmask is in 172 * A signal was successfully delivered; the saved sigmask is in
@@ -191,14 +184,16 @@ static int do_signal_pending(sigset_t *oldset, struct pt_regs *regs)
191 return ret; 184 return ret;
192} 185}
193 186
194void do_signal(struct pt_regs *regs, unsigned long thread_info_flags) 187void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
195{ 188{
196 if (thread_info_flags & _TIF_SIGPENDING) 189 if (thread_info_flags & _TIF_SIGPENDING)
197 do_signal_pending(NULL, regs); 190 do_signal(regs);
198 191
199 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 192 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
200 clear_thread_flag(TIF_NOTIFY_RESUME); 193 clear_thread_flag(TIF_NOTIFY_RESUME);
201 tracehook_notify_resume(regs); 194 tracehook_notify_resume(regs);
195 if (current->replacement_session_keyring)
196 key_replace_session_keyring();
202 } 197 }
203} 198}
204 199
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index 6c0ddfc0603e..8dde973aaaf5 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -12,7 +12,7 @@
12 12
13#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 13#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
14 14
15extern void do_signal(struct pt_regs *regs, unsigned long thread_info_flags); 15extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
16 16
17extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, 17extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
18 size_t frame_size, int is_32); 18 size_t frame_size, int is_32);
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 836a5a19eb2c..e061ef5dd449 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -242,12 +242,13 @@ static inline int restore_general_regs(struct pt_regs *regs,
242 */ 242 */
243long sys_sigsuspend(old_sigset_t mask) 243long sys_sigsuspend(old_sigset_t mask)
244{ 244{
245 mask &= _BLOCKABLE; 245 sigset_t blocked;
246 spin_lock_irq(&current->sighand->siglock); 246
247 current->saved_sigmask = current->blocked; 247 current->saved_sigmask = current->blocked;
248 siginitset(&current->blocked, mask); 248
249 recalc_sigpending(); 249 mask &= _BLOCKABLE;
250 spin_unlock_irq(&current->sighand->siglock); 250 siginitset(&blocked, mask);
251 set_current_blocked(&blocked);
251 252
252 current->state = TASK_INTERRUPTIBLE; 253 current->state = TASK_INTERRUPTIBLE;
253 schedule(); 254 schedule();
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 883e74c0d1b3..0c683d376b1c 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -12,7 +12,6 @@
12#include <asm/current.h> 12#include <asm/current.h>
13#include <asm/processor.h> 13#include <asm/processor.h>
14#include <asm/cputable.h> 14#include <asm/cputable.h>
15#include <asm/firmware.h>
16#include <asm/hvcall.h> 15#include <asm/hvcall.h>
17#include <asm/prom.h> 16#include <asm/prom.h>
18#include <asm/machdep.h> 17#include <asm/machdep.h>
@@ -341,8 +340,7 @@ static void __cpuinit register_cpu_online(unsigned int cpu)
341 int i, nattrs; 340 int i, nattrs;
342 341
343#ifdef CONFIG_PPC64 342#ifdef CONFIG_PPC64
344 if (!firmware_has_feature(FW_FEATURE_ISERIES) && 343 if (cpu_has_feature(CPU_FTR_SMT))
345 cpu_has_feature(CPU_FTR_SMT))
346 device_create_file(s, &dev_attr_smt_snooze_delay); 344 device_create_file(s, &dev_attr_smt_snooze_delay);
347#endif 345#endif
348 346
@@ -414,8 +412,7 @@ static void unregister_cpu_online(unsigned int cpu)
414 BUG_ON(!c->hotpluggable); 412 BUG_ON(!c->hotpluggable);
415 413
416#ifdef CONFIG_PPC64 414#ifdef CONFIG_PPC64
417 if (!firmware_has_feature(FW_FEATURE_ISERIES) && 415 if (cpu_has_feature(CPU_FTR_SMT))
418 cpu_has_feature(CPU_FTR_SMT))
419 device_remove_file(s, &dev_attr_smt_snooze_delay); 416 device_remove_file(s, &dev_attr_smt_snooze_delay);
420#endif 417#endif
421 418
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 567dd7c3ac2a..2c42cd72d0f5 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -17,8 +17,7 @@
17 * 17 *
18 * TODO (not necessarily in this file): 18 * TODO (not necessarily in this file):
19 * - improve precision and reproducibility of timebase frequency 19 * - improve precision and reproducibility of timebase frequency
20 * measurement at boot time. (for iSeries, we calibrate the timebase 20 * measurement at boot time.
21 * against the Titan chip's clock.)
22 * - for astronomical applications: add a new function to get 21 * - for astronomical applications: add a new function to get
23 * non ambiguous timestamps even around leap seconds. This needs 22 * non ambiguous timestamps even around leap seconds. This needs
24 * a new timestamp format and a good name. 23 * a new timestamp format and a good name.
@@ -70,10 +69,6 @@
70#include <asm/vdso_datapage.h> 69#include <asm/vdso_datapage.h>
71#include <asm/firmware.h> 70#include <asm/firmware.h>
72#include <asm/cputime.h> 71#include <asm/cputime.h>
73#ifdef CONFIG_PPC_ISERIES
74#include <asm/iseries/it_lp_queue.h>
75#include <asm/iseries/hv_call_xm.h>
76#endif
77 72
78/* powerpc clocksource/clockevent code */ 73/* powerpc clocksource/clockevent code */
79 74
@@ -117,14 +112,6 @@ static struct clock_event_device decrementer_clockevent = {
117DEFINE_PER_CPU(u64, decrementers_next_tb); 112DEFINE_PER_CPU(u64, decrementers_next_tb);
118static DEFINE_PER_CPU(struct clock_event_device, decrementers); 113static DEFINE_PER_CPU(struct clock_event_device, decrementers);
119 114
120#ifdef CONFIG_PPC_ISERIES
121static unsigned long __initdata iSeries_recal_titan;
122static signed long __initdata iSeries_recal_tb;
123
124/* Forward declaration is only needed for iSereis compiles */
125static void __init clocksource_init(void);
126#endif
127
128#define XSEC_PER_SEC (1024*1024) 115#define XSEC_PER_SEC (1024*1024)
129 116
130#ifdef CONFIG_PPC64 117#ifdef CONFIG_PPC64
@@ -259,7 +246,6 @@ void accumulate_stolen_time(void)
259 u64 sst, ust; 246 u64 sst, ust;
260 247
261 u8 save_soft_enabled = local_paca->soft_enabled; 248 u8 save_soft_enabled = local_paca->soft_enabled;
262 u8 save_hard_enabled = local_paca->hard_enabled;
263 249
264 /* We are called early in the exception entry, before 250 /* We are called early in the exception entry, before
265 * soft/hard_enabled are sync'ed to the expected state 251 * soft/hard_enabled are sync'ed to the expected state
@@ -268,7 +254,6 @@ void accumulate_stolen_time(void)
268 * complain 254 * complain
269 */ 255 */
270 local_paca->soft_enabled = 0; 256 local_paca->soft_enabled = 0;
271 local_paca->hard_enabled = 0;
272 257
273 sst = scan_dispatch_log(local_paca->starttime_user); 258 sst = scan_dispatch_log(local_paca->starttime_user);
274 ust = scan_dispatch_log(local_paca->starttime); 259 ust = scan_dispatch_log(local_paca->starttime);
@@ -277,7 +262,6 @@ void accumulate_stolen_time(void)
277 local_paca->stolen_time += ust + sst; 262 local_paca->stolen_time += ust + sst;
278 263
279 local_paca->soft_enabled = save_soft_enabled; 264 local_paca->soft_enabled = save_soft_enabled;
280 local_paca->hard_enabled = save_hard_enabled;
281} 265}
282 266
283static inline u64 calculate_stolen_time(u64 stop_tb) 267static inline u64 calculate_stolen_time(u64 stop_tb)
@@ -426,74 +410,6 @@ unsigned long profile_pc(struct pt_regs *regs)
426EXPORT_SYMBOL(profile_pc); 410EXPORT_SYMBOL(profile_pc);
427#endif 411#endif
428 412
429#ifdef CONFIG_PPC_ISERIES
430
431/*
432 * This function recalibrates the timebase based on the 49-bit time-of-day
433 * value in the Titan chip. The Titan is much more accurate than the value
434 * returned by the service processor for the timebase frequency.
435 */
436
437static int __init iSeries_tb_recal(void)
438{
439 unsigned long titan, tb;
440
441 /* Make sure we only run on iSeries */
442 if (!firmware_has_feature(FW_FEATURE_ISERIES))
443 return -ENODEV;
444
445 tb = get_tb();
446 titan = HvCallXm_loadTod();
447 if ( iSeries_recal_titan ) {
448 unsigned long tb_ticks = tb - iSeries_recal_tb;
449 unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
450 unsigned long new_tb_ticks_per_sec = (tb_ticks * USEC_PER_SEC)/titan_usec;
451 unsigned long new_tb_ticks_per_jiffy =
452 DIV_ROUND_CLOSEST(new_tb_ticks_per_sec, HZ);
453 long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
454 char sign = '+';
455 /* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
456 new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;
457
458 if ( tick_diff < 0 ) {
459 tick_diff = -tick_diff;
460 sign = '-';
461 }
462 if ( tick_diff ) {
463 if ( tick_diff < tb_ticks_per_jiffy/25 ) {
464 printk( "Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
465 new_tb_ticks_per_jiffy, sign, tick_diff );
466 tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
467 tb_ticks_per_sec = new_tb_ticks_per_sec;
468 calc_cputime_factors();
469 vdso_data->tb_ticks_per_sec = tb_ticks_per_sec;
470 setup_cputime_one_jiffy();
471 }
472 else {
473 printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
474 " new tb_ticks_per_jiffy = %lu\n"
475 " old tb_ticks_per_jiffy = %lu\n",
476 new_tb_ticks_per_jiffy, tb_ticks_per_jiffy );
477 }
478 }
479 }
480 iSeries_recal_titan = titan;
481 iSeries_recal_tb = tb;
482
483 /* Called here as now we know accurate values for the timebase */
484 clocksource_init();
485 return 0;
486}
487late_initcall(iSeries_tb_recal);
488
489/* Called from platform early init */
490void __init iSeries_time_init_early(void)
491{
492 iSeries_recal_tb = get_tb();
493 iSeries_recal_titan = HvCallXm_loadTod();
494}
495#endif /* CONFIG_PPC_ISERIES */
496
497#ifdef CONFIG_IRQ_WORK 413#ifdef CONFIG_IRQ_WORK
498 414
499/* 415/*
@@ -550,16 +466,6 @@ void arch_irq_work_raise(void)
550#endif /* CONFIG_IRQ_WORK */ 466#endif /* CONFIG_IRQ_WORK */
551 467
552/* 468/*
553 * For iSeries shared processors, we have to let the hypervisor
554 * set the hardware decrementer. We set a virtual decrementer
555 * in the lppaca and call the hypervisor if the virtual
556 * decrementer is less than the current value in the hardware
557 * decrementer. (almost always the new decrementer value will
558 * be greater than the current hardware decementer so the hypervisor
559 * call will not be needed)
560 */
561
562/*
563 * timer_interrupt - gets called when the decrementer overflows, 469 * timer_interrupt - gets called when the decrementer overflows,
564 * with interrupts disabled. 470 * with interrupts disabled.
565 */ 471 */
@@ -580,6 +486,11 @@ void timer_interrupt(struct pt_regs * regs)
580 if (!cpu_online(smp_processor_id())) 486 if (!cpu_online(smp_processor_id()))
581 return; 487 return;
582 488
489 /* Conditionally hard-enable interrupts now that the DEC has been
490 * bumped to its maximum value
491 */
492 may_hard_irq_enable();
493
583 trace_timer_interrupt_entry(regs); 494 trace_timer_interrupt_entry(regs);
584 495
585 __get_cpu_var(irq_stat).timer_irqs++; 496 __get_cpu_var(irq_stat).timer_irqs++;
@@ -597,20 +508,10 @@ void timer_interrupt(struct pt_regs * regs)
597 irq_work_run(); 508 irq_work_run();
598 } 509 }
599 510
600#ifdef CONFIG_PPC_ISERIES
601 if (firmware_has_feature(FW_FEATURE_ISERIES))
602 get_lppaca()->int_dword.fields.decr_int = 0;
603#endif
604
605 *next_tb = ~(u64)0; 511 *next_tb = ~(u64)0;
606 if (evt->event_handler) 512 if (evt->event_handler)
607 evt->event_handler(evt); 513 evt->event_handler(evt);
608 514
609#ifdef CONFIG_PPC_ISERIES
610 if (firmware_has_feature(FW_FEATURE_ISERIES) && hvlpevent_is_pending())
611 process_hvlpevents();
612#endif
613
614#ifdef CONFIG_PPC64 515#ifdef CONFIG_PPC64
615 /* collect purr register values often, for accurate calculations */ 516 /* collect purr register values often, for accurate calculations */
616 if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 517 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
@@ -982,9 +883,8 @@ void __init time_init(void)
982 */ 883 */
983 start_cpu_decrementer(); 884 start_cpu_decrementer();
984 885
985 /* Register the clocksource, if we're not running on iSeries */ 886 /* Register the clocksource */
986 if (!firmware_has_feature(FW_FEATURE_ISERIES)) 887 clocksource_init();
987 clocksource_init();
988 888
989 init_decrementer_clockevent(); 889 init_decrementer_clockevent();
990} 890}
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index c091527efd89..a750409ccc4e 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -57,6 +57,7 @@
57#include <asm/kexec.h> 57#include <asm/kexec.h>
58#include <asm/ppc-opcode.h> 58#include <asm/ppc-opcode.h>
59#include <asm/rio.h> 59#include <asm/rio.h>
60#include <asm/fadump.h>
60 61
61#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) 62#if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC)
62int (*__debugger)(struct pt_regs *regs) __read_mostly; 63int (*__debugger)(struct pt_regs *regs) __read_mostly;
@@ -145,6 +146,8 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
145 arch_spin_unlock(&die_lock); 146 arch_spin_unlock(&die_lock);
146 raw_local_irq_restore(flags); 147 raw_local_irq_restore(flags);
147 148
149 crash_fadump(regs, "die oops");
150
148 /* 151 /*
149 * A system reset (0x100) is a request to dump, so we always send 152 * A system reset (0x100) is a request to dump, so we always send
150 * it through the crashdump code. 153 * it through the crashdump code.
@@ -244,6 +247,9 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
244 addr, regs->nip, regs->link, code); 247 addr, regs->nip, regs->link, code);
245 } 248 }
246 249
250 if (!arch_irq_disabled_regs(regs))
251 local_irq_enable();
252
247 memset(&info, 0, sizeof(info)); 253 memset(&info, 0, sizeof(info));
248 info.si_signo = signr; 254 info.si_signo = signr;
249 info.si_code = code; 255 info.si_code = code;
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index 57fa2c0a531c..c39c1ca77f46 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -46,9 +46,6 @@ void __init udbg_early_init(void)
46#elif defined(CONFIG_PPC_EARLY_DEBUG_MAPLE) 46#elif defined(CONFIG_PPC_EARLY_DEBUG_MAPLE)
47 /* Maple real mode debug */ 47 /* Maple real mode debug */
48 udbg_init_maple_realmode(); 48 udbg_init_maple_realmode();
49#elif defined(CONFIG_PPC_EARLY_DEBUG_ISERIES)
50 /* For iSeries - hit Ctrl-x Ctrl-x to see the output */
51 udbg_init_iseries();
52#elif defined(CONFIG_PPC_EARLY_DEBUG_BEAT) 49#elif defined(CONFIG_PPC_EARLY_DEBUG_BEAT)
53 udbg_init_debug_beat(); 50 udbg_init_debug_beat();
54#elif defined(CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE) 51#elif defined(CONFIG_PPC_EARLY_DEBUG_PAS_REALMODE)
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index 7d14bb697d40..972cca278f98 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -263,17 +263,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
263 * the "data" page of the vDSO or you'll stop getting kernel updates 263 * the "data" page of the vDSO or you'll stop getting kernel updates
264 * and your nice userland gettimeofday will be totally dead. 264 * and your nice userland gettimeofday will be totally dead.
265 * It's fine to use that for setting breakpoints in the vDSO code 265 * It's fine to use that for setting breakpoints in the vDSO code
266 * pages though 266 * pages though.
267 *
268 * Make sure the vDSO gets into every core dump.
269 * Dumping its contents makes post-mortem fully interpretable later
270 * without matching up the same kernel and hardware config to see
271 * what PC values meant.
272 */ 267 */
273 rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT, 268 rc = install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
274 VM_READ|VM_EXEC| 269 VM_READ|VM_EXEC|
275 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC| 270 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
276 VM_ALWAYSDUMP,
277 vdso_pagelist); 271 vdso_pagelist);
278 if (rc) { 272 if (rc) {
279 current->mm->context.vdso_base = 0; 273 current->mm->context.vdso_base = 0;
@@ -727,10 +721,10 @@ static int __init vdso_init(void)
727 vdso_data->version.minor = SYSTEMCFG_MINOR; 721 vdso_data->version.minor = SYSTEMCFG_MINOR;
728 vdso_data->processor = mfspr(SPRN_PVR); 722 vdso_data->processor = mfspr(SPRN_PVR);
729 /* 723 /*
730 * Fake the old platform number for pSeries and iSeries and add 724 * Fake the old platform number for pSeries and add
731 * in LPAR bit if necessary 725 * in LPAR bit if necessary
732 */ 726 */
733 vdso_data->platform = machine_is(iseries) ? 0x200 : 0x100; 727 vdso_data->platform = 0x100;
734 if (firmware_has_feature(FW_FEATURE_LPAR)) 728 if (firmware_has_feature(FW_FEATURE_LPAR))
735 vdso_data->platform |= 1; 729 vdso_data->platform |= 1;
736 vdso_data->physicalMemorySize = memblock_phys_mem_size(); 730 vdso_data->physicalMemorySize = memblock_phys_mem_size();
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c
index 8b086299ba25..b2f7c8480bf6 100644
--- a/arch/powerpc/kernel/vio.c
+++ b/arch/powerpc/kernel/vio.c
@@ -34,11 +34,6 @@
34#include <asm/abs_addr.h> 34#include <asm/abs_addr.h>
35#include <asm/page.h> 35#include <asm/page.h>
36#include <asm/hvcall.h> 36#include <asm/hvcall.h>
37#include <asm/iseries/vio.h>
38#include <asm/iseries/hv_types.h>
39#include <asm/iseries/hv_lp_config.h>
40#include <asm/iseries/hv_call_xm.h>
41#include <asm/iseries/iommu.h>
42 37
43static struct bus_type vio_bus_type; 38static struct bus_type vio_bus_type;
44 39
@@ -1042,7 +1037,6 @@ static void vio_cmo_sysfs_init(void)
1042 vio_bus_type.bus_attrs = vio_cmo_bus_attrs; 1037 vio_bus_type.bus_attrs = vio_cmo_bus_attrs;
1043} 1038}
1044#else /* CONFIG_PPC_SMLPAR */ 1039#else /* CONFIG_PPC_SMLPAR */
1045/* Dummy functions for iSeries platform */
1046int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; } 1040int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
1047void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {} 1041void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
1048static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; } 1042static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
@@ -1060,9 +1054,6 @@ static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
1060 struct iommu_table *tbl; 1054 struct iommu_table *tbl;
1061 unsigned long offset, size; 1055 unsigned long offset, size;
1062 1056
1063 if (firmware_has_feature(FW_FEATURE_ISERIES))
1064 return vio_build_iommu_table_iseries(dev);
1065
1066 dma_window = of_get_property(dev->dev.of_node, 1057 dma_window = of_get_property(dev->dev.of_node,
1067 "ibm,my-dma-window", NULL); 1058 "ibm,my-dma-window", NULL);
1068 if (!dma_window) 1059 if (!dma_window)
@@ -1168,17 +1159,21 @@ static int vio_bus_remove(struct device *dev)
1168 * vio_register_driver: - Register a new vio driver 1159 * vio_register_driver: - Register a new vio driver
1169 * @drv: The vio_driver structure to be registered. 1160 * @drv: The vio_driver structure to be registered.
1170 */ 1161 */
1171int vio_register_driver(struct vio_driver *viodrv) 1162int __vio_register_driver(struct vio_driver *viodrv, struct module *owner,
1163 const char *mod_name)
1172{ 1164{
1173 printk(KERN_DEBUG "%s: driver %s registering\n", __func__, 1165 pr_debug("%s: driver %s registering\n", __func__, viodrv->name);
1174 viodrv->driver.name);
1175 1166
1176 /* fill in 'struct driver' fields */ 1167 /* fill in 'struct driver' fields */
1168 viodrv->driver.name = viodrv->name;
1169 viodrv->driver.pm = viodrv->pm;
1177 viodrv->driver.bus = &vio_bus_type; 1170 viodrv->driver.bus = &vio_bus_type;
1171 viodrv->driver.owner = owner;
1172 viodrv->driver.mod_name = mod_name;
1178 1173
1179 return driver_register(&viodrv->driver); 1174 return driver_register(&viodrv->driver);
1180} 1175}
1181EXPORT_SYMBOL(vio_register_driver); 1176EXPORT_SYMBOL(__vio_register_driver);
1182 1177
1183/** 1178/**
1184 * vio_unregister_driver - Remove registration of vio driver. 1179 * vio_unregister_driver - Remove registration of vio driver.
@@ -1195,8 +1190,7 @@ static void __devinit vio_dev_release(struct device *dev)
1195{ 1190{
1196 struct iommu_table *tbl = get_iommu_table_base(dev); 1191 struct iommu_table *tbl = get_iommu_table_base(dev);
1197 1192
1198 /* iSeries uses a common table for all vio devices */ 1193 if (tbl)
1199 if (!firmware_has_feature(FW_FEATURE_ISERIES) && tbl)
1200 iommu_free_table(tbl, dev->of_node ? 1194 iommu_free_table(tbl, dev->of_node ?
1201 dev->of_node->full_name : dev_name(dev)); 1195 dev->of_node->full_name : dev_name(dev));
1202 of_node_put(dev->of_node); 1196 of_node_put(dev->of_node);
@@ -1244,12 +1238,6 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node)
1244 viodev->name = of_node->name; 1238 viodev->name = of_node->name;
1245 viodev->type = of_node->type; 1239 viodev->type = of_node->type;
1246 viodev->unit_address = *unit_address; 1240 viodev->unit_address = *unit_address;
1247 if (firmware_has_feature(FW_FEATURE_ISERIES)) {
1248 unit_address = of_get_property(of_node,
1249 "linux,unit_address", NULL);
1250 if (unit_address != NULL)
1251 viodev->unit_address = *unit_address;
1252 }
1253 viodev->dev.of_node = of_node_get(of_node); 1241 viodev->dev.of_node = of_node_get(of_node);
1254 1242
1255 if (firmware_has_feature(FW_FEATURE_CMO)) 1243 if (firmware_has_feature(FW_FEATURE_CMO))
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index 710a54005dfb..65d1c08cf09e 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -109,11 +109,6 @@ SECTIONS
109 __ptov_table_begin = .; 109 __ptov_table_begin = .;
110 *(.ptov_fixup); 110 *(.ptov_fixup);
111 __ptov_table_end = .; 111 __ptov_table_end = .;
112#ifdef CONFIG_PPC_ISERIES
113 __dt_strings_start = .;
114 *(.dt_strings);
115 __dt_strings_end = .;
116#endif
117 } 112 }
118 113
119 .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) { 114 .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 78133deb4b64..8f64709ae331 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -69,6 +69,7 @@ config KVM_BOOK3S_64
69config KVM_BOOK3S_64_HV 69config KVM_BOOK3S_64_HV
70 bool "KVM support for POWER7 and PPC970 using hypervisor mode in host" 70 bool "KVM support for POWER7 and PPC970 using hypervisor mode in host"
71 depends on KVM_BOOK3S_64 71 depends on KVM_BOOK3S_64
72 select MMU_NOTIFIER
72 ---help--- 73 ---help---
73 Support running unmodified book3s_64 guest kernels in 74 Support running unmodified book3s_64 guest kernels in
74 virtual machines on POWER7 and PPC970 processors that have 75 virtual machines on POWER7 and PPC970 processors that have
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index e41ac6f7dcf1..7d54f4ed6d96 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -258,7 +258,7 @@ static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
258 return true; 258 return true;
259} 259}
260 260
261void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) 261void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
262{ 262{
263 unsigned long *pending = &vcpu->arch.pending_exceptions; 263 unsigned long *pending = &vcpu->arch.pending_exceptions;
264 unsigned long old_pending = vcpu->arch.pending_exceptions; 264 unsigned long old_pending = vcpu->arch.pending_exceptions;
@@ -423,10 +423,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
423 regs->sprg1 = vcpu->arch.shared->sprg1; 423 regs->sprg1 = vcpu->arch.shared->sprg1;
424 regs->sprg2 = vcpu->arch.shared->sprg2; 424 regs->sprg2 = vcpu->arch.shared->sprg2;
425 regs->sprg3 = vcpu->arch.shared->sprg3; 425 regs->sprg3 = vcpu->arch.shared->sprg3;
426 regs->sprg4 = vcpu->arch.sprg4; 426 regs->sprg4 = vcpu->arch.shared->sprg4;
427 regs->sprg5 = vcpu->arch.sprg5; 427 regs->sprg5 = vcpu->arch.shared->sprg5;
428 regs->sprg6 = vcpu->arch.sprg6; 428 regs->sprg6 = vcpu->arch.shared->sprg6;
429 regs->sprg7 = vcpu->arch.sprg7; 429 regs->sprg7 = vcpu->arch.shared->sprg7;
430 430
431 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) 431 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
432 regs->gpr[i] = kvmppc_get_gpr(vcpu, i); 432 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
@@ -450,10 +450,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
450 vcpu->arch.shared->sprg1 = regs->sprg1; 450 vcpu->arch.shared->sprg1 = regs->sprg1;
451 vcpu->arch.shared->sprg2 = regs->sprg2; 451 vcpu->arch.shared->sprg2 = regs->sprg2;
452 vcpu->arch.shared->sprg3 = regs->sprg3; 452 vcpu->arch.shared->sprg3 = regs->sprg3;
453 vcpu->arch.sprg4 = regs->sprg4; 453 vcpu->arch.shared->sprg4 = regs->sprg4;
454 vcpu->arch.sprg5 = regs->sprg5; 454 vcpu->arch.shared->sprg5 = regs->sprg5;
455 vcpu->arch.sprg6 = regs->sprg6; 455 vcpu->arch.shared->sprg6 = regs->sprg6;
456 vcpu->arch.sprg7 = regs->sprg7; 456 vcpu->arch.shared->sprg7 = regs->sprg7;
457 457
458 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) 458 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
459 kvmppc_set_gpr(vcpu, i, regs->gpr[i]); 459 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
@@ -477,41 +477,10 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
477 return 0; 477 return 0;
478} 478}
479 479
480/* 480void kvmppc_decrementer_func(unsigned long data)
481 * Get (and clear) the dirty memory log for a memory slot.
482 */
483int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
484 struct kvm_dirty_log *log)
485{ 481{
486 struct kvm_memory_slot *memslot; 482 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
487 struct kvm_vcpu *vcpu;
488 ulong ga, ga_end;
489 int is_dirty = 0;
490 int r;
491 unsigned long n;
492
493 mutex_lock(&kvm->slots_lock);
494
495 r = kvm_get_dirty_log(kvm, log, &is_dirty);
496 if (r)
497 goto out;
498
499 /* If nothing is dirty, don't bother messing with page tables. */
500 if (is_dirty) {
501 memslot = id_to_memslot(kvm->memslots, log->slot);
502 483
503 ga = memslot->base_gfn << PAGE_SHIFT; 484 kvmppc_core_queue_dec(vcpu);
504 ga_end = ga + (memslot->npages << PAGE_SHIFT); 485 kvm_vcpu_kick(vcpu);
505
506 kvm_for_each_vcpu(n, vcpu, kvm)
507 kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
508
509 n = kvm_dirty_bitmap_bytes(memslot);
510 memset(memslot->dirty_bitmap, 0, n);
511 }
512
513 r = 0;
514out:
515 mutex_unlock(&kvm->slots_lock);
516 return r;
517} 486}
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c
index 9fecbfbce773..f922c29bb234 100644
--- a/arch/powerpc/kvm/book3s_32_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_32_mmu_host.c
@@ -151,13 +151,15 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
151 bool primary = false; 151 bool primary = false;
152 bool evict = false; 152 bool evict = false;
153 struct hpte_cache *pte; 153 struct hpte_cache *pte;
154 int r = 0;
154 155
155 /* Get host physical address for gpa */ 156 /* Get host physical address for gpa */
156 hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT); 157 hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
157 if (is_error_pfn(hpaddr)) { 158 if (is_error_pfn(hpaddr)) {
158 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", 159 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
159 orig_pte->eaddr); 160 orig_pte->eaddr);
160 return -EINVAL; 161 r = -EINVAL;
162 goto out;
161 } 163 }
162 hpaddr <<= PAGE_SHIFT; 164 hpaddr <<= PAGE_SHIFT;
163 165
@@ -249,7 +251,8 @@ next_pteg:
249 251
250 kvmppc_mmu_hpte_cache_map(vcpu, pte); 252 kvmppc_mmu_hpte_cache_map(vcpu, pte);
251 253
252 return 0; 254out:
255 return r;
253} 256}
254 257
255static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) 258static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
@@ -297,12 +300,14 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
297 u64 gvsid; 300 u64 gvsid;
298 u32 sr; 301 u32 sr;
299 struct kvmppc_sid_map *map; 302 struct kvmppc_sid_map *map;
300 struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu); 303 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
304 int r = 0;
301 305
302 if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { 306 if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
303 /* Invalidate an entry */ 307 /* Invalidate an entry */
304 svcpu->sr[esid] = SR_INVALID; 308 svcpu->sr[esid] = SR_INVALID;
305 return -ENOENT; 309 r = -ENOENT;
310 goto out;
306 } 311 }
307 312
308 map = find_sid_vsid(vcpu, gvsid); 313 map = find_sid_vsid(vcpu, gvsid);
@@ -315,17 +320,21 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
315 320
316 dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr); 321 dprintk_sr("MMU: mtsr %d, 0x%x\n", esid, sr);
317 322
318 return 0; 323out:
324 svcpu_put(svcpu);
325 return r;
319} 326}
320 327
321void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) 328void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
322{ 329{
323 int i; 330 int i;
324 struct kvmppc_book3s_shadow_vcpu *svcpu = to_svcpu(vcpu); 331 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
325 332
326 dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr)); 333 dprintk_sr("MMU: flushing all segments (%d)\n", ARRAY_SIZE(svcpu->sr));
327 for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++) 334 for (i = 0; i < ARRAY_SIZE(svcpu->sr); i++)
328 svcpu->sr[i] = SR_INVALID; 335 svcpu->sr[i] = SR_INVALID;
336
337 svcpu_put(svcpu);
329} 338}
330 339
331void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) 340void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c
index fa2f08434ba5..6f87f39a1ac2 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_host.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_host.c
@@ -88,12 +88,14 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
88 int vflags = 0; 88 int vflags = 0;
89 int attempt = 0; 89 int attempt = 0;
90 struct kvmppc_sid_map *map; 90 struct kvmppc_sid_map *map;
91 int r = 0;
91 92
92 /* Get host physical address for gpa */ 93 /* Get host physical address for gpa */
93 hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT); 94 hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT);
94 if (is_error_pfn(hpaddr)) { 95 if (is_error_pfn(hpaddr)) {
95 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr); 96 printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr);
96 return -EINVAL; 97 r = -EINVAL;
98 goto out;
97 } 99 }
98 hpaddr <<= PAGE_SHIFT; 100 hpaddr <<= PAGE_SHIFT;
99 hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK); 101 hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);
@@ -110,7 +112,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
110 printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n", 112 printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
111 vsid, orig_pte->eaddr); 113 vsid, orig_pte->eaddr);
112 WARN_ON(true); 114 WARN_ON(true);
113 return -EINVAL; 115 r = -EINVAL;
116 goto out;
114 } 117 }
115 118
116 vsid = map->host_vsid; 119 vsid = map->host_vsid;
@@ -131,8 +134,10 @@ map_again:
131 134
132 /* In case we tried normal mapping already, let's nuke old entries */ 135 /* In case we tried normal mapping already, let's nuke old entries */
133 if (attempt > 1) 136 if (attempt > 1)
134 if (ppc_md.hpte_remove(hpteg) < 0) 137 if (ppc_md.hpte_remove(hpteg) < 0) {
135 return -1; 138 r = -1;
139 goto out;
140 }
136 141
137 ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags, MMU_PAGE_4K, MMU_SEGSIZE_256M); 142 ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags, MMU_PAGE_4K, MMU_SEGSIZE_256M);
138 143
@@ -162,7 +167,8 @@ map_again:
162 kvmppc_mmu_hpte_cache_map(vcpu, pte); 167 kvmppc_mmu_hpte_cache_map(vcpu, pte);
163 } 168 }
164 169
165 return 0; 170out:
171 return r;
166} 172}
167 173
168static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) 174static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
@@ -207,25 +213,30 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
207 213
208static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid) 214static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
209{ 215{
216 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
210 int i; 217 int i;
211 int max_slb_size = 64; 218 int max_slb_size = 64;
212 int found_inval = -1; 219 int found_inval = -1;
213 int r; 220 int r;
214 221
215 if (!to_svcpu(vcpu)->slb_max) 222 if (!svcpu->slb_max)
216 to_svcpu(vcpu)->slb_max = 1; 223 svcpu->slb_max = 1;
217 224
218 /* Are we overwriting? */ 225 /* Are we overwriting? */
219 for (i = 1; i < to_svcpu(vcpu)->slb_max; i++) { 226 for (i = 1; i < svcpu->slb_max; i++) {
220 if (!(to_svcpu(vcpu)->slb[i].esid & SLB_ESID_V)) 227 if (!(svcpu->slb[i].esid & SLB_ESID_V))
221 found_inval = i; 228 found_inval = i;
222 else if ((to_svcpu(vcpu)->slb[i].esid & ESID_MASK) == esid) 229 else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
223 return i; 230 r = i;
231 goto out;
232 }
224 } 233 }
225 234
226 /* Found a spare entry that was invalidated before */ 235 /* Found a spare entry that was invalidated before */
227 if (found_inval > 0) 236 if (found_inval > 0) {
228 return found_inval; 237 r = found_inval;
238 goto out;
239 }
229 240
230 /* No spare invalid entry, so create one */ 241 /* No spare invalid entry, so create one */
231 242
@@ -233,30 +244,35 @@ static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
233 max_slb_size = mmu_slb_size; 244 max_slb_size = mmu_slb_size;
234 245
235 /* Overflowing -> purge */ 246 /* Overflowing -> purge */
236 if ((to_svcpu(vcpu)->slb_max) == max_slb_size) 247 if ((svcpu->slb_max) == max_slb_size)
237 kvmppc_mmu_flush_segments(vcpu); 248 kvmppc_mmu_flush_segments(vcpu);
238 249
239 r = to_svcpu(vcpu)->slb_max; 250 r = svcpu->slb_max;
240 to_svcpu(vcpu)->slb_max++; 251 svcpu->slb_max++;
241 252
253out:
254 svcpu_put(svcpu);
242 return r; 255 return r;
243} 256}
244 257
245int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr) 258int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
246{ 259{
260 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
247 u64 esid = eaddr >> SID_SHIFT; 261 u64 esid = eaddr >> SID_SHIFT;
248 u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V; 262 u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
249 u64 slb_vsid = SLB_VSID_USER; 263 u64 slb_vsid = SLB_VSID_USER;
250 u64 gvsid; 264 u64 gvsid;
251 int slb_index; 265 int slb_index;
252 struct kvmppc_sid_map *map; 266 struct kvmppc_sid_map *map;
267 int r = 0;
253 268
254 slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK); 269 slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);
255 270
256 if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) { 271 if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
257 /* Invalidate an entry */ 272 /* Invalidate an entry */
258 to_svcpu(vcpu)->slb[slb_index].esid = 0; 273 svcpu->slb[slb_index].esid = 0;
259 return -ENOENT; 274 r = -ENOENT;
275 goto out;
260 } 276 }
261 277
262 map = find_sid_vsid(vcpu, gvsid); 278 map = find_sid_vsid(vcpu, gvsid);
@@ -269,18 +285,22 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
269 slb_vsid &= ~SLB_VSID_KP; 285 slb_vsid &= ~SLB_VSID_KP;
270 slb_esid |= slb_index; 286 slb_esid |= slb_index;
271 287
272 to_svcpu(vcpu)->slb[slb_index].esid = slb_esid; 288 svcpu->slb[slb_index].esid = slb_esid;
273 to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid; 289 svcpu->slb[slb_index].vsid = slb_vsid;
274 290
275 trace_kvm_book3s_slbmte(slb_vsid, slb_esid); 291 trace_kvm_book3s_slbmte(slb_vsid, slb_esid);
276 292
277 return 0; 293out:
294 svcpu_put(svcpu);
295 return r;
278} 296}
279 297
280void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) 298void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
281{ 299{
282 to_svcpu(vcpu)->slb_max = 1; 300 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
283 to_svcpu(vcpu)->slb[0].esid = 0; 301 svcpu->slb_max = 1;
302 svcpu->slb[0].esid = 0;
303 svcpu_put(svcpu);
284} 304}
285 305
286void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) 306void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index bc3a2ea94217..ddc485a529f2 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -23,6 +23,7 @@
23#include <linux/gfp.h> 23#include <linux/gfp.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/hugetlb.h> 25#include <linux/hugetlb.h>
26#include <linux/vmalloc.h>
26 27
27#include <asm/tlbflush.h> 28#include <asm/tlbflush.h>
28#include <asm/kvm_ppc.h> 29#include <asm/kvm_ppc.h>
@@ -33,15 +34,6 @@
33#include <asm/ppc-opcode.h> 34#include <asm/ppc-opcode.h>
34#include <asm/cputable.h> 35#include <asm/cputable.h>
35 36
36/* For now use fixed-size 16MB page table */
37#define HPT_ORDER 24
38#define HPT_NPTEG (1ul << (HPT_ORDER - 7)) /* 128B per pteg */
39#define HPT_HASH_MASK (HPT_NPTEG - 1)
40
41/* Pages in the VRMA are 16MB pages */
42#define VRMA_PAGE_ORDER 24
43#define VRMA_VSID 0x1ffffffUL /* 1TB VSID reserved for VRMA */
44
45/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */ 37/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
46#define MAX_LPID_970 63 38#define MAX_LPID_970 63
47#define NR_LPIDS (LPID_RSVD + 1) 39#define NR_LPIDS (LPID_RSVD + 1)
@@ -51,21 +43,41 @@ long kvmppc_alloc_hpt(struct kvm *kvm)
51{ 43{
52 unsigned long hpt; 44 unsigned long hpt;
53 unsigned long lpid; 45 unsigned long lpid;
46 struct revmap_entry *rev;
47 struct kvmppc_linear_info *li;
48
49 /* Allocate guest's hashed page table */
50 li = kvm_alloc_hpt();
51 if (li) {
52 /* using preallocated memory */
53 hpt = (ulong)li->base_virt;
54 kvm->arch.hpt_li = li;
55 } else {
56 /* using dynamic memory */
57 hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
58 __GFP_NOWARN, HPT_ORDER - PAGE_SHIFT);
59 }
54 60
55 hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|__GFP_NOWARN,
56 HPT_ORDER - PAGE_SHIFT);
57 if (!hpt) { 61 if (!hpt) {
58 pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n"); 62 pr_err("kvm_alloc_hpt: Couldn't alloc HPT\n");
59 return -ENOMEM; 63 return -ENOMEM;
60 } 64 }
61 kvm->arch.hpt_virt = hpt; 65 kvm->arch.hpt_virt = hpt;
62 66
67 /* Allocate reverse map array */
68 rev = vmalloc(sizeof(struct revmap_entry) * HPT_NPTE);
69 if (!rev) {
70 pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
71 goto out_freehpt;
72 }
73 kvm->arch.revmap = rev;
74
75 /* Allocate the guest's logical partition ID */
63 do { 76 do {
64 lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS); 77 lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS);
65 if (lpid >= NR_LPIDS) { 78 if (lpid >= NR_LPIDS) {
66 pr_err("kvm_alloc_hpt: No LPIDs free\n"); 79 pr_err("kvm_alloc_hpt: No LPIDs free\n");
67 free_pages(hpt, HPT_ORDER - PAGE_SHIFT); 80 goto out_freeboth;
68 return -ENOMEM;
69 } 81 }
70 } while (test_and_set_bit(lpid, lpid_inuse)); 82 } while (test_and_set_bit(lpid, lpid_inuse));
71 83
@@ -74,37 +86,64 @@ long kvmppc_alloc_hpt(struct kvm *kvm)
74 86
75 pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid); 87 pr_info("KVM guest htab at %lx, LPID %lx\n", hpt, lpid);
76 return 0; 88 return 0;
89
90 out_freeboth:
91 vfree(rev);
92 out_freehpt:
93 free_pages(hpt, HPT_ORDER - PAGE_SHIFT);
94 return -ENOMEM;
77} 95}
78 96
79void kvmppc_free_hpt(struct kvm *kvm) 97void kvmppc_free_hpt(struct kvm *kvm)
80{ 98{
81 clear_bit(kvm->arch.lpid, lpid_inuse); 99 clear_bit(kvm->arch.lpid, lpid_inuse);
82 free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT); 100 vfree(kvm->arch.revmap);
101 if (kvm->arch.hpt_li)
102 kvm_release_hpt(kvm->arch.hpt_li);
103 else
104 free_pages(kvm->arch.hpt_virt, HPT_ORDER - PAGE_SHIFT);
105}
106
107/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
108static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
109{
110 return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
111}
112
113/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
114static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
115{
116 return (pgsize == 0x10000) ? 0x1000 : 0;
83} 117}
84 118
85void kvmppc_map_vrma(struct kvm *kvm, struct kvm_userspace_memory_region *mem) 119void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
120 unsigned long porder)
86{ 121{
87 unsigned long i; 122 unsigned long i;
88 unsigned long npages = kvm->arch.ram_npages; 123 unsigned long npages;
89 unsigned long pfn; 124 unsigned long hp_v, hp_r;
90 unsigned long *hpte; 125 unsigned long addr, hash;
91 unsigned long hash; 126 unsigned long psize;
92 struct kvmppc_pginfo *pginfo = kvm->arch.ram_pginfo; 127 unsigned long hp0, hp1;
128 long ret;
93 129
94 if (!pginfo) 130 psize = 1ul << porder;
95 return; 131 npages = memslot->npages >> (porder - PAGE_SHIFT);
96 132
97 /* VRMA can't be > 1TB */ 133 /* VRMA can't be > 1TB */
98 if (npages > 1ul << (40 - kvm->arch.ram_porder)) 134 if (npages > 1ul << (40 - porder))
99 npages = 1ul << (40 - kvm->arch.ram_porder); 135 npages = 1ul << (40 - porder);
100 /* Can't use more than 1 HPTE per HPTEG */ 136 /* Can't use more than 1 HPTE per HPTEG */
101 if (npages > HPT_NPTEG) 137 if (npages > HPT_NPTEG)
102 npages = HPT_NPTEG; 138 npages = HPT_NPTEG;
103 139
140 hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
141 HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
142 hp1 = hpte1_pgsize_encoding(psize) |
143 HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;
144
104 for (i = 0; i < npages; ++i) { 145 for (i = 0; i < npages; ++i) {
105 pfn = pginfo[i].pfn; 146 addr = i << porder;
106 if (!pfn)
107 break;
108 /* can't use hpt_hash since va > 64 bits */ 147 /* can't use hpt_hash since va > 64 bits */
109 hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK; 148 hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & HPT_HASH_MASK;
110 /* 149 /*
@@ -113,15 +152,15 @@ void kvmppc_map_vrma(struct kvm *kvm, struct kvm_userspace_memory_region *mem)
113 * at most one HPTE per HPTEG, we just assume entry 7 152 * at most one HPTE per HPTEG, we just assume entry 7
114 * is available and use it. 153 * is available and use it.
115 */ 154 */
116 hpte = (unsigned long *) (kvm->arch.hpt_virt + (hash << 7)); 155 hash = (hash << 3) + 7;
117 hpte += 7 * 2; 156 hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
118 /* HPTE low word - RPN, protection, etc. */ 157 hp_r = hp1 | addr;
119 hpte[1] = (pfn << PAGE_SHIFT) | HPTE_R_R | HPTE_R_C | 158 ret = kvmppc_virtmode_h_enter(vcpu, H_EXACT, hash, hp_v, hp_r);
120 HPTE_R_M | PP_RWXX; 159 if (ret != H_SUCCESS) {
121 wmb(); 160 pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
122 hpte[0] = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) | 161 addr, ret);
123 (i << (VRMA_PAGE_ORDER - 16)) | HPTE_V_BOLTED | 162 break;
124 HPTE_V_LARGE | HPTE_V_VALID; 163 }
125 } 164 }
126} 165}
127 166
@@ -158,10 +197,814 @@ static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
158 kvmppc_set_msr(vcpu, MSR_SF | MSR_ME); 197 kvmppc_set_msr(vcpu, MSR_SF | MSR_ME);
159} 198}
160 199
200/*
201 * This is called to get a reference to a guest page if there isn't
202 * one already in the kvm->arch.slot_phys[][] arrays.
203 */
204static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
205 struct kvm_memory_slot *memslot,
206 unsigned long psize)
207{
208 unsigned long start;
209 long np, err;
210 struct page *page, *hpage, *pages[1];
211 unsigned long s, pgsize;
212 unsigned long *physp;
213 unsigned int is_io, got, pgorder;
214 struct vm_area_struct *vma;
215 unsigned long pfn, i, npages;
216
217 physp = kvm->arch.slot_phys[memslot->id];
218 if (!physp)
219 return -EINVAL;
220 if (physp[gfn - memslot->base_gfn])
221 return 0;
222
223 is_io = 0;
224 got = 0;
225 page = NULL;
226 pgsize = psize;
227 err = -EINVAL;
228 start = gfn_to_hva_memslot(memslot, gfn);
229
230 /* Instantiate and get the page we want access to */
231 np = get_user_pages_fast(start, 1, 1, pages);
232 if (np != 1) {
233 /* Look up the vma for the page */
234 down_read(&current->mm->mmap_sem);
235 vma = find_vma(current->mm, start);
236 if (!vma || vma->vm_start > start ||
237 start + psize > vma->vm_end ||
238 !(vma->vm_flags & VM_PFNMAP))
239 goto up_err;
240 is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
241 pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
242 /* check alignment of pfn vs. requested page size */
243 if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
244 goto up_err;
245 up_read(&current->mm->mmap_sem);
246
247 } else {
248 page = pages[0];
249 got = KVMPPC_GOT_PAGE;
250
251 /* See if this is a large page */
252 s = PAGE_SIZE;
253 if (PageHuge(page)) {
254 hpage = compound_head(page);
255 s <<= compound_order(hpage);
256 /* Get the whole large page if slot alignment is ok */
257 if (s > psize && slot_is_aligned(memslot, s) &&
258 !(memslot->userspace_addr & (s - 1))) {
259 start &= ~(s - 1);
260 pgsize = s;
261 page = hpage;
262 }
263 }
264 if (s < psize)
265 goto out;
266 pfn = page_to_pfn(page);
267 }
268
269 npages = pgsize >> PAGE_SHIFT;
270 pgorder = __ilog2(npages);
271 physp += (gfn - memslot->base_gfn) & ~(npages - 1);
272 spin_lock(&kvm->arch.slot_phys_lock);
273 for (i = 0; i < npages; ++i) {
274 if (!physp[i]) {
275 physp[i] = ((pfn + i) << PAGE_SHIFT) +
276 got + is_io + pgorder;
277 got = 0;
278 }
279 }
280 spin_unlock(&kvm->arch.slot_phys_lock);
281 err = 0;
282
283 out:
284 if (got) {
285 if (PageHuge(page))
286 page = compound_head(page);
287 put_page(page);
288 }
289 return err;
290
291 up_err:
292 up_read(&current->mm->mmap_sem);
293 return err;
294}
295
296/*
297 * We come here on a H_ENTER call from the guest when we are not
298 * using mmu notifiers and we don't have the requested page pinned
299 * already.
300 */
301long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
302 long pte_index, unsigned long pteh, unsigned long ptel)
303{
304 struct kvm *kvm = vcpu->kvm;
305 unsigned long psize, gpa, gfn;
306 struct kvm_memory_slot *memslot;
307 long ret;
308
309 if (kvm->arch.using_mmu_notifiers)
310 goto do_insert;
311
312 psize = hpte_page_size(pteh, ptel);
313 if (!psize)
314 return H_PARAMETER;
315
316 pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
317
318 /* Find the memslot (if any) for this address */
319 gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
320 gfn = gpa >> PAGE_SHIFT;
321 memslot = gfn_to_memslot(kvm, gfn);
322 if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
323 if (!slot_is_aligned(memslot, psize))
324 return H_PARAMETER;
325 if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
326 return H_PARAMETER;
327 }
328
329 do_insert:
330 /* Protect linux PTE lookup from page table destruction */
331 rcu_read_lock_sched(); /* this disables preemption too */
332 vcpu->arch.pgdir = current->mm->pgd;
333 ret = kvmppc_h_enter(vcpu, flags, pte_index, pteh, ptel);
334 rcu_read_unlock_sched();
335 if (ret == H_TOO_HARD) {
336 /* this can't happen */
337 pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
338 ret = H_RESOURCE; /* or something */
339 }
340 return ret;
341
342}
343
344static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
345 gva_t eaddr)
346{
347 u64 mask;
348 int i;
349
350 for (i = 0; i < vcpu->arch.slb_nr; i++) {
351 if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
352 continue;
353
354 if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
355 mask = ESID_MASK_1T;
356 else
357 mask = ESID_MASK;
358
359 if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
360 return &vcpu->arch.slb[i];
361 }
362 return NULL;
363}
364
365static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
366 unsigned long ea)
367{
368 unsigned long ra_mask;
369
370 ra_mask = hpte_page_size(v, r) - 1;
371 return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
372}
373
161static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, 374static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
162 struct kvmppc_pte *gpte, bool data) 375 struct kvmppc_pte *gpte, bool data)
376{
377 struct kvm *kvm = vcpu->kvm;
378 struct kvmppc_slb *slbe;
379 unsigned long slb_v;
380 unsigned long pp, key;
381 unsigned long v, gr;
382 unsigned long *hptep;
383 int index;
384 int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
385
386 /* Get SLB entry */
387 if (virtmode) {
388 slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
389 if (!slbe)
390 return -EINVAL;
391 slb_v = slbe->origv;
392 } else {
393 /* real mode access */
394 slb_v = vcpu->kvm->arch.vrma_slb_v;
395 }
396
397 /* Find the HPTE in the hash table */
398 index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
399 HPTE_V_VALID | HPTE_V_ABSENT);
400 if (index < 0)
401 return -ENOENT;
402 hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
403 v = hptep[0] & ~HPTE_V_HVLOCK;
404 gr = kvm->arch.revmap[index].guest_rpte;
405
406 /* Unlock the HPTE */
407 asm volatile("lwsync" : : : "memory");
408 hptep[0] = v;
409
410 gpte->eaddr = eaddr;
411 gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);
412
413 /* Get PP bits and key for permission check */
414 pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
415 key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
416 key &= slb_v;
417
418 /* Calculate permissions */
419 gpte->may_read = hpte_read_permission(pp, key);
420 gpte->may_write = hpte_write_permission(pp, key);
421 gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));
422
423 /* Storage key permission check for POWER7 */
424 if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
425 int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
426 if (amrfield & 1)
427 gpte->may_read = 0;
428 if (amrfield & 2)
429 gpte->may_write = 0;
430 }
431
432 /* Get the guest physical address */
433 gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
434 return 0;
435}
436
437/*
438 * Quick test for whether an instruction is a load or a store.
439 * If the instruction is a load or a store, then this will indicate
440 * which it is, at least on server processors. (Embedded processors
441 * have some external PID instructions that don't follow the rule
442 * embodied here.) If the instruction isn't a load or store, then
443 * this doesn't return anything useful.
444 */
445static int instruction_is_store(unsigned int instr)
446{
447 unsigned int mask;
448
449 mask = 0x10000000;
450 if ((instr & 0xfc000000) == 0x7c000000)
451 mask = 0x100; /* major opcode 31 */
452 return (instr & mask) != 0;
453}
454
455static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
456 unsigned long gpa, int is_store)
457{
458 int ret;
459 u32 last_inst;
460 unsigned long srr0 = kvmppc_get_pc(vcpu);
461
462 /* We try to load the last instruction. We don't let
463 * emulate_instruction do it as it doesn't check what
464 * kvmppc_ld returns.
465 * If we fail, we just return to the guest and try executing it again.
466 */
467 if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) {
468 ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
469 if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED)
470 return RESUME_GUEST;
471 vcpu->arch.last_inst = last_inst;
472 }
473
474 /*
475 * WARNING: We do not know for sure whether the instruction we just
476 * read from memory is the same that caused the fault in the first
477 * place. If the instruction we read is neither an load or a store,
478 * then it can't access memory, so we don't need to worry about
479 * enforcing access permissions. So, assuming it is a load or
480 * store, we just check that its direction (load or store) is
481 * consistent with the original fault, since that's what we
482 * checked the access permissions against. If there is a mismatch
483 * we just return and retry the instruction.
484 */
485
486 if (instruction_is_store(vcpu->arch.last_inst) != !!is_store)
487 return RESUME_GUEST;
488
489 /*
490 * Emulated accesses are emulated by looking at the hash for
491 * translation once, then performing the access later. The
492 * translation could be invalidated in the meantime in which
493 * point performing the subsequent memory access on the old
494 * physical address could possibly be a security hole for the
495 * guest (but not the host).
496 *
497 * This is less of an issue for MMIO stores since they aren't
498 * globally visible. It could be an issue for MMIO loads to
499 * a certain extent but we'll ignore it for now.
500 */
501
502 vcpu->arch.paddr_accessed = gpa;
503 return kvmppc_emulate_mmio(run, vcpu);
504}
505
506int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
507 unsigned long ea, unsigned long dsisr)
508{
509 struct kvm *kvm = vcpu->kvm;
510 unsigned long *hptep, hpte[3], r;
511 unsigned long mmu_seq, psize, pte_size;
512 unsigned long gfn, hva, pfn;
513 struct kvm_memory_slot *memslot;
514 unsigned long *rmap;
515 struct revmap_entry *rev;
516 struct page *page, *pages[1];
517 long index, ret, npages;
518 unsigned long is_io;
519 unsigned int writing, write_ok;
520 struct vm_area_struct *vma;
521 unsigned long rcbits;
522
523 /*
524 * Real-mode code has already searched the HPT and found the
525 * entry we're interested in. Lock the entry and check that
526 * it hasn't changed. If it has, just return and re-execute the
527 * instruction.
528 */
529 if (ea != vcpu->arch.pgfault_addr)
530 return RESUME_GUEST;
531 index = vcpu->arch.pgfault_index;
532 hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
533 rev = &kvm->arch.revmap[index];
534 preempt_disable();
535 while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
536 cpu_relax();
537 hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
538 hpte[1] = hptep[1];
539 hpte[2] = r = rev->guest_rpte;
540 asm volatile("lwsync" : : : "memory");
541 hptep[0] = hpte[0];
542 preempt_enable();
543
544 if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
545 hpte[1] != vcpu->arch.pgfault_hpte[1])
546 return RESUME_GUEST;
547
548 /* Translate the logical address and get the page */
549 psize = hpte_page_size(hpte[0], r);
550 gfn = hpte_rpn(r, psize);
551 memslot = gfn_to_memslot(kvm, gfn);
552
553 /* No memslot means it's an emulated MMIO region */
554 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
555 unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1));
556 return kvmppc_hv_emulate_mmio(run, vcpu, gpa,
557 dsisr & DSISR_ISSTORE);
558 }
559
560 if (!kvm->arch.using_mmu_notifiers)
561 return -EFAULT; /* should never get here */
562
563 /* used to check for invalidations in progress */
564 mmu_seq = kvm->mmu_notifier_seq;
565 smp_rmb();
566
567 is_io = 0;
568 pfn = 0;
569 page = NULL;
570 pte_size = PAGE_SIZE;
571 writing = (dsisr & DSISR_ISSTORE) != 0;
572 /* If writing != 0, then the HPTE must allow writing, if we get here */
573 write_ok = writing;
574 hva = gfn_to_hva_memslot(memslot, gfn);
575 npages = get_user_pages_fast(hva, 1, writing, pages);
576 if (npages < 1) {
577 /* Check if it's an I/O mapping */
578 down_read(&current->mm->mmap_sem);
579 vma = find_vma(current->mm, hva);
580 if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
581 (vma->vm_flags & VM_PFNMAP)) {
582 pfn = vma->vm_pgoff +
583 ((hva - vma->vm_start) >> PAGE_SHIFT);
584 pte_size = psize;
585 is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
586 write_ok = vma->vm_flags & VM_WRITE;
587 }
588 up_read(&current->mm->mmap_sem);
589 if (!pfn)
590 return -EFAULT;
591 } else {
592 page = pages[0];
593 if (PageHuge(page)) {
594 page = compound_head(page);
595 pte_size <<= compound_order(page);
596 }
597 /* if the guest wants write access, see if that is OK */
598 if (!writing && hpte_is_writable(r)) {
599 pte_t *ptep, pte;
600
601 /*
602 * We need to protect against page table destruction
603 * while looking up and updating the pte.
604 */
605 rcu_read_lock_sched();
606 ptep = find_linux_pte_or_hugepte(current->mm->pgd,
607 hva, NULL);
608 if (ptep && pte_present(*ptep)) {
609 pte = kvmppc_read_update_linux_pte(ptep, 1);
610 if (pte_write(pte))
611 write_ok = 1;
612 }
613 rcu_read_unlock_sched();
614 }
615 pfn = page_to_pfn(page);
616 }
617
618 ret = -EFAULT;
619 if (psize > pte_size)
620 goto out_put;
621
622 /* Check WIMG vs. the actual page we're accessing */
623 if (!hpte_cache_flags_ok(r, is_io)) {
624 if (is_io)
625 return -EFAULT;
626 /*
627 * Allow guest to map emulated device memory as
628 * uncacheable, but actually make it cacheable.
629 */
630 r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
631 }
632
633 /* Set the HPTE to point to pfn */
634 r = (r & ~(HPTE_R_PP0 - pte_size)) | (pfn << PAGE_SHIFT);
635 if (hpte_is_writable(r) && !write_ok)
636 r = hpte_make_readonly(r);
637 ret = RESUME_GUEST;
638 preempt_disable();
639 while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
640 cpu_relax();
641 if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] ||
642 rev->guest_rpte != hpte[2])
643 /* HPTE has been changed under us; let the guest retry */
644 goto out_unlock;
645 hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
646
647 rmap = &memslot->rmap[gfn - memslot->base_gfn];
648 lock_rmap(rmap);
649
650 /* Check if we might have been invalidated; let the guest retry if so */
651 ret = RESUME_GUEST;
652 if (mmu_notifier_retry(vcpu, mmu_seq)) {
653 unlock_rmap(rmap);
654 goto out_unlock;
655 }
656
657 /* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
658 rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
659 r &= rcbits | ~(HPTE_R_R | HPTE_R_C);
660
661 if (hptep[0] & HPTE_V_VALID) {
662 /* HPTE was previously valid, so we need to invalidate it */
663 unlock_rmap(rmap);
664 hptep[0] |= HPTE_V_ABSENT;
665 kvmppc_invalidate_hpte(kvm, hptep, index);
666 /* don't lose previous R and C bits */
667 r |= hptep[1] & (HPTE_R_R | HPTE_R_C);
668 } else {
669 kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
670 }
671
672 hptep[1] = r;
673 eieio();
674 hptep[0] = hpte[0];
675 asm volatile("ptesync" : : : "memory");
676 preempt_enable();
677 if (page && hpte_is_writable(r))
678 SetPageDirty(page);
679
680 out_put:
681 if (page)
682 put_page(page);
683 return ret;
684
685 out_unlock:
686 hptep[0] &= ~HPTE_V_HVLOCK;
687 preempt_enable();
688 goto out_put;
689}
690
691static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
692 int (*handler)(struct kvm *kvm, unsigned long *rmapp,
693 unsigned long gfn))
694{
695 int ret;
696 int retval = 0;
697 struct kvm_memslots *slots;
698 struct kvm_memory_slot *memslot;
699
700 slots = kvm_memslots(kvm);
701 kvm_for_each_memslot(memslot, slots) {
702 unsigned long start = memslot->userspace_addr;
703 unsigned long end;
704
705 end = start + (memslot->npages << PAGE_SHIFT);
706 if (hva >= start && hva < end) {
707 gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
708
709 ret = handler(kvm, &memslot->rmap[gfn_offset],
710 memslot->base_gfn + gfn_offset);
711 retval |= ret;
712 }
713 }
714
715 return retval;
716}
717
718static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
719 unsigned long gfn)
720{
721 struct revmap_entry *rev = kvm->arch.revmap;
722 unsigned long h, i, j;
723 unsigned long *hptep;
724 unsigned long ptel, psize, rcbits;
725
726 for (;;) {
727 lock_rmap(rmapp);
728 if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
729 unlock_rmap(rmapp);
730 break;
731 }
732
733 /*
734 * To avoid an ABBA deadlock with the HPTE lock bit,
735 * we can't spin on the HPTE lock while holding the
736 * rmap chain lock.
737 */
738 i = *rmapp & KVMPPC_RMAP_INDEX;
739 hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
740 if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
741 /* unlock rmap before spinning on the HPTE lock */
742 unlock_rmap(rmapp);
743 while (hptep[0] & HPTE_V_HVLOCK)
744 cpu_relax();
745 continue;
746 }
747 j = rev[i].forw;
748 if (j == i) {
749 /* chain is now empty */
750 *rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
751 } else {
752 /* remove i from chain */
753 h = rev[i].back;
754 rev[h].forw = j;
755 rev[j].back = h;
756 rev[i].forw = rev[i].back = i;
757 *rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j;
758 }
759
760 /* Now check and modify the HPTE */
761 ptel = rev[i].guest_rpte;
762 psize = hpte_page_size(hptep[0], ptel);
763 if ((hptep[0] & HPTE_V_VALID) &&
764 hpte_rpn(ptel, psize) == gfn) {
765 hptep[0] |= HPTE_V_ABSENT;
766 kvmppc_invalidate_hpte(kvm, hptep, i);
767 /* Harvest R and C */
768 rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
769 *rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
770 rev[i].guest_rpte = ptel | rcbits;
771 }
772 unlock_rmap(rmapp);
773 hptep[0] &= ~HPTE_V_HVLOCK;
774 }
775 return 0;
776}
777
778int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
779{
780 if (kvm->arch.using_mmu_notifiers)
781 kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
782 return 0;
783}
784
785static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
786 unsigned long gfn)
787{
788 struct revmap_entry *rev = kvm->arch.revmap;
789 unsigned long head, i, j;
790 unsigned long *hptep;
791 int ret = 0;
792
793 retry:
794 lock_rmap(rmapp);
795 if (*rmapp & KVMPPC_RMAP_REFERENCED) {
796 *rmapp &= ~KVMPPC_RMAP_REFERENCED;
797 ret = 1;
798 }
799 if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
800 unlock_rmap(rmapp);
801 return ret;
802 }
803
804 i = head = *rmapp & KVMPPC_RMAP_INDEX;
805 do {
806 hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
807 j = rev[i].forw;
808
809 /* If this HPTE isn't referenced, ignore it */
810 if (!(hptep[1] & HPTE_R_R))
811 continue;
812
813 if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
814 /* unlock rmap before spinning on the HPTE lock */
815 unlock_rmap(rmapp);
816 while (hptep[0] & HPTE_V_HVLOCK)
817 cpu_relax();
818 goto retry;
819 }
820
821 /* Now check and modify the HPTE */
822 if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) {
823 kvmppc_clear_ref_hpte(kvm, hptep, i);
824 rev[i].guest_rpte |= HPTE_R_R;
825 ret = 1;
826 }
827 hptep[0] &= ~HPTE_V_HVLOCK;
828 } while ((i = j) != head);
829
830 unlock_rmap(rmapp);
831 return ret;
832}
833
834int kvm_age_hva(struct kvm *kvm, unsigned long hva)
835{
836 if (!kvm->arch.using_mmu_notifiers)
837 return 0;
838 return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
839}
840
841static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
842 unsigned long gfn)
843{
844 struct revmap_entry *rev = kvm->arch.revmap;
845 unsigned long head, i, j;
846 unsigned long *hp;
847 int ret = 1;
848
849 if (*rmapp & KVMPPC_RMAP_REFERENCED)
850 return 1;
851
852 lock_rmap(rmapp);
853 if (*rmapp & KVMPPC_RMAP_REFERENCED)
854 goto out;
855
856 if (*rmapp & KVMPPC_RMAP_PRESENT) {
857 i = head = *rmapp & KVMPPC_RMAP_INDEX;
858 do {
859 hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
860 j = rev[i].forw;
861 if (hp[1] & HPTE_R_R)
862 goto out;
863 } while ((i = j) != head);
864 }
865 ret = 0;
866
867 out:
868 unlock_rmap(rmapp);
869 return ret;
870}
871
872int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
873{
874 if (!kvm->arch.using_mmu_notifiers)
875 return 0;
876 return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
877}
878
879void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
163{ 880{
164 return -ENOENT; 881 if (!kvm->arch.using_mmu_notifiers)
882 return;
883 kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
884}
885
886static int kvm_test_clear_dirty(struct kvm *kvm, unsigned long *rmapp)
887{
888 struct revmap_entry *rev = kvm->arch.revmap;
889 unsigned long head, i, j;
890 unsigned long *hptep;
891 int ret = 0;
892
893 retry:
894 lock_rmap(rmapp);
895 if (*rmapp & KVMPPC_RMAP_CHANGED) {
896 *rmapp &= ~KVMPPC_RMAP_CHANGED;
897 ret = 1;
898 }
899 if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
900 unlock_rmap(rmapp);
901 return ret;
902 }
903
904 i = head = *rmapp & KVMPPC_RMAP_INDEX;
905 do {
906 hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
907 j = rev[i].forw;
908
909 if (!(hptep[1] & HPTE_R_C))
910 continue;
911
912 if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
913 /* unlock rmap before spinning on the HPTE lock */
914 unlock_rmap(rmapp);
915 while (hptep[0] & HPTE_V_HVLOCK)
916 cpu_relax();
917 goto retry;
918 }
919
920 /* Now check and modify the HPTE */
921 if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_C)) {
922 /* need to make it temporarily absent to clear C */
923 hptep[0] |= HPTE_V_ABSENT;
924 kvmppc_invalidate_hpte(kvm, hptep, i);
925 hptep[1] &= ~HPTE_R_C;
926 eieio();
927 hptep[0] = (hptep[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
928 rev[i].guest_rpte |= HPTE_R_C;
929 ret = 1;
930 }
931 hptep[0] &= ~HPTE_V_HVLOCK;
932 } while ((i = j) != head);
933
934 unlock_rmap(rmapp);
935 return ret;
936}
937
938long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
939{
940 unsigned long i;
941 unsigned long *rmapp, *map;
942
943 preempt_disable();
944 rmapp = memslot->rmap;
945 map = memslot->dirty_bitmap;
946 for (i = 0; i < memslot->npages; ++i) {
947 if (kvm_test_clear_dirty(kvm, rmapp))
948 __set_bit_le(i, map);
949 ++rmapp;
950 }
951 preempt_enable();
952 return 0;
953}
954
955void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
956 unsigned long *nb_ret)
957{
958 struct kvm_memory_slot *memslot;
959 unsigned long gfn = gpa >> PAGE_SHIFT;
960 struct page *page, *pages[1];
961 int npages;
962 unsigned long hva, psize, offset;
963 unsigned long pa;
964 unsigned long *physp;
965
966 memslot = gfn_to_memslot(kvm, gfn);
967 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
968 return NULL;
969 if (!kvm->arch.using_mmu_notifiers) {
970 physp = kvm->arch.slot_phys[memslot->id];
971 if (!physp)
972 return NULL;
973 physp += gfn - memslot->base_gfn;
974 pa = *physp;
975 if (!pa) {
976 if (kvmppc_get_guest_page(kvm, gfn, memslot,
977 PAGE_SIZE) < 0)
978 return NULL;
979 pa = *physp;
980 }
981 page = pfn_to_page(pa >> PAGE_SHIFT);
982 } else {
983 hva = gfn_to_hva_memslot(memslot, gfn);
984 npages = get_user_pages_fast(hva, 1, 1, pages);
985 if (npages < 1)
986 return NULL;
987 page = pages[0];
988 }
989 psize = PAGE_SIZE;
990 if (PageHuge(page)) {
991 page = compound_head(page);
992 psize <<= compound_order(page);
993 }
994 if (!kvm->arch.using_mmu_notifiers)
995 get_page(page);
996 offset = gpa & (psize - 1);
997 if (nb_ret)
998 *nb_ret = psize - offset;
999 return page_address(page) + offset;
1000}
1001
1002void kvmppc_unpin_guest_page(struct kvm *kvm, void *va)
1003{
1004 struct page *page = virt_to_page(va);
1005
1006 page = compound_head(page);
1007 put_page(page);
165} 1008}
166 1009
167void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu) 1010void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 0c9dc62532d0..f1950d131827 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -230,9 +230,12 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
230 230
231 r = kvmppc_st(vcpu, &addr, 32, zeros, true); 231 r = kvmppc_st(vcpu, &addr, 32, zeros, true);
232 if ((r == -ENOENT) || (r == -EPERM)) { 232 if ((r == -ENOENT) || (r == -EPERM)) {
233 struct kvmppc_book3s_shadow_vcpu *svcpu;
234
235 svcpu = svcpu_get(vcpu);
233 *advance = 0; 236 *advance = 0;
234 vcpu->arch.shared->dar = vaddr; 237 vcpu->arch.shared->dar = vaddr;
235 to_svcpu(vcpu)->fault_dar = vaddr; 238 svcpu->fault_dar = vaddr;
236 239
237 dsisr = DSISR_ISSTORE; 240 dsisr = DSISR_ISSTORE;
238 if (r == -ENOENT) 241 if (r == -ENOENT)
@@ -241,7 +244,8 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
241 dsisr |= DSISR_PROTFAULT; 244 dsisr |= DSISR_PROTFAULT;
242 245
243 vcpu->arch.shared->dsisr = dsisr; 246 vcpu->arch.shared->dsisr = dsisr;
244 to_svcpu(vcpu)->fault_dsisr = dsisr; 247 svcpu->fault_dsisr = dsisr;
248 svcpu_put(svcpu);
245 249
246 kvmppc_book3s_queue_irqprio(vcpu, 250 kvmppc_book3s_queue_irqprio(vcpu,
247 BOOK3S_INTERRUPT_DATA_STORAGE); 251 BOOK3S_INTERRUPT_DATA_STORAGE);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 336983da9e72..d386b6198bc7 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -46,25 +46,16 @@
46#include <asm/page.h> 46#include <asm/page.h>
47#include <asm/hvcall.h> 47#include <asm/hvcall.h>
48#include <linux/gfp.h> 48#include <linux/gfp.h>
49#include <linux/sched.h>
50#include <linux/vmalloc.h> 49#include <linux/vmalloc.h>
51#include <linux/highmem.h> 50#include <linux/highmem.h>
52 51#include <linux/hugetlb.h>
53/*
54 * For now, limit memory to 64GB and require it to be large pages.
55 * This value is chosen because it makes the ram_pginfo array be
56 * 64kB in size, which is about as large as we want to be trying
57 * to allocate with kmalloc.
58 */
59#define MAX_MEM_ORDER 36
60
61#define LARGE_PAGE_ORDER 24 /* 16MB pages */
62 52
63/* #define EXIT_DEBUG */ 53/* #define EXIT_DEBUG */
64/* #define EXIT_DEBUG_SIMPLE */ 54/* #define EXIT_DEBUG_SIMPLE */
65/* #define EXIT_DEBUG_INT */ 55/* #define EXIT_DEBUG_INT */
66 56
67static void kvmppc_end_cede(struct kvm_vcpu *vcpu); 57static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
58static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu);
68 59
69void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 60void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
70{ 61{
@@ -147,10 +138,10 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
147 unsigned long vcpuid, unsigned long vpa) 138 unsigned long vcpuid, unsigned long vpa)
148{ 139{
149 struct kvm *kvm = vcpu->kvm; 140 struct kvm *kvm = vcpu->kvm;
150 unsigned long pg_index, ra, len; 141 unsigned long len, nb;
151 unsigned long pg_offset;
152 void *va; 142 void *va;
153 struct kvm_vcpu *tvcpu; 143 struct kvm_vcpu *tvcpu;
144 int err = H_PARAMETER;
154 145
155 tvcpu = kvmppc_find_vcpu(kvm, vcpuid); 146 tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
156 if (!tvcpu) 147 if (!tvcpu)
@@ -163,45 +154,41 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
163 if (flags < 4) { 154 if (flags < 4) {
164 if (vpa & 0x7f) 155 if (vpa & 0x7f)
165 return H_PARAMETER; 156 return H_PARAMETER;
157 if (flags >= 2 && !tvcpu->arch.vpa)
158 return H_RESOURCE;
166 /* registering new area; convert logical addr to real */ 159 /* registering new area; convert logical addr to real */
167 pg_index = vpa >> kvm->arch.ram_porder; 160 va = kvmppc_pin_guest_page(kvm, vpa, &nb);
168 pg_offset = vpa & (kvm->arch.ram_psize - 1); 161 if (va == NULL)
169 if (pg_index >= kvm->arch.ram_npages)
170 return H_PARAMETER;
171 if (kvm->arch.ram_pginfo[pg_index].pfn == 0)
172 return H_PARAMETER; 162 return H_PARAMETER;
173 ra = kvm->arch.ram_pginfo[pg_index].pfn << PAGE_SHIFT;
174 ra |= pg_offset;
175 va = __va(ra);
176 if (flags <= 1) 163 if (flags <= 1)
177 len = *(unsigned short *)(va + 4); 164 len = *(unsigned short *)(va + 4);
178 else 165 else
179 len = *(unsigned int *)(va + 4); 166 len = *(unsigned int *)(va + 4);
180 if (pg_offset + len > kvm->arch.ram_psize) 167 if (len > nb)
181 return H_PARAMETER; 168 goto out_unpin;
182 switch (flags) { 169 switch (flags) {
183 case 1: /* register VPA */ 170 case 1: /* register VPA */
184 if (len < 640) 171 if (len < 640)
185 return H_PARAMETER; 172 goto out_unpin;
173 if (tvcpu->arch.vpa)
174 kvmppc_unpin_guest_page(kvm, vcpu->arch.vpa);
186 tvcpu->arch.vpa = va; 175 tvcpu->arch.vpa = va;
187 init_vpa(vcpu, va); 176 init_vpa(vcpu, va);
188 break; 177 break;
189 case 2: /* register DTL */ 178 case 2: /* register DTL */
190 if (len < 48) 179 if (len < 48)
191 return H_PARAMETER; 180 goto out_unpin;
192 if (!tvcpu->arch.vpa)
193 return H_RESOURCE;
194 len -= len % 48; 181 len -= len % 48;
182 if (tvcpu->arch.dtl)
183 kvmppc_unpin_guest_page(kvm, vcpu->arch.dtl);
195 tvcpu->arch.dtl = va; 184 tvcpu->arch.dtl = va;
196 tvcpu->arch.dtl_end = va + len; 185 tvcpu->arch.dtl_end = va + len;
197 break; 186 break;
198 case 3: /* register SLB shadow buffer */ 187 case 3: /* register SLB shadow buffer */
199 if (len < 8) 188 if (len < 16)
200 return H_PARAMETER; 189 goto out_unpin;
201 if (!tvcpu->arch.vpa) 190 if (tvcpu->arch.slb_shadow)
202 return H_RESOURCE; 191 kvmppc_unpin_guest_page(kvm, vcpu->arch.slb_shadow);
203 tvcpu->arch.slb_shadow = va;
204 len = (len - 16) / 16;
205 tvcpu->arch.slb_shadow = va; 192 tvcpu->arch.slb_shadow = va;
206 break; 193 break;
207 } 194 }
@@ -210,17 +197,30 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
210 case 5: /* unregister VPA */ 197 case 5: /* unregister VPA */
211 if (tvcpu->arch.slb_shadow || tvcpu->arch.dtl) 198 if (tvcpu->arch.slb_shadow || tvcpu->arch.dtl)
212 return H_RESOURCE; 199 return H_RESOURCE;
200 if (!tvcpu->arch.vpa)
201 break;
202 kvmppc_unpin_guest_page(kvm, tvcpu->arch.vpa);
213 tvcpu->arch.vpa = NULL; 203 tvcpu->arch.vpa = NULL;
214 break; 204 break;
215 case 6: /* unregister DTL */ 205 case 6: /* unregister DTL */
206 if (!tvcpu->arch.dtl)
207 break;
208 kvmppc_unpin_guest_page(kvm, tvcpu->arch.dtl);
216 tvcpu->arch.dtl = NULL; 209 tvcpu->arch.dtl = NULL;
217 break; 210 break;
218 case 7: /* unregister SLB shadow buffer */ 211 case 7: /* unregister SLB shadow buffer */
212 if (!tvcpu->arch.slb_shadow)
213 break;
214 kvmppc_unpin_guest_page(kvm, tvcpu->arch.slb_shadow);
219 tvcpu->arch.slb_shadow = NULL; 215 tvcpu->arch.slb_shadow = NULL;
220 break; 216 break;
221 } 217 }
222 } 218 }
223 return H_SUCCESS; 219 return H_SUCCESS;
220
221 out_unpin:
222 kvmppc_unpin_guest_page(kvm, va);
223 return err;
224} 224}
225 225
226int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) 226int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
@@ -230,6 +230,12 @@ int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
230 struct kvm_vcpu *tvcpu; 230 struct kvm_vcpu *tvcpu;
231 231
232 switch (req) { 232 switch (req) {
233 case H_ENTER:
234 ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
235 kvmppc_get_gpr(vcpu, 5),
236 kvmppc_get_gpr(vcpu, 6),
237 kvmppc_get_gpr(vcpu, 7));
238 break;
233 case H_CEDE: 239 case H_CEDE:
234 break; 240 break;
235 case H_PROD: 241 case H_PROD:
@@ -319,20 +325,19 @@ static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
319 break; 325 break;
320 } 326 }
321 /* 327 /*
322 * We get these next two if the guest does a bad real-mode access, 328 * We get these next two if the guest accesses a page which it thinks
323 * as we have enabled VRMA (virtualized real mode area) mode in the 329 * it has mapped but which is not actually present, either because
324 * LPCR. We just generate an appropriate DSI/ISI to the guest. 330 * it is for an emulated I/O device or because the corresonding
331 * host page has been paged out. Any other HDSI/HISI interrupts
332 * have been handled already.
325 */ 333 */
326 case BOOK3S_INTERRUPT_H_DATA_STORAGE: 334 case BOOK3S_INTERRUPT_H_DATA_STORAGE:
327 vcpu->arch.shregs.dsisr = vcpu->arch.fault_dsisr; 335 r = kvmppc_book3s_hv_page_fault(run, vcpu,
328 vcpu->arch.shregs.dar = vcpu->arch.fault_dar; 336 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
329 kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
330 r = RESUME_GUEST;
331 break; 337 break;
332 case BOOK3S_INTERRUPT_H_INST_STORAGE: 338 case BOOK3S_INTERRUPT_H_INST_STORAGE:
333 kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE, 339 r = kvmppc_book3s_hv_page_fault(run, vcpu,
334 0x08000000); 340 kvmppc_get_pc(vcpu), 0);
335 r = RESUME_GUEST;
336 break; 341 break;
337 /* 342 /*
338 * This occurs if the guest executes an illegal instruction. 343 * This occurs if the guest executes an illegal instruction.
@@ -392,6 +397,42 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
392 return 0; 397 return 0;
393} 398}
394 399
400int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
401{
402 int r = -EINVAL;
403
404 switch (reg->id) {
405 case KVM_REG_PPC_HIOR:
406 r = put_user(0, (u64 __user *)reg->addr);
407 break;
408 default:
409 break;
410 }
411
412 return r;
413}
414
415int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
416{
417 int r = -EINVAL;
418
419 switch (reg->id) {
420 case KVM_REG_PPC_HIOR:
421 {
422 u64 hior;
423 /* Only allow this to be set to zero */
424 r = get_user(hior, (u64 __user *)reg->addr);
425 if (!r && (hior != 0))
426 r = -EINVAL;
427 break;
428 }
429 default:
430 break;
431 }
432
433 return r;
434}
435
395int kvmppc_core_check_processor_compat(void) 436int kvmppc_core_check_processor_compat(void)
396{ 437{
397 if (cpu_has_feature(CPU_FTR_HVMODE)) 438 if (cpu_has_feature(CPU_FTR_HVMODE))
@@ -411,7 +452,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
411 goto out; 452 goto out;
412 453
413 err = -ENOMEM; 454 err = -ENOMEM;
414 vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL); 455 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
415 if (!vcpu) 456 if (!vcpu)
416 goto out; 457 goto out;
417 458
@@ -463,15 +504,21 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
463 return vcpu; 504 return vcpu;
464 505
465free_vcpu: 506free_vcpu:
466 kfree(vcpu); 507 kmem_cache_free(kvm_vcpu_cache, vcpu);
467out: 508out:
468 return ERR_PTR(err); 509 return ERR_PTR(err);
469} 510}
470 511
471void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) 512void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
472{ 513{
514 if (vcpu->arch.dtl)
515 kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl);
516 if (vcpu->arch.slb_shadow)
517 kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow);
518 if (vcpu->arch.vpa)
519 kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa);
473 kvm_vcpu_uninit(vcpu); 520 kvm_vcpu_uninit(vcpu);
474 kfree(vcpu); 521 kmem_cache_free(kvm_vcpu_cache, vcpu);
475} 522}
476 523
477static void kvmppc_set_timer(struct kvm_vcpu *vcpu) 524static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
@@ -482,7 +529,7 @@ static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
482 if (now > vcpu->arch.dec_expires) { 529 if (now > vcpu->arch.dec_expires) {
483 /* decrementer has already gone negative */ 530 /* decrementer has already gone negative */
484 kvmppc_core_queue_dec(vcpu); 531 kvmppc_core_queue_dec(vcpu);
485 kvmppc_core_deliver_interrupts(vcpu); 532 kvmppc_core_prepare_to_enter(vcpu);
486 return; 533 return;
487 } 534 }
488 dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC 535 dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
@@ -797,7 +844,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
797 844
798 list_for_each_entry_safe(v, vn, &vc->runnable_threads, 845 list_for_each_entry_safe(v, vn, &vc->runnable_threads,
799 arch.run_list) { 846 arch.run_list) {
800 kvmppc_core_deliver_interrupts(v); 847 kvmppc_core_prepare_to_enter(v);
801 if (signal_pending(v->arch.run_task)) { 848 if (signal_pending(v->arch.run_task)) {
802 kvmppc_remove_runnable(vc, v); 849 kvmppc_remove_runnable(vc, v);
803 v->stat.signal_exits++; 850 v->stat.signal_exits++;
@@ -836,20 +883,26 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
836 return -EINVAL; 883 return -EINVAL;
837 } 884 }
838 885
886 kvmppc_core_prepare_to_enter(vcpu);
887
839 /* No need to go into the guest when all we'll do is come back out */ 888 /* No need to go into the guest when all we'll do is come back out */
840 if (signal_pending(current)) { 889 if (signal_pending(current)) {
841 run->exit_reason = KVM_EXIT_INTR; 890 run->exit_reason = KVM_EXIT_INTR;
842 return -EINTR; 891 return -EINTR;
843 } 892 }
844 893
845 /* On PPC970, check that we have an RMA region */ 894 /* On the first time here, set up VRMA or RMA */
846 if (!vcpu->kvm->arch.rma && cpu_has_feature(CPU_FTR_ARCH_201)) 895 if (!vcpu->kvm->arch.rma_setup_done) {
847 return -EPERM; 896 r = kvmppc_hv_setup_rma(vcpu);
897 if (r)
898 return r;
899 }
848 900
849 flush_fp_to_thread(current); 901 flush_fp_to_thread(current);
850 flush_altivec_to_thread(current); 902 flush_altivec_to_thread(current);
851 flush_vsx_to_thread(current); 903 flush_vsx_to_thread(current);
852 vcpu->arch.wqp = &vcpu->arch.vcore->wq; 904 vcpu->arch.wqp = &vcpu->arch.vcore->wq;
905 vcpu->arch.pgdir = current->mm->pgd;
853 906
854 do { 907 do {
855 r = kvmppc_run_vcpu(run, vcpu); 908 r = kvmppc_run_vcpu(run, vcpu);
@@ -857,7 +910,7 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
857 if (run->exit_reason == KVM_EXIT_PAPR_HCALL && 910 if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
858 !(vcpu->arch.shregs.msr & MSR_PR)) { 911 !(vcpu->arch.shregs.msr & MSR_PR)) {
859 r = kvmppc_pseries_do_hcall(vcpu); 912 r = kvmppc_pseries_do_hcall(vcpu);
860 kvmppc_core_deliver_interrupts(vcpu); 913 kvmppc_core_prepare_to_enter(vcpu);
861 } 914 }
862 } while (r == RESUME_GUEST); 915 } while (r == RESUME_GUEST);
863 return r; 916 return r;
@@ -1001,7 +1054,7 @@ static inline int lpcr_rmls(unsigned long rma_size)
1001 1054
1002static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1055static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1003{ 1056{
1004 struct kvmppc_rma_info *ri = vma->vm_file->private_data; 1057 struct kvmppc_linear_info *ri = vma->vm_file->private_data;
1005 struct page *page; 1058 struct page *page;
1006 1059
1007 if (vmf->pgoff >= ri->npages) 1060 if (vmf->pgoff >= ri->npages)
@@ -1026,7 +1079,7 @@ static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
1026 1079
1027static int kvm_rma_release(struct inode *inode, struct file *filp) 1080static int kvm_rma_release(struct inode *inode, struct file *filp)
1028{ 1081{
1029 struct kvmppc_rma_info *ri = filp->private_data; 1082 struct kvmppc_linear_info *ri = filp->private_data;
1030 1083
1031 kvm_release_rma(ri); 1084 kvm_release_rma(ri);
1032 return 0; 1085 return 0;
@@ -1039,7 +1092,7 @@ static struct file_operations kvm_rma_fops = {
1039 1092
1040long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret) 1093long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
1041{ 1094{
1042 struct kvmppc_rma_info *ri; 1095 struct kvmppc_linear_info *ri;
1043 long fd; 1096 long fd;
1044 1097
1045 ri = kvm_alloc_rma(); 1098 ri = kvm_alloc_rma();
@@ -1054,89 +1107,189 @@ long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
1054 return fd; 1107 return fd;
1055} 1108}
1056 1109
1057static struct page *hva_to_page(unsigned long addr) 1110/*
1111 * Get (and clear) the dirty memory log for a memory slot.
1112 */
1113int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
1058{ 1114{
1059 struct page *page[1]; 1115 struct kvm_memory_slot *memslot;
1060 int npages; 1116 int r;
1117 unsigned long n;
1061 1118
1062 might_sleep(); 1119 mutex_lock(&kvm->slots_lock);
1063 1120
1064 npages = get_user_pages_fast(addr, 1, 1, page); 1121 r = -EINVAL;
1122 if (log->slot >= KVM_MEMORY_SLOTS)
1123 goto out;
1065 1124
1066 if (unlikely(npages != 1)) 1125 memslot = id_to_memslot(kvm->memslots, log->slot);
1067 return 0; 1126 r = -ENOENT;
1127 if (!memslot->dirty_bitmap)
1128 goto out;
1129
1130 n = kvm_dirty_bitmap_bytes(memslot);
1131 memset(memslot->dirty_bitmap, 0, n);
1132
1133 r = kvmppc_hv_get_dirty_log(kvm, memslot);
1134 if (r)
1135 goto out;
1136
1137 r = -EFAULT;
1138 if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
1139 goto out;
1140
1141 r = 0;
1142out:
1143 mutex_unlock(&kvm->slots_lock);
1144 return r;
1145}
1068 1146
1069 return page[0]; 1147static unsigned long slb_pgsize_encoding(unsigned long psize)
1148{
1149 unsigned long senc = 0;
1150
1151 if (psize > 0x1000) {
1152 senc = SLB_VSID_L;
1153 if (psize == 0x10000)
1154 senc |= SLB_VSID_LP_01;
1155 }
1156 return senc;
1070} 1157}
1071 1158
1072int kvmppc_core_prepare_memory_region(struct kvm *kvm, 1159int kvmppc_core_prepare_memory_region(struct kvm *kvm,
1073 struct kvm_userspace_memory_region *mem) 1160 struct kvm_userspace_memory_region *mem)
1074{ 1161{
1075 unsigned long psize, porder; 1162 unsigned long npages;
1076 unsigned long i, npages, totalpages; 1163 unsigned long *phys;
1077 unsigned long pg_ix; 1164
1078 struct kvmppc_pginfo *pginfo; 1165 /* Allocate a slot_phys array */
1079 unsigned long hva; 1166 phys = kvm->arch.slot_phys[mem->slot];
1080 struct kvmppc_rma_info *ri = NULL; 1167 if (!kvm->arch.using_mmu_notifiers && !phys) {
1168 npages = mem->memory_size >> PAGE_SHIFT;
1169 phys = vzalloc(npages * sizeof(unsigned long));
1170 if (!phys)
1171 return -ENOMEM;
1172 kvm->arch.slot_phys[mem->slot] = phys;
1173 kvm->arch.slot_npages[mem->slot] = npages;
1174 }
1175
1176 return 0;
1177}
1178
1179static void unpin_slot(struct kvm *kvm, int slot_id)
1180{
1181 unsigned long *physp;
1182 unsigned long j, npages, pfn;
1081 struct page *page; 1183 struct page *page;
1082 1184
1083 /* For now, only allow 16MB pages */ 1185 physp = kvm->arch.slot_phys[slot_id];
1084 porder = LARGE_PAGE_ORDER; 1186 npages = kvm->arch.slot_npages[slot_id];
1085 psize = 1ul << porder; 1187 if (physp) {
1086 if ((mem->memory_size & (psize - 1)) || 1188 spin_lock(&kvm->arch.slot_phys_lock);
1087 (mem->guest_phys_addr & (psize - 1))) { 1189 for (j = 0; j < npages; j++) {
1088 pr_err("bad memory_size=%llx @ %llx\n", 1190 if (!(physp[j] & KVMPPC_GOT_PAGE))
1089 mem->memory_size, mem->guest_phys_addr); 1191 continue;
1090 return -EINVAL; 1192 pfn = physp[j] >> PAGE_SHIFT;
1193 page = pfn_to_page(pfn);
1194 if (PageHuge(page))
1195 page = compound_head(page);
1196 SetPageDirty(page);
1197 put_page(page);
1198 }
1199 kvm->arch.slot_phys[slot_id] = NULL;
1200 spin_unlock(&kvm->arch.slot_phys_lock);
1201 vfree(physp);
1091 } 1202 }
1203}
1092 1204
1093 npages = mem->memory_size >> porder; 1205void kvmppc_core_commit_memory_region(struct kvm *kvm,
1094 totalpages = (mem->guest_phys_addr + mem->memory_size) >> porder; 1206 struct kvm_userspace_memory_region *mem)
1207{
1208}
1095 1209
1096 /* More memory than we have space to track? */ 1210static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu)
1097 if (totalpages > (1ul << (MAX_MEM_ORDER - LARGE_PAGE_ORDER))) 1211{
1098 return -EINVAL; 1212 int err = 0;
1213 struct kvm *kvm = vcpu->kvm;
1214 struct kvmppc_linear_info *ri = NULL;
1215 unsigned long hva;
1216 struct kvm_memory_slot *memslot;
1217 struct vm_area_struct *vma;
1218 unsigned long lpcr, senc;
1219 unsigned long psize, porder;
1220 unsigned long rma_size;
1221 unsigned long rmls;
1222 unsigned long *physp;
1223 unsigned long i, npages;
1099 1224
1100 /* Do we already have an RMA registered? */ 1225 mutex_lock(&kvm->lock);
1101 if (mem->guest_phys_addr == 0 && kvm->arch.rma) 1226 if (kvm->arch.rma_setup_done)
1102 return -EINVAL; 1227 goto out; /* another vcpu beat us to it */
1228
1229 /* Look up the memslot for guest physical address 0 */
1230 memslot = gfn_to_memslot(kvm, 0);
1231
1232 /* We must have some memory at 0 by now */
1233 err = -EINVAL;
1234 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
1235 goto out;
1236
1237 /* Look up the VMA for the start of this memory slot */
1238 hva = memslot->userspace_addr;
1239 down_read(&current->mm->mmap_sem);
1240 vma = find_vma(current->mm, hva);
1241 if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
1242 goto up_out;
1103 1243
1104 if (totalpages > kvm->arch.ram_npages) 1244 psize = vma_kernel_pagesize(vma);
1105 kvm->arch.ram_npages = totalpages; 1245 porder = __ilog2(psize);
1106 1246
1107 /* Is this one of our preallocated RMAs? */ 1247 /* Is this one of our preallocated RMAs? */
1108 if (mem->guest_phys_addr == 0) { 1248 if (vma->vm_file && vma->vm_file->f_op == &kvm_rma_fops &&
1109 struct vm_area_struct *vma; 1249 hva == vma->vm_start)
1110 1250 ri = vma->vm_file->private_data;
1111 down_read(&current->mm->mmap_sem); 1251
1112 vma = find_vma(current->mm, mem->userspace_addr); 1252 up_read(&current->mm->mmap_sem);
1113 if (vma && vma->vm_file && 1253
1114 vma->vm_file->f_op == &kvm_rma_fops && 1254 if (!ri) {
1115 mem->userspace_addr == vma->vm_start) 1255 /* On POWER7, use VRMA; on PPC970, give up */
1116 ri = vma->vm_file->private_data; 1256 err = -EPERM;
1117 up_read(&current->mm->mmap_sem); 1257 if (cpu_has_feature(CPU_FTR_ARCH_201)) {
1118 if (!ri && cpu_has_feature(CPU_FTR_ARCH_201)) { 1258 pr_err("KVM: CPU requires an RMO\n");
1119 pr_err("CPU requires an RMO\n"); 1259 goto out;
1120 return -EINVAL;
1121 } 1260 }
1122 }
1123 1261
1124 if (ri) { 1262 /* We can handle 4k, 64k or 16M pages in the VRMA */
1125 unsigned long rma_size; 1263 err = -EINVAL;
1126 unsigned long lpcr; 1264 if (!(psize == 0x1000 || psize == 0x10000 ||
1127 long rmls; 1265 psize == 0x1000000))
1266 goto out;
1267
1268 /* Update VRMASD field in the LPCR */
1269 senc = slb_pgsize_encoding(psize);
1270 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
1271 (VRMA_VSID << SLB_VSID_SHIFT_1T);
1272 lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
1273 lpcr |= senc << (LPCR_VRMASD_SH - 4);
1274 kvm->arch.lpcr = lpcr;
1275
1276 /* Create HPTEs in the hash page table for the VRMA */
1277 kvmppc_map_vrma(vcpu, memslot, porder);
1128 1278
1129 rma_size = ri->npages << PAGE_SHIFT; 1279 } else {
1130 if (rma_size > mem->memory_size) 1280 /* Set up to use an RMO region */
1131 rma_size = mem->memory_size; 1281 rma_size = ri->npages;
1282 if (rma_size > memslot->npages)
1283 rma_size = memslot->npages;
1284 rma_size <<= PAGE_SHIFT;
1132 rmls = lpcr_rmls(rma_size); 1285 rmls = lpcr_rmls(rma_size);
1286 err = -EINVAL;
1133 if (rmls < 0) { 1287 if (rmls < 0) {
1134 pr_err("Can't use RMA of 0x%lx bytes\n", rma_size); 1288 pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
1135 return -EINVAL; 1289 goto out;
1136 } 1290 }
1137 atomic_inc(&ri->use_count); 1291 atomic_inc(&ri->use_count);
1138 kvm->arch.rma = ri; 1292 kvm->arch.rma = ri;
1139 kvm->arch.n_rma_pages = rma_size >> porder;
1140 1293
1141 /* Update LPCR and RMOR */ 1294 /* Update LPCR and RMOR */
1142 lpcr = kvm->arch.lpcr; 1295 lpcr = kvm->arch.lpcr;
@@ -1156,53 +1309,35 @@ int kvmppc_core_prepare_memory_region(struct kvm *kvm,
1156 kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT; 1309 kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
1157 } 1310 }
1158 kvm->arch.lpcr = lpcr; 1311 kvm->arch.lpcr = lpcr;
1159 pr_info("Using RMO at %lx size %lx (LPCR = %lx)\n", 1312 pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
1160 ri->base_pfn << PAGE_SHIFT, rma_size, lpcr); 1313 ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
1161 }
1162 1314
1163 pg_ix = mem->guest_phys_addr >> porder; 1315 /* Initialize phys addrs of pages in RMO */
1164 pginfo = kvm->arch.ram_pginfo + pg_ix; 1316 npages = ri->npages;
1165 for (i = 0; i < npages; ++i, ++pg_ix) { 1317 porder = __ilog2(npages);
1166 if (ri && pg_ix < kvm->arch.n_rma_pages) { 1318 physp = kvm->arch.slot_phys[memslot->id];
1167 pginfo[i].pfn = ri->base_pfn + 1319 spin_lock(&kvm->arch.slot_phys_lock);
1168 (pg_ix << (porder - PAGE_SHIFT)); 1320 for (i = 0; i < npages; ++i)
1169 continue; 1321 physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) + porder;
1170 } 1322 spin_unlock(&kvm->arch.slot_phys_lock);
1171 hva = mem->userspace_addr + (i << porder);
1172 page = hva_to_page(hva);
1173 if (!page) {
1174 pr_err("oops, no pfn for hva %lx\n", hva);
1175 goto err;
1176 }
1177 /* Check it's a 16MB page */
1178 if (!PageHead(page) ||
1179 compound_order(page) != (LARGE_PAGE_ORDER - PAGE_SHIFT)) {
1180 pr_err("page at %lx isn't 16MB (o=%d)\n",
1181 hva, compound_order(page));
1182 goto err;
1183 }
1184 pginfo[i].pfn = page_to_pfn(page);
1185 } 1323 }
1186 1324
1187 return 0; 1325 /* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
1188 1326 smp_wmb();
1189 err: 1327 kvm->arch.rma_setup_done = 1;
1190 return -EINVAL; 1328 err = 0;
1191} 1329 out:
1330 mutex_unlock(&kvm->lock);
1331 return err;
1192 1332
1193void kvmppc_core_commit_memory_region(struct kvm *kvm, 1333 up_out:
1194 struct kvm_userspace_memory_region *mem) 1334 up_read(&current->mm->mmap_sem);
1195{ 1335 goto out;
1196 if (mem->guest_phys_addr == 0 && mem->memory_size != 0 &&
1197 !kvm->arch.rma)
1198 kvmppc_map_vrma(kvm, mem);
1199} 1336}
1200 1337
1201int kvmppc_core_init_vm(struct kvm *kvm) 1338int kvmppc_core_init_vm(struct kvm *kvm)
1202{ 1339{
1203 long r; 1340 long r;
1204 unsigned long npages = 1ul << (MAX_MEM_ORDER - LARGE_PAGE_ORDER);
1205 long err = -ENOMEM;
1206 unsigned long lpcr; 1341 unsigned long lpcr;
1207 1342
1208 /* Allocate hashed page table */ 1343 /* Allocate hashed page table */
@@ -1212,19 +1347,7 @@ int kvmppc_core_init_vm(struct kvm *kvm)
1212 1347
1213 INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables); 1348 INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
1214 1349
1215 kvm->arch.ram_pginfo = kzalloc(npages * sizeof(struct kvmppc_pginfo),
1216 GFP_KERNEL);
1217 if (!kvm->arch.ram_pginfo) {
1218 pr_err("kvmppc_core_init_vm: couldn't alloc %lu bytes\n",
1219 npages * sizeof(struct kvmppc_pginfo));
1220 goto out_free;
1221 }
1222
1223 kvm->arch.ram_npages = 0;
1224 kvm->arch.ram_psize = 1ul << LARGE_PAGE_ORDER;
1225 kvm->arch.ram_porder = LARGE_PAGE_ORDER;
1226 kvm->arch.rma = NULL; 1350 kvm->arch.rma = NULL;
1227 kvm->arch.n_rma_pages = 0;
1228 1351
1229 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); 1352 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
1230 1353
@@ -1242,30 +1365,25 @@ int kvmppc_core_init_vm(struct kvm *kvm)
1242 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); 1365 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
1243 lpcr &= LPCR_PECE | LPCR_LPES; 1366 lpcr &= LPCR_PECE | LPCR_LPES;
1244 lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE | 1367 lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
1245 LPCR_VPM0 | LPCR_VRMA_L; 1368 LPCR_VPM0 | LPCR_VPM1;
1369 kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
1370 (VRMA_VSID << SLB_VSID_SHIFT_1T);
1246 } 1371 }
1247 kvm->arch.lpcr = lpcr; 1372 kvm->arch.lpcr = lpcr;
1248 1373
1374 kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206);
1375 spin_lock_init(&kvm->arch.slot_phys_lock);
1249 return 0; 1376 return 0;
1250
1251 out_free:
1252 kvmppc_free_hpt(kvm);
1253 return err;
1254} 1377}
1255 1378
1256void kvmppc_core_destroy_vm(struct kvm *kvm) 1379void kvmppc_core_destroy_vm(struct kvm *kvm)
1257{ 1380{
1258 struct kvmppc_pginfo *pginfo;
1259 unsigned long i; 1381 unsigned long i;
1260 1382
1261 if (kvm->arch.ram_pginfo) { 1383 if (!kvm->arch.using_mmu_notifiers)
1262 pginfo = kvm->arch.ram_pginfo; 1384 for (i = 0; i < KVM_MEM_SLOTS_NUM; i++)
1263 kvm->arch.ram_pginfo = NULL; 1385 unpin_slot(kvm, i);
1264 for (i = kvm->arch.n_rma_pages; i < kvm->arch.ram_npages; ++i) 1386
1265 if (pginfo[i].pfn)
1266 put_page(pfn_to_page(pginfo[i].pfn));
1267 kfree(pginfo);
1268 }
1269 if (kvm->arch.rma) { 1387 if (kvm->arch.rma) {
1270 kvm_release_rma(kvm->arch.rma); 1388 kvm_release_rma(kvm->arch.rma);
1271 kvm->arch.rma = NULL; 1389 kvm->arch.rma = NULL;
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index a795a13f4a70..bed1279aa6a8 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -18,6 +18,15 @@
18#include <asm/kvm_ppc.h> 18#include <asm/kvm_ppc.h>
19#include <asm/kvm_book3s.h> 19#include <asm/kvm_book3s.h>
20 20
21#define KVM_LINEAR_RMA 0
22#define KVM_LINEAR_HPT 1
23
24static void __init kvm_linear_init_one(ulong size, int count, int type);
25static struct kvmppc_linear_info *kvm_alloc_linear(int type);
26static void kvm_release_linear(struct kvmppc_linear_info *ri);
27
28/*************** RMA *************/
29
21/* 30/*
22 * This maintains a list of RMAs (real mode areas) for KVM guests to use. 31 * This maintains a list of RMAs (real mode areas) for KVM guests to use.
23 * Each RMA has to be physically contiguous and of a size that the 32 * Each RMA has to be physically contiguous and of a size that the
@@ -29,32 +38,6 @@
29static unsigned long kvm_rma_size = 64 << 20; /* 64MB */ 38static unsigned long kvm_rma_size = 64 << 20; /* 64MB */
30static unsigned long kvm_rma_count; 39static unsigned long kvm_rma_count;
31 40
32static int __init early_parse_rma_size(char *p)
33{
34 if (!p)
35 return 1;
36
37 kvm_rma_size = memparse(p, &p);
38
39 return 0;
40}
41early_param("kvm_rma_size", early_parse_rma_size);
42
43static int __init early_parse_rma_count(char *p)
44{
45 if (!p)
46 return 1;
47
48 kvm_rma_count = simple_strtoul(p, NULL, 0);
49
50 return 0;
51}
52early_param("kvm_rma_count", early_parse_rma_count);
53
54static struct kvmppc_rma_info *rma_info;
55static LIST_HEAD(free_rmas);
56static DEFINE_SPINLOCK(rma_lock);
57
58/* Work out RMLS (real mode limit selector) field value for a given RMA size. 41/* Work out RMLS (real mode limit selector) field value for a given RMA size.
59 Assumes POWER7 or PPC970. */ 42 Assumes POWER7 or PPC970. */
60static inline int lpcr_rmls(unsigned long rma_size) 43static inline int lpcr_rmls(unsigned long rma_size)
@@ -81,45 +64,106 @@ static inline int lpcr_rmls(unsigned long rma_size)
81 } 64 }
82} 65}
83 66
67static int __init early_parse_rma_size(char *p)
68{
69 if (!p)
70 return 1;
71
72 kvm_rma_size = memparse(p, &p);
73
74 return 0;
75}
76early_param("kvm_rma_size", early_parse_rma_size);
77
78static int __init early_parse_rma_count(char *p)
79{
80 if (!p)
81 return 1;
82
83 kvm_rma_count = simple_strtoul(p, NULL, 0);
84
85 return 0;
86}
87early_param("kvm_rma_count", early_parse_rma_count);
88
89struct kvmppc_linear_info *kvm_alloc_rma(void)
90{
91 return kvm_alloc_linear(KVM_LINEAR_RMA);
92}
93EXPORT_SYMBOL_GPL(kvm_alloc_rma);
94
95void kvm_release_rma(struct kvmppc_linear_info *ri)
96{
97 kvm_release_linear(ri);
98}
99EXPORT_SYMBOL_GPL(kvm_release_rma);
100
101/*************** HPT *************/
102
84/* 103/*
85 * Called at boot time while the bootmem allocator is active, 104 * This maintains a list of big linear HPT tables that contain the GVA->HPA
86 * to allocate contiguous physical memory for the real memory 105 * memory mappings. If we don't reserve those early on, we might not be able
87 * areas for guests. 106 * to get a big (usually 16MB) linear memory region from the kernel anymore.
88 */ 107 */
89void __init kvm_rma_init(void) 108
109static unsigned long kvm_hpt_count;
110
111static int __init early_parse_hpt_count(char *p)
112{
113 if (!p)
114 return 1;
115
116 kvm_hpt_count = simple_strtoul(p, NULL, 0);
117
118 return 0;
119}
120early_param("kvm_hpt_count", early_parse_hpt_count);
121
122struct kvmppc_linear_info *kvm_alloc_hpt(void)
123{
124 return kvm_alloc_linear(KVM_LINEAR_HPT);
125}
126EXPORT_SYMBOL_GPL(kvm_alloc_hpt);
127
128void kvm_release_hpt(struct kvmppc_linear_info *li)
129{
130 kvm_release_linear(li);
131}
132EXPORT_SYMBOL_GPL(kvm_release_hpt);
133
134/*************** generic *************/
135
136static LIST_HEAD(free_linears);
137static DEFINE_SPINLOCK(linear_lock);
138
139static void __init kvm_linear_init_one(ulong size, int count, int type)
90{ 140{
91 unsigned long i; 141 unsigned long i;
92 unsigned long j, npages; 142 unsigned long j, npages;
93 void *rma; 143 void *linear;
94 struct page *pg; 144 struct page *pg;
145 const char *typestr;
146 struct kvmppc_linear_info *linear_info;
95 147
96 /* Only do this on PPC970 in HV mode */ 148 if (!count)
97 if (!cpu_has_feature(CPU_FTR_HVMODE) ||
98 !cpu_has_feature(CPU_FTR_ARCH_201))
99 return;
100
101 if (!kvm_rma_size || !kvm_rma_count)
102 return; 149 return;
103 150
104 /* Check that the requested size is one supported in hardware */ 151 typestr = (type == KVM_LINEAR_RMA) ? "RMA" : "HPT";
105 if (lpcr_rmls(kvm_rma_size) < 0) { 152
106 pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size); 153 npages = size >> PAGE_SHIFT;
107 return; 154 linear_info = alloc_bootmem(count * sizeof(struct kvmppc_linear_info));
108 } 155 for (i = 0; i < count; ++i) {
109 156 linear = alloc_bootmem_align(size, size);
110 npages = kvm_rma_size >> PAGE_SHIFT; 157 pr_info("Allocated KVM %s at %p (%ld MB)\n", typestr, linear,
111 rma_info = alloc_bootmem(kvm_rma_count * sizeof(struct kvmppc_rma_info)); 158 size >> 20);
112 for (i = 0; i < kvm_rma_count; ++i) { 159 linear_info[i].base_virt = linear;
113 rma = alloc_bootmem_align(kvm_rma_size, kvm_rma_size); 160 linear_info[i].base_pfn = __pa(linear) >> PAGE_SHIFT;
114 pr_info("Allocated KVM RMA at %p (%ld MB)\n", rma, 161 linear_info[i].npages = npages;
115 kvm_rma_size >> 20); 162 linear_info[i].type = type;
116 rma_info[i].base_virt = rma; 163 list_add_tail(&linear_info[i].list, &free_linears);
117 rma_info[i].base_pfn = __pa(rma) >> PAGE_SHIFT; 164 atomic_set(&linear_info[i].use_count, 0);
118 rma_info[i].npages = npages; 165
119 list_add_tail(&rma_info[i].list, &free_rmas); 166 pg = pfn_to_page(linear_info[i].base_pfn);
120 atomic_set(&rma_info[i].use_count, 0);
121
122 pg = pfn_to_page(rma_info[i].base_pfn);
123 for (j = 0; j < npages; ++j) { 167 for (j = 0; j < npages; ++j) {
124 atomic_inc(&pg->_count); 168 atomic_inc(&pg->_count);
125 ++pg; 169 ++pg;
@@ -127,30 +171,59 @@ void __init kvm_rma_init(void)
127 } 171 }
128} 172}
129 173
130struct kvmppc_rma_info *kvm_alloc_rma(void) 174static struct kvmppc_linear_info *kvm_alloc_linear(int type)
131{ 175{
132 struct kvmppc_rma_info *ri; 176 struct kvmppc_linear_info *ri;
133 177
134 ri = NULL; 178 ri = NULL;
135 spin_lock(&rma_lock); 179 spin_lock(&linear_lock);
136 if (!list_empty(&free_rmas)) { 180 list_for_each_entry(ri, &free_linears, list) {
137 ri = list_first_entry(&free_rmas, struct kvmppc_rma_info, list); 181 if (ri->type != type)
182 continue;
183
138 list_del(&ri->list); 184 list_del(&ri->list);
139 atomic_inc(&ri->use_count); 185 atomic_inc(&ri->use_count);
186 break;
140 } 187 }
141 spin_unlock(&rma_lock); 188 spin_unlock(&linear_lock);
189 memset(ri->base_virt, 0, ri->npages << PAGE_SHIFT);
142 return ri; 190 return ri;
143} 191}
144EXPORT_SYMBOL_GPL(kvm_alloc_rma);
145 192
146void kvm_release_rma(struct kvmppc_rma_info *ri) 193static void kvm_release_linear(struct kvmppc_linear_info *ri)
147{ 194{
148 if (atomic_dec_and_test(&ri->use_count)) { 195 if (atomic_dec_and_test(&ri->use_count)) {
149 spin_lock(&rma_lock); 196 spin_lock(&linear_lock);
150 list_add_tail(&ri->list, &free_rmas); 197 list_add_tail(&ri->list, &free_linears);
151 spin_unlock(&rma_lock); 198 spin_unlock(&linear_lock);
152 199
153 } 200 }
154} 201}
155EXPORT_SYMBOL_GPL(kvm_release_rma);
156 202
203/*
204 * Called at boot time while the bootmem allocator is active,
205 * to allocate contiguous physical memory for the hash page
206 * tables for guests.
207 */
208void __init kvm_linear_init(void)
209{
210 /* HPT */
211 kvm_linear_init_one(1 << HPT_ORDER, kvm_hpt_count, KVM_LINEAR_HPT);
212
213 /* RMA */
214 /* Only do this on PPC970 in HV mode */
215 if (!cpu_has_feature(CPU_FTR_HVMODE) ||
216 !cpu_has_feature(CPU_FTR_ARCH_201))
217 return;
218
219 if (!kvm_rma_size || !kvm_rma_count)
220 return;
221
222 /* Check that the requested size is one supported in hardware */
223 if (lpcr_rmls(kvm_rma_size) < 0) {
224 pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
225 return;
226 }
227
228 kvm_linear_init_one(kvm_rma_size, kvm_rma_count, KVM_LINEAR_RMA);
229}
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index bacb0cfa3602..def880aea63a 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -11,6 +11,7 @@
11#include <linux/kvm.h> 11#include <linux/kvm.h>
12#include <linux/kvm_host.h> 12#include <linux/kvm_host.h>
13#include <linux/hugetlb.h> 13#include <linux/hugetlb.h>
14#include <linux/module.h>
14 15
15#include <asm/tlbflush.h> 16#include <asm/tlbflush.h>
16#include <asm/kvm_ppc.h> 17#include <asm/kvm_ppc.h>
@@ -20,95 +21,307 @@
20#include <asm/synch.h> 21#include <asm/synch.h>
21#include <asm/ppc-opcode.h> 22#include <asm/ppc-opcode.h>
22 23
23/* For now use fixed-size 16MB page table */ 24/* Translate address of a vmalloc'd thing to a linear map address */
24#define HPT_ORDER 24 25static void *real_vmalloc_addr(void *x)
25#define HPT_NPTEG (1ul << (HPT_ORDER - 7)) /* 128B per pteg */ 26{
26#define HPT_HASH_MASK (HPT_NPTEG - 1) 27 unsigned long addr = (unsigned long) x;
28 pte_t *p;
27 29
28#define HPTE_V_HVLOCK 0x40UL 30 p = find_linux_pte(swapper_pg_dir, addr);
31 if (!p || !pte_present(*p))
32 return NULL;
33 /* assume we don't have huge pages in vmalloc space... */
34 addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
35 return __va(addr);
36}
29 37
30static inline long lock_hpte(unsigned long *hpte, unsigned long bits) 38/*
39 * Add this HPTE into the chain for the real page.
40 * Must be called with the chain locked; it unlocks the chain.
41 */
42void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
43 unsigned long *rmap, long pte_index, int realmode)
31{ 44{
32 unsigned long tmp, old; 45 struct revmap_entry *head, *tail;
46 unsigned long i;
33 47
34 asm volatile(" ldarx %0,0,%2\n" 48 if (*rmap & KVMPPC_RMAP_PRESENT) {
35 " and. %1,%0,%3\n" 49 i = *rmap & KVMPPC_RMAP_INDEX;
36 " bne 2f\n" 50 head = &kvm->arch.revmap[i];
37 " ori %0,%0,%4\n" 51 if (realmode)
38 " stdcx. %0,0,%2\n" 52 head = real_vmalloc_addr(head);
39 " beq+ 2f\n" 53 tail = &kvm->arch.revmap[head->back];
40 " li %1,%3\n" 54 if (realmode)
41 "2: isync" 55 tail = real_vmalloc_addr(tail);
42 : "=&r" (tmp), "=&r" (old) 56 rev->forw = i;
43 : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK) 57 rev->back = head->back;
44 : "cc", "memory"); 58 tail->forw = pte_index;
45 return old == 0; 59 head->back = pte_index;
60 } else {
61 rev->forw = rev->back = pte_index;
62 i = pte_index;
63 }
64 smp_wmb();
65 *rmap = i | KVMPPC_RMAP_REFERENCED | KVMPPC_RMAP_PRESENT; /* unlock */
66}
67EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);
68
69/* Remove this HPTE from the chain for a real page */
70static void remove_revmap_chain(struct kvm *kvm, long pte_index,
71 struct revmap_entry *rev,
72 unsigned long hpte_v, unsigned long hpte_r)
73{
74 struct revmap_entry *next, *prev;
75 unsigned long gfn, ptel, head;
76 struct kvm_memory_slot *memslot;
77 unsigned long *rmap;
78 unsigned long rcbits;
79
80 rcbits = hpte_r & (HPTE_R_R | HPTE_R_C);
81 ptel = rev->guest_rpte |= rcbits;
82 gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
83 memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
84 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
85 return;
86
87 rmap = real_vmalloc_addr(&memslot->rmap[gfn - memslot->base_gfn]);
88 lock_rmap(rmap);
89
90 head = *rmap & KVMPPC_RMAP_INDEX;
91 next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
92 prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
93 next->back = rev->back;
94 prev->forw = rev->forw;
95 if (head == pte_index) {
96 head = rev->forw;
97 if (head == pte_index)
98 *rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
99 else
100 *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
101 }
102 *rmap |= rcbits << KVMPPC_RMAP_RC_SHIFT;
103 unlock_rmap(rmap);
104}
105
106static pte_t lookup_linux_pte(struct kvm_vcpu *vcpu, unsigned long hva,
107 int writing, unsigned long *pte_sizep)
108{
109 pte_t *ptep;
110 unsigned long ps = *pte_sizep;
111 unsigned int shift;
112
113 ptep = find_linux_pte_or_hugepte(vcpu->arch.pgdir, hva, &shift);
114 if (!ptep)
115 return __pte(0);
116 if (shift)
117 *pte_sizep = 1ul << shift;
118 else
119 *pte_sizep = PAGE_SIZE;
120 if (ps > *pte_sizep)
121 return __pte(0);
122 if (!pte_present(*ptep))
123 return __pte(0);
124 return kvmppc_read_update_linux_pte(ptep, writing);
125}
126
127static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v)
128{
129 asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
130 hpte[0] = hpte_v;
46} 131}
47 132
48long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags, 133long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
49 long pte_index, unsigned long pteh, unsigned long ptel) 134 long pte_index, unsigned long pteh, unsigned long ptel)
50{ 135{
51 unsigned long porder;
52 struct kvm *kvm = vcpu->kvm; 136 struct kvm *kvm = vcpu->kvm;
53 unsigned long i, lpn, pa; 137 unsigned long i, pa, gpa, gfn, psize;
138 unsigned long slot_fn, hva;
54 unsigned long *hpte; 139 unsigned long *hpte;
140 struct revmap_entry *rev;
141 unsigned long g_ptel = ptel;
142 struct kvm_memory_slot *memslot;
143 unsigned long *physp, pte_size;
144 unsigned long is_io;
145 unsigned long *rmap;
146 pte_t pte;
147 unsigned int writing;
148 unsigned long mmu_seq;
149 unsigned long rcbits;
150 bool realmode = vcpu->arch.vcore->vcore_state == VCORE_RUNNING;
55 151
56 /* only handle 4k, 64k and 16M pages for now */ 152 psize = hpte_page_size(pteh, ptel);
57 porder = 12; 153 if (!psize)
58 if (pteh & HPTE_V_LARGE) { 154 return H_PARAMETER;
59 if (cpu_has_feature(CPU_FTR_ARCH_206) && 155 writing = hpte_is_writable(ptel);
60 (ptel & 0xf000) == 0x1000) { 156 pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);
61 /* 64k page */ 157
62 porder = 16; 158 /* used later to detect if we might have been invalidated */
63 } else if ((ptel & 0xff000) == 0) { 159 mmu_seq = kvm->mmu_notifier_seq;
64 /* 16M page */ 160 smp_rmb();
65 porder = 24; 161
66 /* lowest AVA bit must be 0 for 16M pages */ 162 /* Find the memslot (if any) for this address */
67 if (pteh & 0x80) 163 gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
68 return H_PARAMETER; 164 gfn = gpa >> PAGE_SHIFT;
69 } else 165 memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
166 pa = 0;
167 is_io = ~0ul;
168 rmap = NULL;
169 if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
170 /* PPC970 can't do emulated MMIO */
171 if (!cpu_has_feature(CPU_FTR_ARCH_206))
70 return H_PARAMETER; 172 return H_PARAMETER;
173 /* Emulated MMIO - mark this with key=31 */
174 pteh |= HPTE_V_ABSENT;
175 ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
176 goto do_insert;
71 } 177 }
72 lpn = (ptel & HPTE_R_RPN) >> kvm->arch.ram_porder; 178
73 if (lpn >= kvm->arch.ram_npages || porder > kvm->arch.ram_porder) 179 /* Check if the requested page fits entirely in the memslot. */
74 return H_PARAMETER; 180 if (!slot_is_aligned(memslot, psize))
75 pa = kvm->arch.ram_pginfo[lpn].pfn << PAGE_SHIFT;
76 if (!pa)
77 return H_PARAMETER; 181 return H_PARAMETER;
78 /* Check WIMG */ 182 slot_fn = gfn - memslot->base_gfn;
79 if ((ptel & HPTE_R_WIMG) != HPTE_R_M && 183 rmap = &memslot->rmap[slot_fn];
80 (ptel & HPTE_R_WIMG) != (HPTE_R_W | HPTE_R_I | HPTE_R_M)) 184
185 if (!kvm->arch.using_mmu_notifiers) {
186 physp = kvm->arch.slot_phys[memslot->id];
187 if (!physp)
188 return H_PARAMETER;
189 physp += slot_fn;
190 if (realmode)
191 physp = real_vmalloc_addr(physp);
192 pa = *physp;
193 if (!pa)
194 return H_TOO_HARD;
195 is_io = pa & (HPTE_R_I | HPTE_R_W);
196 pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
197 pa &= PAGE_MASK;
198 } else {
199 /* Translate to host virtual address */
200 hva = gfn_to_hva_memslot(memslot, gfn);
201
202 /* Look up the Linux PTE for the backing page */
203 pte_size = psize;
204 pte = lookup_linux_pte(vcpu, hva, writing, &pte_size);
205 if (pte_present(pte)) {
206 if (writing && !pte_write(pte))
207 /* make the actual HPTE be read-only */
208 ptel = hpte_make_readonly(ptel);
209 is_io = hpte_cache_bits(pte_val(pte));
210 pa = pte_pfn(pte) << PAGE_SHIFT;
211 }
212 }
213 if (pte_size < psize)
81 return H_PARAMETER; 214 return H_PARAMETER;
82 pteh &= ~0x60UL; 215 if (pa && pte_size > psize)
83 ptel &= ~(HPTE_R_PP0 - kvm->arch.ram_psize); 216 pa |= gpa & (pte_size - 1);
217
218 ptel &= ~(HPTE_R_PP0 - psize);
84 ptel |= pa; 219 ptel |= pa;
85 if (pte_index >= (HPT_NPTEG << 3)) 220
221 if (pa)
222 pteh |= HPTE_V_VALID;
223 else
224 pteh |= HPTE_V_ABSENT;
225
226 /* Check WIMG */
227 if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) {
228 if (is_io)
229 return H_PARAMETER;
230 /*
231 * Allow guest to map emulated device memory as
232 * uncacheable, but actually make it cacheable.
233 */
234 ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
235 ptel |= HPTE_R_M;
236 }
237
238 /* Find and lock the HPTEG slot to use */
239 do_insert:
240 if (pte_index >= HPT_NPTE)
86 return H_PARAMETER; 241 return H_PARAMETER;
87 if (likely((flags & H_EXACT) == 0)) { 242 if (likely((flags & H_EXACT) == 0)) {
88 pte_index &= ~7UL; 243 pte_index &= ~7UL;
89 hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); 244 hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
90 for (i = 0; ; ++i) { 245 for (i = 0; i < 8; ++i) {
91 if (i == 8)
92 return H_PTEG_FULL;
93 if ((*hpte & HPTE_V_VALID) == 0 && 246 if ((*hpte & HPTE_V_VALID) == 0 &&
94 lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID)) 247 try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
248 HPTE_V_ABSENT))
95 break; 249 break;
96 hpte += 2; 250 hpte += 2;
97 } 251 }
252 if (i == 8) {
253 /*
254 * Since try_lock_hpte doesn't retry (not even stdcx.
255 * failures), it could be that there is a free slot
256 * but we transiently failed to lock it. Try again,
257 * actually locking each slot and checking it.
258 */
259 hpte -= 16;
260 for (i = 0; i < 8; ++i) {
261 while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
262 cpu_relax();
263 if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)))
264 break;
265 *hpte &= ~HPTE_V_HVLOCK;
266 hpte += 2;
267 }
268 if (i == 8)
269 return H_PTEG_FULL;
270 }
271 pte_index += i;
98 } else { 272 } else {
99 i = 0;
100 hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); 273 hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
101 if (!lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID)) 274 if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
102 return H_PTEG_FULL; 275 HPTE_V_ABSENT)) {
276 /* Lock the slot and check again */
277 while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
278 cpu_relax();
279 if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
280 *hpte &= ~HPTE_V_HVLOCK;
281 return H_PTEG_FULL;
282 }
283 }
103 } 284 }
285
286 /* Save away the guest's idea of the second HPTE dword */
287 rev = &kvm->arch.revmap[pte_index];
288 if (realmode)
289 rev = real_vmalloc_addr(rev);
290 if (rev)
291 rev->guest_rpte = g_ptel;
292
293 /* Link HPTE into reverse-map chain */
294 if (pteh & HPTE_V_VALID) {
295 if (realmode)
296 rmap = real_vmalloc_addr(rmap);
297 lock_rmap(rmap);
298 /* Check for pending invalidations under the rmap chain lock */
299 if (kvm->arch.using_mmu_notifiers &&
300 mmu_notifier_retry(vcpu, mmu_seq)) {
301 /* inval in progress, write a non-present HPTE */
302 pteh |= HPTE_V_ABSENT;
303 pteh &= ~HPTE_V_VALID;
304 unlock_rmap(rmap);
305 } else {
306 kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
307 realmode);
308 /* Only set R/C in real HPTE if already set in *rmap */
309 rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
310 ptel &= rcbits | ~(HPTE_R_R | HPTE_R_C);
311 }
312 }
313
104 hpte[1] = ptel; 314 hpte[1] = ptel;
315
316 /* Write the first HPTE dword, unlocking the HPTE and making it valid */
105 eieio(); 317 eieio();
106 hpte[0] = pteh; 318 hpte[0] = pteh;
107 asm volatile("ptesync" : : : "memory"); 319 asm volatile("ptesync" : : : "memory");
108 atomic_inc(&kvm->arch.ram_pginfo[lpn].refcnt); 320
109 vcpu->arch.gpr[4] = pte_index + i; 321 vcpu->arch.gpr[4] = pte_index;
110 return H_SUCCESS; 322 return H_SUCCESS;
111} 323}
324EXPORT_SYMBOL_GPL(kvmppc_h_enter);
112 325
113#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token)) 326#define LOCK_TOKEN (*(u32 *)(&get_paca()->lock_token))
114 327
@@ -137,37 +350,46 @@ long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
137 struct kvm *kvm = vcpu->kvm; 350 struct kvm *kvm = vcpu->kvm;
138 unsigned long *hpte; 351 unsigned long *hpte;
139 unsigned long v, r, rb; 352 unsigned long v, r, rb;
353 struct revmap_entry *rev;
140 354
141 if (pte_index >= (HPT_NPTEG << 3)) 355 if (pte_index >= HPT_NPTE)
142 return H_PARAMETER; 356 return H_PARAMETER;
143 hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); 357 hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
144 while (!lock_hpte(hpte, HPTE_V_HVLOCK)) 358 while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
145 cpu_relax(); 359 cpu_relax();
146 if ((hpte[0] & HPTE_V_VALID) == 0 || 360 if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
147 ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) || 361 ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
148 ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) { 362 ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
149 hpte[0] &= ~HPTE_V_HVLOCK; 363 hpte[0] &= ~HPTE_V_HVLOCK;
150 return H_NOT_FOUND; 364 return H_NOT_FOUND;
151 } 365 }
152 if (atomic_read(&kvm->online_vcpus) == 1) 366
153 flags |= H_LOCAL; 367 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
154 vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK; 368 v = hpte[0] & ~HPTE_V_HVLOCK;
155 vcpu->arch.gpr[5] = r = hpte[1]; 369 if (v & HPTE_V_VALID) {
156 rb = compute_tlbie_rb(v, r, pte_index); 370 hpte[0] &= ~HPTE_V_VALID;
157 hpte[0] = 0; 371 rb = compute_tlbie_rb(v, hpte[1], pte_index);
158 if (!(flags & H_LOCAL)) { 372 if (!(flags & H_LOCAL) && atomic_read(&kvm->online_vcpus) > 1) {
159 while(!try_lock_tlbie(&kvm->arch.tlbie_lock)) 373 while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
160 cpu_relax(); 374 cpu_relax();
161 asm volatile("ptesync" : : : "memory"); 375 asm volatile("ptesync" : : : "memory");
162 asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync" 376 asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
163 : : "r" (rb), "r" (kvm->arch.lpid)); 377 : : "r" (rb), "r" (kvm->arch.lpid));
164 asm volatile("ptesync" : : : "memory"); 378 asm volatile("ptesync" : : : "memory");
165 kvm->arch.tlbie_lock = 0; 379 kvm->arch.tlbie_lock = 0;
166 } else { 380 } else {
167 asm volatile("ptesync" : : : "memory"); 381 asm volatile("ptesync" : : : "memory");
168 asm volatile("tlbiel %0" : : "r" (rb)); 382 asm volatile("tlbiel %0" : : "r" (rb));
169 asm volatile("ptesync" : : : "memory"); 383 asm volatile("ptesync" : : : "memory");
384 }
385 /* Read PTE low word after tlbie to get final R/C values */
386 remove_revmap_chain(kvm, pte_index, rev, v, hpte[1]);
170 } 387 }
388 r = rev->guest_rpte;
389 unlock_hpte(hpte, 0);
390
391 vcpu->arch.gpr[4] = v;
392 vcpu->arch.gpr[5] = r;
171 return H_SUCCESS; 393 return H_SUCCESS;
172} 394}
173 395
@@ -175,78 +397,117 @@ long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
175{ 397{
176 struct kvm *kvm = vcpu->kvm; 398 struct kvm *kvm = vcpu->kvm;
177 unsigned long *args = &vcpu->arch.gpr[4]; 399 unsigned long *args = &vcpu->arch.gpr[4];
178 unsigned long *hp, tlbrb[4]; 400 unsigned long *hp, *hptes[4], tlbrb[4];
179 long int i, found; 401 long int i, j, k, n, found, indexes[4];
180 long int n_inval = 0; 402 unsigned long flags, req, pte_index, rcbits;
181 unsigned long flags, req, pte_index;
182 long int local = 0; 403 long int local = 0;
183 long int ret = H_SUCCESS; 404 long int ret = H_SUCCESS;
405 struct revmap_entry *rev, *revs[4];
184 406
185 if (atomic_read(&kvm->online_vcpus) == 1) 407 if (atomic_read(&kvm->online_vcpus) == 1)
186 local = 1; 408 local = 1;
187 for (i = 0; i < 4; ++i) { 409 for (i = 0; i < 4 && ret == H_SUCCESS; ) {
188 pte_index = args[i * 2]; 410 n = 0;
189 flags = pte_index >> 56; 411 for (; i < 4; ++i) {
190 pte_index &= ((1ul << 56) - 1); 412 j = i * 2;
191 req = flags >> 6; 413 pte_index = args[j];
192 flags &= 3; 414 flags = pte_index >> 56;
193 if (req == 3) 415 pte_index &= ((1ul << 56) - 1);
194 break; 416 req = flags >> 6;
195 if (req != 1 || flags == 3 || 417 flags &= 3;
196 pte_index >= (HPT_NPTEG << 3)) { 418 if (req == 3) { /* no more requests */
197 /* parameter error */ 419 i = 4;
198 args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
199 ret = H_PARAMETER;
200 break;
201 }
202 hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
203 while (!lock_hpte(hp, HPTE_V_HVLOCK))
204 cpu_relax();
205 found = 0;
206 if (hp[0] & HPTE_V_VALID) {
207 switch (flags & 3) {
208 case 0: /* absolute */
209 found = 1;
210 break; 420 break;
211 case 1: /* andcond */ 421 }
212 if (!(hp[0] & args[i * 2 + 1])) 422 if (req != 1 || flags == 3 || pte_index >= HPT_NPTE) {
213 found = 1; 423 /* parameter error */
424 args[j] = ((0xa0 | flags) << 56) + pte_index;
425 ret = H_PARAMETER;
214 break; 426 break;
215 case 2: /* AVPN */ 427 }
216 if ((hp[0] & ~0x7fUL) == args[i * 2 + 1]) 428 hp = (unsigned long *)
429 (kvm->arch.hpt_virt + (pte_index << 4));
430 /* to avoid deadlock, don't spin except for first */
431 if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
432 if (n)
433 break;
434 while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
435 cpu_relax();
436 }
437 found = 0;
438 if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) {
439 switch (flags & 3) {
440 case 0: /* absolute */
217 found = 1; 441 found = 1;
218 break; 442 break;
443 case 1: /* andcond */
444 if (!(hp[0] & args[j + 1]))
445 found = 1;
446 break;
447 case 2: /* AVPN */
448 if ((hp[0] & ~0x7fUL) == args[j + 1])
449 found = 1;
450 break;
451 }
452 }
453 if (!found) {
454 hp[0] &= ~HPTE_V_HVLOCK;
455 args[j] = ((0x90 | flags) << 56) + pte_index;
456 continue;
219 } 457 }
458
459 args[j] = ((0x80 | flags) << 56) + pte_index;
460 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
461
462 if (!(hp[0] & HPTE_V_VALID)) {
463 /* insert R and C bits from PTE */
464 rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
465 args[j] |= rcbits << (56 - 5);
466 continue;
467 }
468
469 hp[0] &= ~HPTE_V_VALID; /* leave it locked */
470 tlbrb[n] = compute_tlbie_rb(hp[0], hp[1], pte_index);
471 indexes[n] = j;
472 hptes[n] = hp;
473 revs[n] = rev;
474 ++n;
475 }
476
477 if (!n)
478 break;
479
480 /* Now that we've collected a batch, do the tlbies */
481 if (!local) {
482 while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
483 cpu_relax();
484 asm volatile("ptesync" : : : "memory");
485 for (k = 0; k < n; ++k)
486 asm volatile(PPC_TLBIE(%1,%0) : :
487 "r" (tlbrb[k]),
488 "r" (kvm->arch.lpid));
489 asm volatile("eieio; tlbsync; ptesync" : : : "memory");
490 kvm->arch.tlbie_lock = 0;
491 } else {
492 asm volatile("ptesync" : : : "memory");
493 for (k = 0; k < n; ++k)
494 asm volatile("tlbiel %0" : : "r" (tlbrb[k]));
495 asm volatile("ptesync" : : : "memory");
220 } 496 }
221 if (!found) { 497
222 hp[0] &= ~HPTE_V_HVLOCK; 498 /* Read PTE low words after tlbie to get final R/C values */
223 args[i * 2] = ((0x90 | flags) << 56) + pte_index; 499 for (k = 0; k < n; ++k) {
224 continue; 500 j = indexes[k];
501 pte_index = args[j] & ((1ul << 56) - 1);
502 hp = hptes[k];
503 rev = revs[k];
504 remove_revmap_chain(kvm, pte_index, rev, hp[0], hp[1]);
505 rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
506 args[j] |= rcbits << (56 - 5);
507 hp[0] = 0;
225 } 508 }
226 /* insert R and C bits from PTE */
227 flags |= (hp[1] >> 5) & 0x0c;
228 args[i * 2] = ((0x80 | flags) << 56) + pte_index;
229 tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
230 hp[0] = 0;
231 }
232 if (n_inval == 0)
233 return ret;
234
235 if (!local) {
236 while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
237 cpu_relax();
238 asm volatile("ptesync" : : : "memory");
239 for (i = 0; i < n_inval; ++i)
240 asm volatile(PPC_TLBIE(%1,%0)
241 : : "r" (tlbrb[i]), "r" (kvm->arch.lpid));
242 asm volatile("eieio; tlbsync; ptesync" : : : "memory");
243 kvm->arch.tlbie_lock = 0;
244 } else {
245 asm volatile("ptesync" : : : "memory");
246 for (i = 0; i < n_inval; ++i)
247 asm volatile("tlbiel %0" : : "r" (tlbrb[i]));
248 asm volatile("ptesync" : : : "memory");
249 } 509 }
510
250 return ret; 511 return ret;
251} 512}
252 513
@@ -256,40 +517,55 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
256{ 517{
257 struct kvm *kvm = vcpu->kvm; 518 struct kvm *kvm = vcpu->kvm;
258 unsigned long *hpte; 519 unsigned long *hpte;
259 unsigned long v, r, rb; 520 struct revmap_entry *rev;
521 unsigned long v, r, rb, mask, bits;
260 522
261 if (pte_index >= (HPT_NPTEG << 3)) 523 if (pte_index >= HPT_NPTE)
262 return H_PARAMETER; 524 return H_PARAMETER;
525
263 hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); 526 hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
264 while (!lock_hpte(hpte, HPTE_V_HVLOCK)) 527 while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
265 cpu_relax(); 528 cpu_relax();
266 if ((hpte[0] & HPTE_V_VALID) == 0 || 529 if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
267 ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) { 530 ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
268 hpte[0] &= ~HPTE_V_HVLOCK; 531 hpte[0] &= ~HPTE_V_HVLOCK;
269 return H_NOT_FOUND; 532 return H_NOT_FOUND;
270 } 533 }
534
271 if (atomic_read(&kvm->online_vcpus) == 1) 535 if (atomic_read(&kvm->online_vcpus) == 1)
272 flags |= H_LOCAL; 536 flags |= H_LOCAL;
273 v = hpte[0]; 537 v = hpte[0];
274 r = hpte[1] & ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N | 538 bits = (flags << 55) & HPTE_R_PP0;
275 HPTE_R_KEY_HI | HPTE_R_KEY_LO); 539 bits |= (flags << 48) & HPTE_R_KEY_HI;
276 r |= (flags << 55) & HPTE_R_PP0; 540 bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);
277 r |= (flags << 48) & HPTE_R_KEY_HI; 541
278 r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO); 542 /* Update guest view of 2nd HPTE dword */
279 rb = compute_tlbie_rb(v, r, pte_index); 543 mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
280 hpte[0] = v & ~HPTE_V_VALID; 544 HPTE_R_KEY_HI | HPTE_R_KEY_LO;
281 if (!(flags & H_LOCAL)) { 545 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
282 while(!try_lock_tlbie(&kvm->arch.tlbie_lock)) 546 if (rev) {
283 cpu_relax(); 547 r = (rev->guest_rpte & ~mask) | bits;
284 asm volatile("ptesync" : : : "memory"); 548 rev->guest_rpte = r;
285 asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync" 549 }
286 : : "r" (rb), "r" (kvm->arch.lpid)); 550 r = (hpte[1] & ~mask) | bits;
287 asm volatile("ptesync" : : : "memory"); 551
288 kvm->arch.tlbie_lock = 0; 552 /* Update HPTE */
289 } else { 553 if (v & HPTE_V_VALID) {
290 asm volatile("ptesync" : : : "memory"); 554 rb = compute_tlbie_rb(v, r, pte_index);
291 asm volatile("tlbiel %0" : : "r" (rb)); 555 hpte[0] = v & ~HPTE_V_VALID;
292 asm volatile("ptesync" : : : "memory"); 556 if (!(flags & H_LOCAL)) {
557 while(!try_lock_tlbie(&kvm->arch.tlbie_lock))
558 cpu_relax();
559 asm volatile("ptesync" : : : "memory");
560 asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
561 : : "r" (rb), "r" (kvm->arch.lpid));
562 asm volatile("ptesync" : : : "memory");
563 kvm->arch.tlbie_lock = 0;
564 } else {
565 asm volatile("ptesync" : : : "memory");
566 asm volatile("tlbiel %0" : : "r" (rb));
567 asm volatile("ptesync" : : : "memory");
568 }
293 } 569 }
294 hpte[1] = r; 570 hpte[1] = r;
295 eieio(); 571 eieio();
@@ -298,40 +574,243 @@ long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
298 return H_SUCCESS; 574 return H_SUCCESS;
299} 575}
300 576
301static unsigned long reverse_xlate(struct kvm *kvm, unsigned long realaddr)
302{
303 long int i;
304 unsigned long offset, rpn;
305
306 offset = realaddr & (kvm->arch.ram_psize - 1);
307 rpn = (realaddr - offset) >> PAGE_SHIFT;
308 for (i = 0; i < kvm->arch.ram_npages; ++i)
309 if (rpn == kvm->arch.ram_pginfo[i].pfn)
310 return (i << PAGE_SHIFT) + offset;
311 return HPTE_R_RPN; /* all 1s in the RPN field */
312}
313
314long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags, 577long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
315 unsigned long pte_index) 578 unsigned long pte_index)
316{ 579{
317 struct kvm *kvm = vcpu->kvm; 580 struct kvm *kvm = vcpu->kvm;
318 unsigned long *hpte, r; 581 unsigned long *hpte, v, r;
319 int i, n = 1; 582 int i, n = 1;
583 struct revmap_entry *rev = NULL;
320 584
321 if (pte_index >= (HPT_NPTEG << 3)) 585 if (pte_index >= HPT_NPTE)
322 return H_PARAMETER; 586 return H_PARAMETER;
323 if (flags & H_READ_4) { 587 if (flags & H_READ_4) {
324 pte_index &= ~3; 588 pte_index &= ~3;
325 n = 4; 589 n = 4;
326 } 590 }
591 rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
327 for (i = 0; i < n; ++i, ++pte_index) { 592 for (i = 0; i < n; ++i, ++pte_index) {
328 hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4)); 593 hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
594 v = hpte[0] & ~HPTE_V_HVLOCK;
329 r = hpte[1]; 595 r = hpte[1];
330 if ((flags & H_R_XLATE) && (hpte[0] & HPTE_V_VALID)) 596 if (v & HPTE_V_ABSENT) {
331 r = reverse_xlate(kvm, r & HPTE_R_RPN) | 597 v &= ~HPTE_V_ABSENT;
332 (r & ~HPTE_R_RPN); 598 v |= HPTE_V_VALID;
333 vcpu->arch.gpr[4 + i * 2] = hpte[0]; 599 }
600 if (v & HPTE_V_VALID)
601 r = rev[i].guest_rpte | (r & (HPTE_R_R | HPTE_R_C));
602 vcpu->arch.gpr[4 + i * 2] = v;
334 vcpu->arch.gpr[5 + i * 2] = r; 603 vcpu->arch.gpr[5 + i * 2] = r;
335 } 604 }
336 return H_SUCCESS; 605 return H_SUCCESS;
337} 606}
607
608void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
609 unsigned long pte_index)
610{
611 unsigned long rb;
612
613 hptep[0] &= ~HPTE_V_VALID;
614 rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
615 while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
616 cpu_relax();
617 asm volatile("ptesync" : : : "memory");
618 asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
619 : : "r" (rb), "r" (kvm->arch.lpid));
620 asm volatile("ptesync" : : : "memory");
621 kvm->arch.tlbie_lock = 0;
622}
623EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);
624
625void kvmppc_clear_ref_hpte(struct kvm *kvm, unsigned long *hptep,
626 unsigned long pte_index)
627{
628 unsigned long rb;
629 unsigned char rbyte;
630
631 rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
632 rbyte = (hptep[1] & ~HPTE_R_R) >> 8;
633 /* modify only the second-last byte, which contains the ref bit */
634 *((char *)hptep + 14) = rbyte;
635 while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
636 cpu_relax();
637 asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
638 : : "r" (rb), "r" (kvm->arch.lpid));
639 asm volatile("ptesync" : : : "memory");
640 kvm->arch.tlbie_lock = 0;
641}
642EXPORT_SYMBOL_GPL(kvmppc_clear_ref_hpte);
643
644static int slb_base_page_shift[4] = {
645 24, /* 16M */
646 16, /* 64k */
647 34, /* 16G */
648 20, /* 1M, unsupported */
649};
650
651long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
652 unsigned long valid)
653{
654 unsigned int i;
655 unsigned int pshift;
656 unsigned long somask;
657 unsigned long vsid, hash;
658 unsigned long avpn;
659 unsigned long *hpte;
660 unsigned long mask, val;
661 unsigned long v, r;
662
663 /* Get page shift, work out hash and AVPN etc. */
664 mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
665 val = 0;
666 pshift = 12;
667 if (slb_v & SLB_VSID_L) {
668 mask |= HPTE_V_LARGE;
669 val |= HPTE_V_LARGE;
670 pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
671 }
672 if (slb_v & SLB_VSID_B_1T) {
673 somask = (1UL << 40) - 1;
674 vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
675 vsid ^= vsid << 25;
676 } else {
677 somask = (1UL << 28) - 1;
678 vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
679 }
680 hash = (vsid ^ ((eaddr & somask) >> pshift)) & HPT_HASH_MASK;
681 avpn = slb_v & ~(somask >> 16); /* also includes B */
682 avpn |= (eaddr & somask) >> 16;
683
684 if (pshift >= 24)
685 avpn &= ~((1UL << (pshift - 16)) - 1);
686 else
687 avpn &= ~0x7fUL;
688 val |= avpn;
689
690 for (;;) {
691 hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7));
692
693 for (i = 0; i < 16; i += 2) {
694 /* Read the PTE racily */
695 v = hpte[i] & ~HPTE_V_HVLOCK;
696
697 /* Check valid/absent, hash, segment size and AVPN */
698 if (!(v & valid) || (v & mask) != val)
699 continue;
700
701 /* Lock the PTE and read it under the lock */
702 while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
703 cpu_relax();
704 v = hpte[i] & ~HPTE_V_HVLOCK;
705 r = hpte[i+1];
706
707 /*
708 * Check the HPTE again, including large page size
709 * Since we don't currently allow any MPSS (mixed
710 * page-size segment) page sizes, it is sufficient
711 * to check against the actual page size.
712 */
713 if ((v & valid) && (v & mask) == val &&
714 hpte_page_size(v, r) == (1ul << pshift))
715 /* Return with the HPTE still locked */
716 return (hash << 3) + (i >> 1);
717
718 /* Unlock and move on */
719 hpte[i] = v;
720 }
721
722 if (val & HPTE_V_SECONDARY)
723 break;
724 val |= HPTE_V_SECONDARY;
725 hash = hash ^ HPT_HASH_MASK;
726 }
727 return -1;
728}
729EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);
730
731/*
732 * Called in real mode to check whether an HPTE not found fault
733 * is due to accessing a paged-out page or an emulated MMIO page,
734 * or if a protection fault is due to accessing a page that the
735 * guest wanted read/write access to but which we made read-only.
736 * Returns a possibly modified status (DSISR) value if not
737 * (i.e. pass the interrupt to the guest),
738 * -1 to pass the fault up to host kernel mode code, -2 to do that
739 * and also load the instruction word (for MMIO emulation),
740 * or 0 if we should make the guest retry the access.
741 */
742long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
743 unsigned long slb_v, unsigned int status, bool data)
744{
745 struct kvm *kvm = vcpu->kvm;
746 long int index;
747 unsigned long v, r, gr;
748 unsigned long *hpte;
749 unsigned long valid;
750 struct revmap_entry *rev;
751 unsigned long pp, key;
752
753 /* For protection fault, expect to find a valid HPTE */
754 valid = HPTE_V_VALID;
755 if (status & DSISR_NOHPTE)
756 valid |= HPTE_V_ABSENT;
757
758 index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
759 if (index < 0) {
760 if (status & DSISR_NOHPTE)
761 return status; /* there really was no HPTE */
762 return 0; /* for prot fault, HPTE disappeared */
763 }
764 hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
765 v = hpte[0] & ~HPTE_V_HVLOCK;
766 r = hpte[1];
767 rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
768 gr = rev->guest_rpte;
769
770 unlock_hpte(hpte, v);
771
772 /* For not found, if the HPTE is valid by now, retry the instruction */
773 if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
774 return 0;
775
776 /* Check access permissions to the page */
777 pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
778 key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
779 status &= ~DSISR_NOHPTE; /* DSISR_NOHPTE == SRR1_ISI_NOPT */
780 if (!data) {
781 if (gr & (HPTE_R_N | HPTE_R_G))
782 return status | SRR1_ISI_N_OR_G;
783 if (!hpte_read_permission(pp, slb_v & key))
784 return status | SRR1_ISI_PROT;
785 } else if (status & DSISR_ISSTORE) {
786 /* check write permission */
787 if (!hpte_write_permission(pp, slb_v & key))
788 return status | DSISR_PROTFAULT;
789 } else {
790 if (!hpte_read_permission(pp, slb_v & key))
791 return status | DSISR_PROTFAULT;
792 }
793
794 /* Check storage key, if applicable */
795 if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
796 unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
797 if (status & DSISR_ISSTORE)
798 perm >>= 1;
799 if (perm & 1)
800 return status | DSISR_KEYFAULT;
801 }
802
803 /* Save HPTE info for virtual-mode handler */
804 vcpu->arch.pgfault_addr = addr;
805 vcpu->arch.pgfault_index = index;
806 vcpu->arch.pgfault_hpte[0] = v;
807 vcpu->arch.pgfault_hpte[1] = r;
808
809 /* Check the storage key to see if it is possibly emulated MMIO */
810 if (data && (vcpu->arch.shregs.msr & MSR_IR) &&
811 (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
812 (HPTE_R_KEY_HI | HPTE_R_KEY_LO))
813 return -2; /* MMIO emulation - load instr word */
814
815 return -1; /* send fault up to host kernel mode */
816}
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 5c8b26183f50..b70bf22a3ff3 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -601,6 +601,30 @@ kvmppc_interrupt:
601 601
602 stw r12,VCPU_TRAP(r9) 602 stw r12,VCPU_TRAP(r9)
603 603
604 /* Save HEIR (HV emulation assist reg) in last_inst
605 if this is an HEI (HV emulation interrupt, e40) */
606 li r3,KVM_INST_FETCH_FAILED
607BEGIN_FTR_SECTION
608 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
609 bne 11f
610 mfspr r3,SPRN_HEIR
611END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
61211: stw r3,VCPU_LAST_INST(r9)
613
614 /* these are volatile across C function calls */
615 mfctr r3
616 mfxer r4
617 std r3, VCPU_CTR(r9)
618 stw r4, VCPU_XER(r9)
619
620BEGIN_FTR_SECTION
621 /* If this is a page table miss then see if it's theirs or ours */
622 cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
623 beq kvmppc_hdsi
624 cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
625 beq kvmppc_hisi
626END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
627
604 /* See if this is a leftover HDEC interrupt */ 628 /* See if this is a leftover HDEC interrupt */
605 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER 629 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
606 bne 2f 630 bne 2f
@@ -608,7 +632,7 @@ kvmppc_interrupt:
608 cmpwi r3,0 632 cmpwi r3,0
609 bge ignore_hdec 633 bge ignore_hdec
6102: 6342:
611 /* See if this is something we can handle in real mode */ 635 /* See if this is an hcall we can handle in real mode */
612 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL 636 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
613 beq hcall_try_real_mode 637 beq hcall_try_real_mode
614 638
@@ -624,6 +648,7 @@ BEGIN_FTR_SECTION
6241: 6481:
625END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 649END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
626 650
651nohpte_cont:
627hcall_real_cont: /* r9 = vcpu, r12 = trap, r13 = paca */ 652hcall_real_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
628 /* Save DEC */ 653 /* Save DEC */
629 mfspr r5,SPRN_DEC 654 mfspr r5,SPRN_DEC
@@ -632,36 +657,21 @@ hcall_real_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
632 add r5,r5,r6 657 add r5,r5,r6
633 std r5,VCPU_DEC_EXPIRES(r9) 658 std r5,VCPU_DEC_EXPIRES(r9)
634 659
635 /* Save HEIR (HV emulation assist reg) in last_inst
636 if this is an HEI (HV emulation interrupt, e40) */
637 li r3,-1
638BEGIN_FTR_SECTION
639 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
640 bne 11f
641 mfspr r3,SPRN_HEIR
642END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
64311: stw r3,VCPU_LAST_INST(r9)
644
645 /* Save more register state */ 660 /* Save more register state */
646 mfxer r5
647 mfdar r6 661 mfdar r6
648 mfdsisr r7 662 mfdsisr r7
649 mfctr r8
650
651 stw r5, VCPU_XER(r9)
652 std r6, VCPU_DAR(r9) 663 std r6, VCPU_DAR(r9)
653 stw r7, VCPU_DSISR(r9) 664 stw r7, VCPU_DSISR(r9)
654 std r8, VCPU_CTR(r9)
655 /* grab HDAR & HDSISR if HV data storage interrupt (HDSI) */
656BEGIN_FTR_SECTION 665BEGIN_FTR_SECTION
666 /* don't overwrite fault_dar/fault_dsisr if HDSI */
657 cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE 667 cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
658 beq 6f 668 beq 6f
659END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 669END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
6607: std r6, VCPU_FAULT_DAR(r9) 670 std r6, VCPU_FAULT_DAR(r9)
661 stw r7, VCPU_FAULT_DSISR(r9) 671 stw r7, VCPU_FAULT_DSISR(r9)
662 672
663 /* Save guest CTRL register, set runlatch to 1 */ 673 /* Save guest CTRL register, set runlatch to 1 */
664 mfspr r6,SPRN_CTRLF 6746: mfspr r6,SPRN_CTRLF
665 stw r6,VCPU_CTRL(r9) 675 stw r6,VCPU_CTRL(r9)
666 andi. r0,r6,1 676 andi. r0,r6,1
667 bne 4f 677 bne 4f
@@ -1094,9 +1104,131 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1094 mtspr SPRN_HSRR1, r7 1104 mtspr SPRN_HSRR1, r7
1095 ba 0x500 1105 ba 0x500
1096 1106
10976: mfspr r6,SPRN_HDAR 1107/*
1098 mfspr r7,SPRN_HDSISR 1108 * Check whether an HDSI is an HPTE not found fault or something else.
1099 b 7b 1109 * If it is an HPTE not found fault that is due to the guest accessing
1110 * a page that they have mapped but which we have paged out, then
1111 * we continue on with the guest exit path. In all other cases,
1112 * reflect the HDSI to the guest as a DSI.
1113 */
1114kvmppc_hdsi:
1115 mfspr r4, SPRN_HDAR
1116 mfspr r6, SPRN_HDSISR
1117 /* HPTE not found fault or protection fault? */
1118 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
1119 beq 1f /* if not, send it to the guest */
1120 andi. r0, r11, MSR_DR /* data relocation enabled? */
1121 beq 3f
1122 clrrdi r0, r4, 28
1123 PPC_SLBFEE_DOT(r5, r0) /* if so, look up SLB */
1124 bne 1f /* if no SLB entry found */
11254: std r4, VCPU_FAULT_DAR(r9)
1126 stw r6, VCPU_FAULT_DSISR(r9)
1127
1128 /* Search the hash table. */
1129 mr r3, r9 /* vcpu pointer */
1130 li r7, 1 /* data fault */
1131 bl .kvmppc_hpte_hv_fault
1132 ld r9, HSTATE_KVM_VCPU(r13)
1133 ld r10, VCPU_PC(r9)
1134 ld r11, VCPU_MSR(r9)
1135 li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1136 cmpdi r3, 0 /* retry the instruction */
1137 beq 6f
1138 cmpdi r3, -1 /* handle in kernel mode */
1139 beq nohpte_cont
1140 cmpdi r3, -2 /* MMIO emulation; need instr word */
1141 beq 2f
1142
1143 /* Synthesize a DSI for the guest */
1144 ld r4, VCPU_FAULT_DAR(r9)
1145 mr r6, r3
11461: mtspr SPRN_DAR, r4
1147 mtspr SPRN_DSISR, r6
1148 mtspr SPRN_SRR0, r10
1149 mtspr SPRN_SRR1, r11
1150 li r10, BOOK3S_INTERRUPT_DATA_STORAGE
1151 li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
1152 rotldi r11, r11, 63
11536: ld r7, VCPU_CTR(r9)
1154 lwz r8, VCPU_XER(r9)
1155 mtctr r7
1156 mtxer r8
1157 mr r4, r9
1158 b fast_guest_return
1159
11603: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */
1161 ld r5, KVM_VRMA_SLB_V(r5)
1162 b 4b
1163
1164 /* If this is for emulated MMIO, load the instruction word */
11652: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */
1166
1167 /* Set guest mode to 'jump over instruction' so if lwz faults
1168 * we'll just continue at the next IP. */
1169 li r0, KVM_GUEST_MODE_SKIP
1170 stb r0, HSTATE_IN_GUEST(r13)
1171
1172 /* Do the access with MSR:DR enabled */
1173 mfmsr r3
1174 ori r4, r3, MSR_DR /* Enable paging for data */
1175 mtmsrd r4
1176 lwz r8, 0(r10)
1177 mtmsrd r3
1178
1179 /* Store the result */
1180 stw r8, VCPU_LAST_INST(r9)
1181
1182 /* Unset guest mode. */
1183 li r0, KVM_GUEST_MODE_NONE
1184 stb r0, HSTATE_IN_GUEST(r13)
1185 b nohpte_cont
1186
1187/*
1188 * Similarly for an HISI, reflect it to the guest as an ISI unless
1189 * it is an HPTE not found fault for a page that we have paged out.
1190 */
1191kvmppc_hisi:
1192 andis. r0, r11, SRR1_ISI_NOPT@h
1193 beq 1f
1194 andi. r0, r11, MSR_IR /* instruction relocation enabled? */
1195 beq 3f
1196 clrrdi r0, r10, 28
1197 PPC_SLBFEE_DOT(r5, r0) /* if so, look up SLB */
1198 bne 1f /* if no SLB entry found */
11994:
1200 /* Search the hash table. */
1201 mr r3, r9 /* vcpu pointer */
1202 mr r4, r10
1203 mr r6, r11
1204 li r7, 0 /* instruction fault */
1205 bl .kvmppc_hpte_hv_fault
1206 ld r9, HSTATE_KVM_VCPU(r13)
1207 ld r10, VCPU_PC(r9)
1208 ld r11, VCPU_MSR(r9)
1209 li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
1210 cmpdi r3, 0 /* retry the instruction */
1211 beq 6f
1212 cmpdi r3, -1 /* handle in kernel mode */
1213 beq nohpte_cont
1214
1215 /* Synthesize an ISI for the guest */
1216 mr r11, r3
12171: mtspr SPRN_SRR0, r10
1218 mtspr SPRN_SRR1, r11
1219 li r10, BOOK3S_INTERRUPT_INST_STORAGE
1220 li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
1221 rotldi r11, r11, 63
12226: ld r7, VCPU_CTR(r9)
1223 lwz r8, VCPU_XER(r9)
1224 mtctr r7
1225 mtxer r8
1226 mr r4, r9
1227 b fast_guest_return
1228
12293: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
1230 ld r5, KVM_VRMA_SLB_V(r6)
1231 b 4b
1100 1232
1101/* 1233/*
1102 * Try to handle an hcall in real mode. 1234 * Try to handle an hcall in real mode.
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c
index 7b0ee96c1bed..e70ef2d86431 100644
--- a/arch/powerpc/kvm/book3s_paired_singles.c
+++ b/arch/powerpc/kvm/book3s_paired_singles.c
@@ -196,7 +196,8 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
196 kvmppc_inject_pf(vcpu, addr, false); 196 kvmppc_inject_pf(vcpu, addr, false);
197 goto done_load; 197 goto done_load;
198 } else if (r == EMULATE_DO_MMIO) { 198 } else if (r == EMULATE_DO_MMIO) {
199 emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FPR | rs, len, 1); 199 emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs,
200 len, 1);
200 goto done_load; 201 goto done_load;
201 } 202 }
202 203
@@ -286,11 +287,13 @@ static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
286 kvmppc_inject_pf(vcpu, addr, false); 287 kvmppc_inject_pf(vcpu, addr, false);
287 goto done_load; 288 goto done_load;
288 } else if ((r == EMULATE_DO_MMIO) && w) { 289 } else if ((r == EMULATE_DO_MMIO) && w) {
289 emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FPR | rs, 4, 1); 290 emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FPR | rs,
291 4, 1);
290 vcpu->arch.qpr[rs] = tmp[1]; 292 vcpu->arch.qpr[rs] = tmp[1];
291 goto done_load; 293 goto done_load;
292 } else if (r == EMULATE_DO_MMIO) { 294 } else if (r == EMULATE_DO_MMIO) {
293 emulated = kvmppc_handle_load(run, vcpu, KVM_REG_FQPR | rs, 8, 1); 295 emulated = kvmppc_handle_load(run, vcpu, KVM_MMIO_REG_FQPR | rs,
296 8, 1);
294 goto done_load; 297 goto done_load;
295 } 298 }
296 299
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index e2cfb9e1e20e..7340e1090b77 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -51,15 +51,19 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
51#define MSR_USER32 MSR_USER 51#define MSR_USER32 MSR_USER
52#define MSR_USER64 MSR_USER 52#define MSR_USER64 MSR_USER
53#define HW_PAGE_SIZE PAGE_SIZE 53#define HW_PAGE_SIZE PAGE_SIZE
54#define __hard_irq_disable local_irq_disable
55#define __hard_irq_enable local_irq_enable
54#endif 56#endif
55 57
56void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 58void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
57{ 59{
58#ifdef CONFIG_PPC_BOOK3S_64 60#ifdef CONFIG_PPC_BOOK3S_64
59 memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow, sizeof(to_svcpu(vcpu)->slb)); 61 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
62 memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
60 memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu, 63 memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
61 sizeof(get_paca()->shadow_vcpu)); 64 sizeof(get_paca()->shadow_vcpu));
62 to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max; 65 svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
66 svcpu_put(svcpu);
63#endif 67#endif
64 68
65#ifdef CONFIG_PPC_BOOK3S_32 69#ifdef CONFIG_PPC_BOOK3S_32
@@ -70,10 +74,12 @@ void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
70void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) 74void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
71{ 75{
72#ifdef CONFIG_PPC_BOOK3S_64 76#ifdef CONFIG_PPC_BOOK3S_64
73 memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb, sizeof(to_svcpu(vcpu)->slb)); 77 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
78 memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
74 memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu, 79 memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
75 sizeof(get_paca()->shadow_vcpu)); 80 sizeof(get_paca()->shadow_vcpu));
76 to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max; 81 to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
82 svcpu_put(svcpu);
77#endif 83#endif
78 84
79 kvmppc_giveup_ext(vcpu, MSR_FP); 85 kvmppc_giveup_ext(vcpu, MSR_FP);
@@ -151,14 +157,16 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
151#ifdef CONFIG_PPC_BOOK3S_64 157#ifdef CONFIG_PPC_BOOK3S_64
152 if ((pvr >= 0x330000) && (pvr < 0x70330000)) { 158 if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
153 kvmppc_mmu_book3s_64_init(vcpu); 159 kvmppc_mmu_book3s_64_init(vcpu);
154 to_book3s(vcpu)->hior = 0xfff00000; 160 if (!to_book3s(vcpu)->hior_explicit)
161 to_book3s(vcpu)->hior = 0xfff00000;
155 to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL; 162 to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
156 vcpu->arch.cpu_type = KVM_CPU_3S_64; 163 vcpu->arch.cpu_type = KVM_CPU_3S_64;
157 } else 164 } else
158#endif 165#endif
159 { 166 {
160 kvmppc_mmu_book3s_32_init(vcpu); 167 kvmppc_mmu_book3s_32_init(vcpu);
161 to_book3s(vcpu)->hior = 0; 168 if (!to_book3s(vcpu)->hior_explicit)
169 to_book3s(vcpu)->hior = 0;
162 to_book3s(vcpu)->msr_mask = 0xffffffffULL; 170 to_book3s(vcpu)->msr_mask = 0xffffffffULL;
163 vcpu->arch.cpu_type = KVM_CPU_3S_32; 171 vcpu->arch.cpu_type = KVM_CPU_3S_32;
164 } 172 }
@@ -227,14 +235,14 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
227 hpage_offset /= 4; 235 hpage_offset /= 4;
228 236
229 get_page(hpage); 237 get_page(hpage);
230 page = kmap_atomic(hpage, KM_USER0); 238 page = kmap_atomic(hpage);
231 239
232 /* patch dcbz into reserved instruction, so we trap */ 240 /* patch dcbz into reserved instruction, so we trap */
233 for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++) 241 for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
234 if ((page[i] & 0xff0007ff) == INS_DCBZ) 242 if ((page[i] & 0xff0007ff) == INS_DCBZ)
235 page[i] &= 0xfffffff7; 243 page[i] &= 0xfffffff7;
236 244
237 kunmap_atomic(page, KM_USER0); 245 kunmap_atomic(page);
238 put_page(hpage); 246 put_page(hpage);
239} 247}
240 248
@@ -308,19 +316,22 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
308 316
309 if (page_found == -ENOENT) { 317 if (page_found == -ENOENT) {
310 /* Page not found in guest PTE entries */ 318 /* Page not found in guest PTE entries */
319 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
311 vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); 320 vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
312 vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr; 321 vcpu->arch.shared->dsisr = svcpu->fault_dsisr;
313 vcpu->arch.shared->msr |= 322 vcpu->arch.shared->msr |=
314 (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL); 323 (svcpu->shadow_srr1 & 0x00000000f8000000ULL);
324 svcpu_put(svcpu);
315 kvmppc_book3s_queue_irqprio(vcpu, vec); 325 kvmppc_book3s_queue_irqprio(vcpu, vec);
316 } else if (page_found == -EPERM) { 326 } else if (page_found == -EPERM) {
317 /* Storage protection */ 327 /* Storage protection */
328 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
318 vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); 329 vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
319 vcpu->arch.shared->dsisr = 330 vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE;
320 to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
321 vcpu->arch.shared->dsisr |= DSISR_PROTFAULT; 331 vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
322 vcpu->arch.shared->msr |= 332 vcpu->arch.shared->msr |=
323 (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL); 333 svcpu->shadow_srr1 & 0x00000000f8000000ULL;
334 svcpu_put(svcpu);
324 kvmppc_book3s_queue_irqprio(vcpu, vec); 335 kvmppc_book3s_queue_irqprio(vcpu, vec);
325 } else if (page_found == -EINVAL) { 336 } else if (page_found == -EINVAL) {
326 /* Page not found in guest SLB */ 337 /* Page not found in guest SLB */
@@ -517,24 +528,29 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
517 run->ready_for_interrupt_injection = 1; 528 run->ready_for_interrupt_injection = 1;
518 529
519 trace_kvm_book3s_exit(exit_nr, vcpu); 530 trace_kvm_book3s_exit(exit_nr, vcpu);
531 preempt_enable();
520 kvm_resched(vcpu); 532 kvm_resched(vcpu);
521 switch (exit_nr) { 533 switch (exit_nr) {
522 case BOOK3S_INTERRUPT_INST_STORAGE: 534 case BOOK3S_INTERRUPT_INST_STORAGE:
535 {
536 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
537 ulong shadow_srr1 = svcpu->shadow_srr1;
523 vcpu->stat.pf_instruc++; 538 vcpu->stat.pf_instruc++;
524 539
525#ifdef CONFIG_PPC_BOOK3S_32 540#ifdef CONFIG_PPC_BOOK3S_32
526 /* We set segments as unused segments when invalidating them. So 541 /* We set segments as unused segments when invalidating them. So
527 * treat the respective fault as segment fault. */ 542 * treat the respective fault as segment fault. */
528 if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] 543 if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) {
529 == SR_INVALID) {
530 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); 544 kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
531 r = RESUME_GUEST; 545 r = RESUME_GUEST;
546 svcpu_put(svcpu);
532 break; 547 break;
533 } 548 }
534#endif 549#endif
550 svcpu_put(svcpu);
535 551
536 /* only care about PTEG not found errors, but leave NX alone */ 552 /* only care about PTEG not found errors, but leave NX alone */
537 if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) { 553 if (shadow_srr1 & 0x40000000) {
538 r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr); 554 r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
539 vcpu->stat.sp_instruc++; 555 vcpu->stat.sp_instruc++;
540 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) && 556 } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
@@ -547,33 +563,37 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
547 kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); 563 kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
548 r = RESUME_GUEST; 564 r = RESUME_GUEST;
549 } else { 565 } else {
550 vcpu->arch.shared->msr |= 566 vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
551 to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
552 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); 567 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
553 r = RESUME_GUEST; 568 r = RESUME_GUEST;
554 } 569 }
555 break; 570 break;
571 }
556 case BOOK3S_INTERRUPT_DATA_STORAGE: 572 case BOOK3S_INTERRUPT_DATA_STORAGE:
557 { 573 {
558 ulong dar = kvmppc_get_fault_dar(vcpu); 574 ulong dar = kvmppc_get_fault_dar(vcpu);
575 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
576 u32 fault_dsisr = svcpu->fault_dsisr;
559 vcpu->stat.pf_storage++; 577 vcpu->stat.pf_storage++;
560 578
561#ifdef CONFIG_PPC_BOOK3S_32 579#ifdef CONFIG_PPC_BOOK3S_32
562 /* We set segments as unused segments when invalidating them. So 580 /* We set segments as unused segments when invalidating them. So
563 * treat the respective fault as segment fault. */ 581 * treat the respective fault as segment fault. */
564 if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) { 582 if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) {
565 kvmppc_mmu_map_segment(vcpu, dar); 583 kvmppc_mmu_map_segment(vcpu, dar);
566 r = RESUME_GUEST; 584 r = RESUME_GUEST;
585 svcpu_put(svcpu);
567 break; 586 break;
568 } 587 }
569#endif 588#endif
589 svcpu_put(svcpu);
570 590
571 /* The only case we need to handle is missing shadow PTEs */ 591 /* The only case we need to handle is missing shadow PTEs */
572 if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) { 592 if (fault_dsisr & DSISR_NOHPTE) {
573 r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); 593 r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
574 } else { 594 } else {
575 vcpu->arch.shared->dar = dar; 595 vcpu->arch.shared->dar = dar;
576 vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr; 596 vcpu->arch.shared->dsisr = fault_dsisr;
577 kvmppc_book3s_queue_irqprio(vcpu, exit_nr); 597 kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
578 r = RESUME_GUEST; 598 r = RESUME_GUEST;
579 } 599 }
@@ -609,10 +629,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
609 case BOOK3S_INTERRUPT_PROGRAM: 629 case BOOK3S_INTERRUPT_PROGRAM:
610 { 630 {
611 enum emulation_result er; 631 enum emulation_result er;
632 struct kvmppc_book3s_shadow_vcpu *svcpu;
612 ulong flags; 633 ulong flags;
613 634
614program_interrupt: 635program_interrupt:
615 flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull; 636 svcpu = svcpu_get(vcpu);
637 flags = svcpu->shadow_srr1 & 0x1f0000ull;
638 svcpu_put(svcpu);
616 639
617 if (vcpu->arch.shared->msr & MSR_PR) { 640 if (vcpu->arch.shared->msr & MSR_PR) {
618#ifdef EXIT_DEBUG 641#ifdef EXIT_DEBUG
@@ -740,20 +763,33 @@ program_interrupt:
740 r = RESUME_GUEST; 763 r = RESUME_GUEST;
741 break; 764 break;
742 default: 765 default:
766 {
767 struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
768 ulong shadow_srr1 = svcpu->shadow_srr1;
769 svcpu_put(svcpu);
743 /* Ugh - bork here! What did we get? */ 770 /* Ugh - bork here! What did we get? */
744 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n", 771 printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
745 exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1); 772 exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
746 r = RESUME_HOST; 773 r = RESUME_HOST;
747 BUG(); 774 BUG();
748 break; 775 break;
749 } 776 }
750 777 }
751 778
752 if (!(r & RESUME_HOST)) { 779 if (!(r & RESUME_HOST)) {
753 /* To avoid clobbering exit_reason, only check for signals if 780 /* To avoid clobbering exit_reason, only check for signals if
754 * we aren't already exiting to userspace for some other 781 * we aren't already exiting to userspace for some other
755 * reason. */ 782 * reason. */
783
784 /*
785 * Interrupts could be timers for the guest which we have to
786 * inject again, so let's postpone them until we're in the guest
787 * and if we really did time things so badly, then we just exit
788 * again due to a host external interrupt.
789 */
790 __hard_irq_disable();
756 if (signal_pending(current)) { 791 if (signal_pending(current)) {
792 __hard_irq_enable();
757#ifdef EXIT_DEBUG 793#ifdef EXIT_DEBUG
758 printk(KERN_EMERG "KVM: Going back to host\n"); 794 printk(KERN_EMERG "KVM: Going back to host\n");
759#endif 795#endif
@@ -761,10 +797,12 @@ program_interrupt:
761 run->exit_reason = KVM_EXIT_INTR; 797 run->exit_reason = KVM_EXIT_INTR;
762 r = -EINTR; 798 r = -EINTR;
763 } else { 799 } else {
800 preempt_disable();
801
764 /* In case an interrupt came in that was triggered 802 /* In case an interrupt came in that was triggered
765 * from userspace (like DEC), we need to check what 803 * from userspace (like DEC), we need to check what
766 * to inject now! */ 804 * to inject now! */
767 kvmppc_core_deliver_interrupts(vcpu); 805 kvmppc_core_prepare_to_enter(vcpu);
768 } 806 }
769 } 807 }
770 808
@@ -836,6 +874,38 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
836 return 0; 874 return 0;
837} 875}
838 876
877int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
878{
879 int r = -EINVAL;
880
881 switch (reg->id) {
882 case KVM_REG_PPC_HIOR:
883 r = put_user(to_book3s(vcpu)->hior, (u64 __user *)reg->addr);
884 break;
885 default:
886 break;
887 }
888
889 return r;
890}
891
892int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
893{
894 int r = -EINVAL;
895
896 switch (reg->id) {
897 case KVM_REG_PPC_HIOR:
898 r = get_user(to_book3s(vcpu)->hior, (u64 __user *)reg->addr);
899 if (!r)
900 to_book3s(vcpu)->hior_explicit = true;
901 break;
902 default:
903 break;
904 }
905
906 return r;
907}
908
839int kvmppc_core_check_processor_compat(void) 909int kvmppc_core_check_processor_compat(void)
840{ 910{
841 return 0; 911 return 0;
@@ -923,16 +993,31 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
923#endif 993#endif
924 ulong ext_msr; 994 ulong ext_msr;
925 995
996 preempt_disable();
997
926 /* Check if we can run the vcpu at all */ 998 /* Check if we can run the vcpu at all */
927 if (!vcpu->arch.sane) { 999 if (!vcpu->arch.sane) {
928 kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 1000 kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
929 return -EINVAL; 1001 ret = -EINVAL;
1002 goto out;
930 } 1003 }
931 1004
1005 kvmppc_core_prepare_to_enter(vcpu);
1006
1007 /*
1008 * Interrupts could be timers for the guest which we have to inject
1009 * again, so let's postpone them until we're in the guest and if we
1010 * really did time things so badly, then we just exit again due to
1011 * a host external interrupt.
1012 */
1013 __hard_irq_disable();
1014
932 /* No need to go into the guest when all we do is going out */ 1015 /* No need to go into the guest when all we do is going out */
933 if (signal_pending(current)) { 1016 if (signal_pending(current)) {
1017 __hard_irq_enable();
934 kvm_run->exit_reason = KVM_EXIT_INTR; 1018 kvm_run->exit_reason = KVM_EXIT_INTR;
935 return -EINTR; 1019 ret = -EINTR;
1020 goto out;
936 } 1021 }
937 1022
938 /* Save FPU state in stack */ 1023 /* Save FPU state in stack */
@@ -974,8 +1059,6 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
974 1059
975 kvm_guest_exit(); 1060 kvm_guest_exit();
976 1061
977 local_irq_disable();
978
979 current->thread.regs->msr = ext_msr; 1062 current->thread.regs->msr = ext_msr;
980 1063
981 /* Make sure we save the guest FPU/Altivec/VSX state */ 1064 /* Make sure we save the guest FPU/Altivec/VSX state */
@@ -1002,9 +1085,50 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
1002 current->thread.used_vsr = used_vsr; 1085 current->thread.used_vsr = used_vsr;
1003#endif 1086#endif
1004 1087
1088out:
1089 preempt_enable();
1005 return ret; 1090 return ret;
1006} 1091}
1007 1092
1093/*
1094 * Get (and clear) the dirty memory log for a memory slot.
1095 */
1096int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
1097 struct kvm_dirty_log *log)
1098{
1099 struct kvm_memory_slot *memslot;
1100 struct kvm_vcpu *vcpu;
1101 ulong ga, ga_end;
1102 int is_dirty = 0;
1103 int r;
1104 unsigned long n;
1105
1106 mutex_lock(&kvm->slots_lock);
1107
1108 r = kvm_get_dirty_log(kvm, log, &is_dirty);
1109 if (r)
1110 goto out;
1111
1112 /* If nothing is dirty, don't bother messing with page tables. */
1113 if (is_dirty) {
1114 memslot = id_to_memslot(kvm->memslots, log->slot);
1115
1116 ga = memslot->base_gfn << PAGE_SHIFT;
1117 ga_end = ga + (memslot->npages << PAGE_SHIFT);
1118
1119 kvm_for_each_vcpu(n, vcpu, kvm)
1120 kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);
1121
1122 n = kvm_dirty_bitmap_bytes(memslot);
1123 memset(memslot->dirty_bitmap, 0, n);
1124 }
1125
1126 r = 0;
1127out:
1128 mutex_unlock(&kvm->slots_lock);
1129 return r;
1130}
1131
1008int kvmppc_core_prepare_memory_region(struct kvm *kvm, 1132int kvmppc_core_prepare_memory_region(struct kvm *kvm,
1009 struct kvm_userspace_memory_region *mem) 1133 struct kvm_userspace_memory_region *mem)
1010{ 1134{
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index bb6c988f010a..ee9e1ee9c858 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -124,12 +124,6 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
124 vcpu->arch.shared->msr = new_msr; 124 vcpu->arch.shared->msr = new_msr;
125 125
126 kvmppc_mmu_msr_notify(vcpu, old_msr); 126 kvmppc_mmu_msr_notify(vcpu, old_msr);
127
128 if (vcpu->arch.shared->msr & MSR_WE) {
129 kvm_vcpu_block(vcpu);
130 kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
131 };
132
133 kvmppc_vcpu_sync_spe(vcpu); 127 kvmppc_vcpu_sync_spe(vcpu);
134} 128}
135 129
@@ -258,9 +252,11 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
258 allowed = vcpu->arch.shared->msr & MSR_ME; 252 allowed = vcpu->arch.shared->msr & MSR_ME;
259 msr_mask = 0; 253 msr_mask = 0;
260 break; 254 break;
261 case BOOKE_IRQPRIO_EXTERNAL:
262 case BOOKE_IRQPRIO_DECREMENTER: 255 case BOOKE_IRQPRIO_DECREMENTER:
263 case BOOKE_IRQPRIO_FIT: 256 case BOOKE_IRQPRIO_FIT:
257 keep_irq = true;
258 /* fall through */
259 case BOOKE_IRQPRIO_EXTERNAL:
264 allowed = vcpu->arch.shared->msr & MSR_EE; 260 allowed = vcpu->arch.shared->msr & MSR_EE;
265 allowed = allowed && !crit; 261 allowed = allowed && !crit;
266 msr_mask = MSR_CE|MSR_ME|MSR_DE; 262 msr_mask = MSR_CE|MSR_ME|MSR_DE;
@@ -276,7 +272,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
276 vcpu->arch.shared->srr1 = vcpu->arch.shared->msr; 272 vcpu->arch.shared->srr1 = vcpu->arch.shared->msr;
277 vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; 273 vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
278 if (update_esr == true) 274 if (update_esr == true)
279 vcpu->arch.esr = vcpu->arch.queued_esr; 275 vcpu->arch.shared->esr = vcpu->arch.queued_esr;
280 if (update_dear == true) 276 if (update_dear == true)
281 vcpu->arch.shared->dar = vcpu->arch.queued_dear; 277 vcpu->arch.shared->dar = vcpu->arch.queued_dear;
282 kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask); 278 kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);
@@ -288,13 +284,26 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
288 return allowed; 284 return allowed;
289} 285}
290 286
291/* Check pending exceptions and deliver one, if possible. */ 287static void update_timer_ints(struct kvm_vcpu *vcpu)
292void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) 288{
289 if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
290 kvmppc_core_queue_dec(vcpu);
291 else
292 kvmppc_core_dequeue_dec(vcpu);
293}
294
295static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
293{ 296{
294 unsigned long *pending = &vcpu->arch.pending_exceptions; 297 unsigned long *pending = &vcpu->arch.pending_exceptions;
295 unsigned long old_pending = vcpu->arch.pending_exceptions;
296 unsigned int priority; 298 unsigned int priority;
297 299
300 if (vcpu->requests) {
301 if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
302 smp_mb();
303 update_timer_ints(vcpu);
304 }
305 }
306
298 priority = __ffs(*pending); 307 priority = __ffs(*pending);
299 while (priority <= BOOKE_IRQPRIO_MAX) { 308 while (priority <= BOOKE_IRQPRIO_MAX) {
300 if (kvmppc_booke_irqprio_deliver(vcpu, priority)) 309 if (kvmppc_booke_irqprio_deliver(vcpu, priority))
@@ -306,10 +315,24 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
306 } 315 }
307 316
308 /* Tell the guest about our interrupt status */ 317 /* Tell the guest about our interrupt status */
309 if (*pending) 318 vcpu->arch.shared->int_pending = !!*pending;
310 vcpu->arch.shared->int_pending = 1; 319}
311 else if (old_pending) 320
312 vcpu->arch.shared->int_pending = 0; 321/* Check pending exceptions and deliver one, if possible. */
322void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
323{
324 WARN_ON_ONCE(!irqs_disabled());
325
326 kvmppc_core_check_exceptions(vcpu);
327
328 if (vcpu->arch.shared->msr & MSR_WE) {
329 local_irq_enable();
330 kvm_vcpu_block(vcpu);
331 local_irq_disable();
332
333 kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
334 kvmppc_core_check_exceptions(vcpu);
335 };
313} 336}
314 337
315int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) 338int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
@@ -322,11 +345,21 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
322 } 345 }
323 346
324 local_irq_disable(); 347 local_irq_disable();
348
349 kvmppc_core_prepare_to_enter(vcpu);
350
351 if (signal_pending(current)) {
352 kvm_run->exit_reason = KVM_EXIT_INTR;
353 ret = -EINTR;
354 goto out;
355 }
356
325 kvm_guest_enter(); 357 kvm_guest_enter();
326 ret = __kvmppc_vcpu_run(kvm_run, vcpu); 358 ret = __kvmppc_vcpu_run(kvm_run, vcpu);
327 kvm_guest_exit(); 359 kvm_guest_exit();
328 local_irq_enable();
329 360
361out:
362 local_irq_enable();
330 return ret; 363 return ret;
331} 364}
332 365
@@ -603,7 +636,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
603 636
604 local_irq_disable(); 637 local_irq_disable();
605 638
606 kvmppc_core_deliver_interrupts(vcpu); 639 kvmppc_core_prepare_to_enter(vcpu);
607 640
608 if (!(r & RESUME_HOST)) { 641 if (!(r & RESUME_HOST)) {
609 /* To avoid clobbering exit_reason, only check for signals if 642 /* To avoid clobbering exit_reason, only check for signals if
@@ -628,6 +661,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
628 vcpu->arch.pc = 0; 661 vcpu->arch.pc = 0;
629 vcpu->arch.shared->msr = 0; 662 vcpu->arch.shared->msr = 0;
630 vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS; 663 vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
664 vcpu->arch.shared->pir = vcpu->vcpu_id;
631 kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ 665 kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
632 666
633 vcpu->arch.shadow_pid = 1; 667 vcpu->arch.shadow_pid = 1;
@@ -662,10 +696,10 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
662 regs->sprg1 = vcpu->arch.shared->sprg1; 696 regs->sprg1 = vcpu->arch.shared->sprg1;
663 regs->sprg2 = vcpu->arch.shared->sprg2; 697 regs->sprg2 = vcpu->arch.shared->sprg2;
664 regs->sprg3 = vcpu->arch.shared->sprg3; 698 regs->sprg3 = vcpu->arch.shared->sprg3;
665 regs->sprg4 = vcpu->arch.sprg4; 699 regs->sprg4 = vcpu->arch.shared->sprg4;
666 regs->sprg5 = vcpu->arch.sprg5; 700 regs->sprg5 = vcpu->arch.shared->sprg5;
667 regs->sprg6 = vcpu->arch.sprg6; 701 regs->sprg6 = vcpu->arch.shared->sprg6;
668 regs->sprg7 = vcpu->arch.sprg7; 702 regs->sprg7 = vcpu->arch.shared->sprg7;
669 703
670 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) 704 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
671 regs->gpr[i] = kvmppc_get_gpr(vcpu, i); 705 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
@@ -690,10 +724,10 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
690 vcpu->arch.shared->sprg1 = regs->sprg1; 724 vcpu->arch.shared->sprg1 = regs->sprg1;
691 vcpu->arch.shared->sprg2 = regs->sprg2; 725 vcpu->arch.shared->sprg2 = regs->sprg2;
692 vcpu->arch.shared->sprg3 = regs->sprg3; 726 vcpu->arch.shared->sprg3 = regs->sprg3;
693 vcpu->arch.sprg4 = regs->sprg4; 727 vcpu->arch.shared->sprg4 = regs->sprg4;
694 vcpu->arch.sprg5 = regs->sprg5; 728 vcpu->arch.shared->sprg5 = regs->sprg5;
695 vcpu->arch.sprg6 = regs->sprg6; 729 vcpu->arch.shared->sprg6 = regs->sprg6;
696 vcpu->arch.sprg7 = regs->sprg7; 730 vcpu->arch.shared->sprg7 = regs->sprg7;
697 731
698 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) 732 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
699 kvmppc_set_gpr(vcpu, i, regs->gpr[i]); 733 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
@@ -711,7 +745,7 @@ static void get_sregs_base(struct kvm_vcpu *vcpu,
711 sregs->u.e.csrr0 = vcpu->arch.csrr0; 745 sregs->u.e.csrr0 = vcpu->arch.csrr0;
712 sregs->u.e.csrr1 = vcpu->arch.csrr1; 746 sregs->u.e.csrr1 = vcpu->arch.csrr1;
713 sregs->u.e.mcsr = vcpu->arch.mcsr; 747 sregs->u.e.mcsr = vcpu->arch.mcsr;
714 sregs->u.e.esr = vcpu->arch.esr; 748 sregs->u.e.esr = vcpu->arch.shared->esr;
715 sregs->u.e.dear = vcpu->arch.shared->dar; 749 sregs->u.e.dear = vcpu->arch.shared->dar;
716 sregs->u.e.tsr = vcpu->arch.tsr; 750 sregs->u.e.tsr = vcpu->arch.tsr;
717 sregs->u.e.tcr = vcpu->arch.tcr; 751 sregs->u.e.tcr = vcpu->arch.tcr;
@@ -729,28 +763,19 @@ static int set_sregs_base(struct kvm_vcpu *vcpu,
729 vcpu->arch.csrr0 = sregs->u.e.csrr0; 763 vcpu->arch.csrr0 = sregs->u.e.csrr0;
730 vcpu->arch.csrr1 = sregs->u.e.csrr1; 764 vcpu->arch.csrr1 = sregs->u.e.csrr1;
731 vcpu->arch.mcsr = sregs->u.e.mcsr; 765 vcpu->arch.mcsr = sregs->u.e.mcsr;
732 vcpu->arch.esr = sregs->u.e.esr; 766 vcpu->arch.shared->esr = sregs->u.e.esr;
733 vcpu->arch.shared->dar = sregs->u.e.dear; 767 vcpu->arch.shared->dar = sregs->u.e.dear;
734 vcpu->arch.vrsave = sregs->u.e.vrsave; 768 vcpu->arch.vrsave = sregs->u.e.vrsave;
735 vcpu->arch.tcr = sregs->u.e.tcr; 769 kvmppc_set_tcr(vcpu, sregs->u.e.tcr);
736 770
737 if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) 771 if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
738 vcpu->arch.dec = sregs->u.e.dec; 772 vcpu->arch.dec = sregs->u.e.dec;
739 773 kvmppc_emulate_dec(vcpu);
740 kvmppc_emulate_dec(vcpu); 774 }
741 775
742 if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) { 776 if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
743 /* 777 vcpu->arch.tsr = sregs->u.e.tsr;
744 * FIXME: existing KVM timer handling is incomplete. 778 update_timer_ints(vcpu);
745 * TSR cannot be read by the guest, and its value in
746 * vcpu->arch is always zero. For now, just handle
747 * the case where the caller is trying to inject a
748 * decrementer interrupt.
749 */
750
751 if ((sregs->u.e.tsr & TSR_DIS) &&
752 (vcpu->arch.tcr & TCR_DIE))
753 kvmppc_core_queue_dec(vcpu);
754 } 779 }
755 780
756 return 0; 781 return 0;
@@ -761,7 +786,7 @@ static void get_sregs_arch206(struct kvm_vcpu *vcpu,
761{ 786{
762 sregs->u.e.features |= KVM_SREGS_E_ARCH206; 787 sregs->u.e.features |= KVM_SREGS_E_ARCH206;
763 788
764 sregs->u.e.pir = 0; 789 sregs->u.e.pir = vcpu->vcpu_id;
765 sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0; 790 sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
766 sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1; 791 sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
767 sregs->u.e.decar = vcpu->arch.decar; 792 sregs->u.e.decar = vcpu->arch.decar;
@@ -774,7 +799,7 @@ static int set_sregs_arch206(struct kvm_vcpu *vcpu,
774 if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206)) 799 if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
775 return 0; 800 return 0;
776 801
777 if (sregs->u.e.pir != 0) 802 if (sregs->u.e.pir != vcpu->vcpu_id)
778 return -EINVAL; 803 return -EINVAL;
779 804
780 vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0; 805 vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
@@ -862,6 +887,16 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
862 return kvmppc_core_set_sregs(vcpu, sregs); 887 return kvmppc_core_set_sregs(vcpu, sregs);
863} 888}
864 889
890int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
891{
892 return -EINVAL;
893}
894
895int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
896{
897 return -EINVAL;
898}
899
865int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) 900int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
866{ 901{
867 return -ENOTSUPP; 902 return -ENOTSUPP;
@@ -906,6 +941,33 @@ void kvmppc_core_destroy_vm(struct kvm *kvm)
906{ 941{
907} 942}
908 943
944void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
945{
946 vcpu->arch.tcr = new_tcr;
947 update_timer_ints(vcpu);
948}
949
950void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
951{
952 set_bits(tsr_bits, &vcpu->arch.tsr);
953 smp_wmb();
954 kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
955 kvm_vcpu_kick(vcpu);
956}
957
958void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
959{
960 clear_bits(tsr_bits, &vcpu->arch.tsr);
961 update_timer_ints(vcpu);
962}
963
964void kvmppc_decrementer_func(unsigned long data)
965{
966 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
967
968 kvmppc_set_tsr_bits(vcpu, TSR_DIS);
969}
970
909int __init kvmppc_booke_init(void) 971int __init kvmppc_booke_init(void)
910{ 972{
911 unsigned long ivor[16]; 973 unsigned long ivor[16];
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 8e1fe33d64e5..2fe202705a3f 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -55,6 +55,10 @@ extern unsigned long kvmppc_booke_handlers;
55void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr); 55void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr);
56void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr); 56void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr);
57 57
58void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr);
59void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
60void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
61
58int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, 62int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
59 unsigned int inst, int *advance); 63 unsigned int inst, int *advance);
60int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt); 64int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt);
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 1260f5f24c0c..3e652da36534 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -13,6 +13,7 @@
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 * 14 *
15 * Copyright IBM Corp. 2008 15 * Copyright IBM Corp. 2008
16 * Copyright 2011 Freescale Semiconductor, Inc.
16 * 17 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com> 18 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */ 19 */
@@ -107,7 +108,7 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
107 case SPRN_DEAR: 108 case SPRN_DEAR:
108 vcpu->arch.shared->dar = spr_val; break; 109 vcpu->arch.shared->dar = spr_val; break;
109 case SPRN_ESR: 110 case SPRN_ESR:
110 vcpu->arch.esr = spr_val; break; 111 vcpu->arch.shared->esr = spr_val; break;
111 case SPRN_DBCR0: 112 case SPRN_DBCR0:
112 vcpu->arch.dbcr0 = spr_val; break; 113 vcpu->arch.dbcr0 = spr_val; break;
113 case SPRN_DBCR1: 114 case SPRN_DBCR1:
@@ -115,23 +116,23 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
115 case SPRN_DBSR: 116 case SPRN_DBSR:
116 vcpu->arch.dbsr &= ~spr_val; break; 117 vcpu->arch.dbsr &= ~spr_val; break;
117 case SPRN_TSR: 118 case SPRN_TSR:
118 vcpu->arch.tsr &= ~spr_val; break; 119 kvmppc_clr_tsr_bits(vcpu, spr_val);
120 break;
119 case SPRN_TCR: 121 case SPRN_TCR:
120 vcpu->arch.tcr = spr_val; 122 kvmppc_set_tcr(vcpu, spr_val);
121 kvmppc_emulate_dec(vcpu);
122 break; 123 break;
123 124
124 /* Note: SPRG4-7 are user-readable. These values are 125 /* Note: SPRG4-7 are user-readable. These values are
125 * loaded into the real SPRGs when resuming the 126 * loaded into the real SPRGs when resuming the
126 * guest. */ 127 * guest. */
127 case SPRN_SPRG4: 128 case SPRN_SPRG4:
128 vcpu->arch.sprg4 = spr_val; break; 129 vcpu->arch.shared->sprg4 = spr_val; break;
129 case SPRN_SPRG5: 130 case SPRN_SPRG5:
130 vcpu->arch.sprg5 = spr_val; break; 131 vcpu->arch.shared->sprg5 = spr_val; break;
131 case SPRN_SPRG6: 132 case SPRN_SPRG6:
132 vcpu->arch.sprg6 = spr_val; break; 133 vcpu->arch.shared->sprg6 = spr_val; break;
133 case SPRN_SPRG7: 134 case SPRN_SPRG7:
134 vcpu->arch.sprg7 = spr_val; break; 135 vcpu->arch.shared->sprg7 = spr_val; break;
135 136
136 case SPRN_IVPR: 137 case SPRN_IVPR:
137 vcpu->arch.ivpr = spr_val; 138 vcpu->arch.ivpr = spr_val;
@@ -202,13 +203,17 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
202 case SPRN_DEAR: 203 case SPRN_DEAR:
203 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break; 204 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break;
204 case SPRN_ESR: 205 case SPRN_ESR:
205 kvmppc_set_gpr(vcpu, rt, vcpu->arch.esr); break; 206 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->esr); break;
206 case SPRN_DBCR0: 207 case SPRN_DBCR0:
207 kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break; 208 kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break;
208 case SPRN_DBCR1: 209 case SPRN_DBCR1:
209 kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break; 210 kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break;
210 case SPRN_DBSR: 211 case SPRN_DBSR:
211 kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break; 212 kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break;
213 case SPRN_TSR:
214 kvmppc_set_gpr(vcpu, rt, vcpu->arch.tsr); break;
215 case SPRN_TCR:
216 kvmppc_set_gpr(vcpu, rt, vcpu->arch.tcr); break;
212 217
213 case SPRN_IVOR0: 218 case SPRN_IVOR0:
214 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]); 219 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]);
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 42f2fb1f66e9..10d8ef602e5c 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -402,19 +402,25 @@ lightweight_exit:
402 /* Save vcpu pointer for the exception handlers. */ 402 /* Save vcpu pointer for the exception handlers. */
403 mtspr SPRN_SPRG_WVCPU, r4 403 mtspr SPRN_SPRG_WVCPU, r4
404 404
405 lwz r5, VCPU_SHARED(r4)
406
405 /* Can't switch the stack pointer until after IVPR is switched, 407 /* Can't switch the stack pointer until after IVPR is switched,
406 * because host interrupt handlers would get confused. */ 408 * because host interrupt handlers would get confused. */
407 lwz r1, VCPU_GPR(r1)(r4) 409 lwz r1, VCPU_GPR(r1)(r4)
408 410
409 /* Host interrupt handlers may have clobbered these guest-readable 411 /*
410 * SPRGs, so we need to reload them here with the guest's values. */ 412 * Host interrupt handlers may have clobbered these
411 lwz r3, VCPU_SPRG4(r4) 413 * guest-readable SPRGs, or the guest kernel may have
414 * written directly to the shared area, so we
415 * need to reload them here with the guest's values.
416 */
417 lwz r3, VCPU_SHARED_SPRG4(r5)
412 mtspr SPRN_SPRG4W, r3 418 mtspr SPRN_SPRG4W, r3
413 lwz r3, VCPU_SPRG5(r4) 419 lwz r3, VCPU_SHARED_SPRG5(r5)
414 mtspr SPRN_SPRG5W, r3 420 mtspr SPRN_SPRG5W, r3
415 lwz r3, VCPU_SPRG6(r4) 421 lwz r3, VCPU_SHARED_SPRG6(r5)
416 mtspr SPRN_SPRG6W, r3 422 mtspr SPRN_SPRG6W, r3
417 lwz r3, VCPU_SPRG7(r4) 423 lwz r3, VCPU_SHARED_SPRG7(r5)
418 mtspr SPRN_SPRG7W, r3 424 mtspr SPRN_SPRG7W, r3
419 425
420#ifdef CONFIG_KVM_EXIT_TIMING 426#ifdef CONFIG_KVM_EXIT_TIMING
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 8c0d45a6faf7..ddcd896fa2ff 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -71,9 +71,6 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
71 vcpu->arch.pvr = mfspr(SPRN_PVR); 71 vcpu->arch.pvr = mfspr(SPRN_PVR);
72 vcpu_e500->svr = mfspr(SPRN_SVR); 72 vcpu_e500->svr = mfspr(SPRN_SVR);
73 73
74 /* Since booke kvm only support one core, update all vcpus' PIR to 0 */
75 vcpu->vcpu_id = 0;
76
77 vcpu->arch.cpu_type = KVM_CPU_E500V2; 74 vcpu->arch.cpu_type = KVM_CPU_E500V2;
78 75
79 return 0; 76 return 0;
@@ -118,12 +115,12 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
118 sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0; 115 sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
119 sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar; 116 sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;
120 117
121 sregs->u.e.mas0 = vcpu_e500->mas0; 118 sregs->u.e.mas0 = vcpu->arch.shared->mas0;
122 sregs->u.e.mas1 = vcpu_e500->mas1; 119 sregs->u.e.mas1 = vcpu->arch.shared->mas1;
123 sregs->u.e.mas2 = vcpu_e500->mas2; 120 sregs->u.e.mas2 = vcpu->arch.shared->mas2;
124 sregs->u.e.mas7_3 = ((u64)vcpu_e500->mas7 << 32) | vcpu_e500->mas3; 121 sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
125 sregs->u.e.mas4 = vcpu_e500->mas4; 122 sregs->u.e.mas4 = vcpu->arch.shared->mas4;
126 sregs->u.e.mas6 = vcpu_e500->mas6; 123 sregs->u.e.mas6 = vcpu->arch.shared->mas6;
127 124
128 sregs->u.e.mmucfg = mfspr(SPRN_MMUCFG); 125 sregs->u.e.mmucfg = mfspr(SPRN_MMUCFG);
129 sregs->u.e.tlbcfg[0] = vcpu_e500->tlb0cfg; 126 sregs->u.e.tlbcfg[0] = vcpu_e500->tlb0cfg;
@@ -151,13 +148,12 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
151 } 148 }
152 149
153 if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) { 150 if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
154 vcpu_e500->mas0 = sregs->u.e.mas0; 151 vcpu->arch.shared->mas0 = sregs->u.e.mas0;
155 vcpu_e500->mas1 = sregs->u.e.mas1; 152 vcpu->arch.shared->mas1 = sregs->u.e.mas1;
156 vcpu_e500->mas2 = sregs->u.e.mas2; 153 vcpu->arch.shared->mas2 = sregs->u.e.mas2;
157 vcpu_e500->mas7 = sregs->u.e.mas7_3 >> 32; 154 vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
158 vcpu_e500->mas3 = (u32)sregs->u.e.mas7_3; 155 vcpu->arch.shared->mas4 = sregs->u.e.mas4;
159 vcpu_e500->mas4 = sregs->u.e.mas4; 156 vcpu->arch.shared->mas6 = sregs->u.e.mas6;
160 vcpu_e500->mas6 = sregs->u.e.mas6;
161 } 157 }
162 158
163 if (!(sregs->u.e.features & KVM_SREGS_E_IVOR)) 159 if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
@@ -233,6 +229,10 @@ static int __init kvmppc_e500_init(void)
233 unsigned long ivor[3]; 229 unsigned long ivor[3];
234 unsigned long max_ivor = 0; 230 unsigned long max_ivor = 0;
235 231
232 r = kvmppc_core_check_processor_compat();
233 if (r)
234 return r;
235
236 r = kvmppc_booke_init(); 236 r = kvmppc_booke_init();
237 if (r) 237 if (r)
238 return r; 238 return r;
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index d48ae396f41e..6d0b2bd54fb0 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -89,19 +89,23 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
89 return EMULATE_FAIL; 89 return EMULATE_FAIL;
90 vcpu_e500->pid[2] = spr_val; break; 90 vcpu_e500->pid[2] = spr_val; break;
91 case SPRN_MAS0: 91 case SPRN_MAS0:
92 vcpu_e500->mas0 = spr_val; break; 92 vcpu->arch.shared->mas0 = spr_val; break;
93 case SPRN_MAS1: 93 case SPRN_MAS1:
94 vcpu_e500->mas1 = spr_val; break; 94 vcpu->arch.shared->mas1 = spr_val; break;
95 case SPRN_MAS2: 95 case SPRN_MAS2:
96 vcpu_e500->mas2 = spr_val; break; 96 vcpu->arch.shared->mas2 = spr_val; break;
97 case SPRN_MAS3: 97 case SPRN_MAS3:
98 vcpu_e500->mas3 = spr_val; break; 98 vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
99 vcpu->arch.shared->mas7_3 |= spr_val;
100 break;
99 case SPRN_MAS4: 101 case SPRN_MAS4:
100 vcpu_e500->mas4 = spr_val; break; 102 vcpu->arch.shared->mas4 = spr_val; break;
101 case SPRN_MAS6: 103 case SPRN_MAS6:
102 vcpu_e500->mas6 = spr_val; break; 104 vcpu->arch.shared->mas6 = spr_val; break;
103 case SPRN_MAS7: 105 case SPRN_MAS7:
104 vcpu_e500->mas7 = spr_val; break; 106 vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
107 vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
108 break;
105 case SPRN_L1CSR0: 109 case SPRN_L1CSR0:
106 vcpu_e500->l1csr0 = spr_val; 110 vcpu_e500->l1csr0 = spr_val;
107 vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC); 111 vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
@@ -143,6 +147,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
143{ 147{
144 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 148 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
145 int emulated = EMULATE_DONE; 149 int emulated = EMULATE_DONE;
150 unsigned long val;
146 151
147 switch (sprn) { 152 switch (sprn) {
148 case SPRN_PID: 153 case SPRN_PID:
@@ -152,20 +157,23 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
152 case SPRN_PID2: 157 case SPRN_PID2:
153 kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break; 158 kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break;
154 case SPRN_MAS0: 159 case SPRN_MAS0:
155 kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas0); break; 160 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas0); break;
156 case SPRN_MAS1: 161 case SPRN_MAS1:
157 kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas1); break; 162 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas1); break;
158 case SPRN_MAS2: 163 case SPRN_MAS2:
159 kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas2); break; 164 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas2); break;
160 case SPRN_MAS3: 165 case SPRN_MAS3:
161 kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas3); break; 166 val = (u32)vcpu->arch.shared->mas7_3;
167 kvmppc_set_gpr(vcpu, rt, val);
168 break;
162 case SPRN_MAS4: 169 case SPRN_MAS4:
163 kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas4); break; 170 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas4); break;
164 case SPRN_MAS6: 171 case SPRN_MAS6:
165 kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas6); break; 172 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas6); break;
166 case SPRN_MAS7: 173 case SPRN_MAS7:
167 kvmppc_set_gpr(vcpu, rt, vcpu_e500->mas7); break; 174 val = vcpu->arch.shared->mas7_3 >> 32;
168 175 kvmppc_set_gpr(vcpu, rt, val);
176 break;
169 case SPRN_TLB0CFG: 177 case SPRN_TLB0CFG:
170 kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break; 178 kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break;
171 case SPRN_TLB1CFG: 179 case SPRN_TLB1CFG:
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 13c432ea2fa8..6e53e4164de1 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -12,12 +12,19 @@
12 * published by the Free Software Foundation. 12 * published by the Free Software Foundation.
13 */ 13 */
14 14
15#include <linux/kernel.h>
15#include <linux/types.h> 16#include <linux/types.h>
16#include <linux/slab.h> 17#include <linux/slab.h>
17#include <linux/string.h> 18#include <linux/string.h>
18#include <linux/kvm.h> 19#include <linux/kvm.h>
19#include <linux/kvm_host.h> 20#include <linux/kvm_host.h>
20#include <linux/highmem.h> 21#include <linux/highmem.h>
22#include <linux/log2.h>
23#include <linux/uaccess.h>
24#include <linux/sched.h>
25#include <linux/rwsem.h>
26#include <linux/vmalloc.h>
27#include <linux/hugetlb.h>
21#include <asm/kvm_ppc.h> 28#include <asm/kvm_ppc.h>
22#include <asm/kvm_e500.h> 29#include <asm/kvm_e500.h>
23 30
@@ -26,7 +33,7 @@
26#include "trace.h" 33#include "trace.h"
27#include "timing.h" 34#include "timing.h"
28 35
29#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1) 36#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
30 37
31struct id { 38struct id {
32 unsigned long val; 39 unsigned long val;
@@ -63,7 +70,14 @@ static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);
63 * The valid range of shadow ID is [1..255] */ 70 * The valid range of shadow ID is [1..255] */
64static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid); 71static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);
65 72
66static unsigned int tlb1_entry_num; 73static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];
74
75static struct kvm_book3e_206_tlb_entry *get_entry(
76 struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
77{
78 int offset = vcpu_e500->gtlb_offset[tlbsel];
79 return &vcpu_e500->gtlb_arch[offset + entry];
80}
67 81
68/* 82/*
69 * Allocate a free shadow id and setup a valid sid mapping in given entry. 83 * Allocate a free shadow id and setup a valid sid mapping in given entry.
@@ -116,13 +130,11 @@ static inline int local_sid_lookup(struct id *entry)
116 return -1; 130 return -1;
117} 131}
118 132
119/* Invalidate all id mappings on local core */ 133/* Invalidate all id mappings on local core -- call with preempt disabled */
120static inline void local_sid_destroy_all(void) 134static inline void local_sid_destroy_all(void)
121{ 135{
122 preempt_disable();
123 __get_cpu_var(pcpu_last_used_sid) = 0; 136 __get_cpu_var(pcpu_last_used_sid) = 0;
124 memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids))); 137 memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
125 preempt_enable();
126} 138}
127 139
128static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500) 140static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
@@ -218,34 +230,13 @@ void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
218 preempt_enable(); 230 preempt_enable();
219} 231}
220 232
221void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu) 233static inline unsigned int gtlb0_get_next_victim(
222{
223 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
224 struct tlbe *tlbe;
225 int i, tlbsel;
226
227 printk("| %8s | %8s | %8s | %8s | %8s |\n",
228 "nr", "mas1", "mas2", "mas3", "mas7");
229
230 for (tlbsel = 0; tlbsel < 2; tlbsel++) {
231 printk("Guest TLB%d:\n", tlbsel);
232 for (i = 0; i < vcpu_e500->gtlb_size[tlbsel]; i++) {
233 tlbe = &vcpu_e500->gtlb_arch[tlbsel][i];
234 if (tlbe->mas1 & MAS1_VALID)
235 printk(" G[%d][%3d] | %08X | %08X | %08X | %08X |\n",
236 tlbsel, i, tlbe->mas1, tlbe->mas2,
237 tlbe->mas3, tlbe->mas7);
238 }
239 }
240}
241
242static inline unsigned int tlb0_get_next_victim(
243 struct kvmppc_vcpu_e500 *vcpu_e500) 234 struct kvmppc_vcpu_e500 *vcpu_e500)
244{ 235{
245 unsigned int victim; 236 unsigned int victim;
246 237
247 victim = vcpu_e500->gtlb_nv[0]++; 238 victim = vcpu_e500->gtlb_nv[0]++;
248 if (unlikely(vcpu_e500->gtlb_nv[0] >= KVM_E500_TLB0_WAY_NUM)) 239 if (unlikely(vcpu_e500->gtlb_nv[0] >= vcpu_e500->gtlb_params[0].ways))
249 vcpu_e500->gtlb_nv[0] = 0; 240 vcpu_e500->gtlb_nv[0] = 0;
250 241
251 return victim; 242 return victim;
@@ -254,12 +245,12 @@ static inline unsigned int tlb0_get_next_victim(
254static inline unsigned int tlb1_max_shadow_size(void) 245static inline unsigned int tlb1_max_shadow_size(void)
255{ 246{
256 /* reserve one entry for magic page */ 247 /* reserve one entry for magic page */
257 return tlb1_entry_num - tlbcam_index - 1; 248 return host_tlb_params[1].entries - tlbcam_index - 1;
258} 249}
259 250
260static inline int tlbe_is_writable(struct tlbe *tlbe) 251static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
261{ 252{
262 return tlbe->mas3 & (MAS3_SW|MAS3_UW); 253 return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
263} 254}
264 255
265static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode) 256static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
@@ -290,40 +281,66 @@ static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
290/* 281/*
291 * writing shadow tlb entry to host TLB 282 * writing shadow tlb entry to host TLB
292 */ 283 */
293static inline void __write_host_tlbe(struct tlbe *stlbe, uint32_t mas0) 284static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
285 uint32_t mas0)
294{ 286{
295 unsigned long flags; 287 unsigned long flags;
296 288
297 local_irq_save(flags); 289 local_irq_save(flags);
298 mtspr(SPRN_MAS0, mas0); 290 mtspr(SPRN_MAS0, mas0);
299 mtspr(SPRN_MAS1, stlbe->mas1); 291 mtspr(SPRN_MAS1, stlbe->mas1);
300 mtspr(SPRN_MAS2, stlbe->mas2); 292 mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
301 mtspr(SPRN_MAS3, stlbe->mas3); 293 mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
302 mtspr(SPRN_MAS7, stlbe->mas7); 294 mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
303 asm volatile("isync; tlbwe" : : : "memory"); 295 asm volatile("isync; tlbwe" : : : "memory");
304 local_irq_restore(flags); 296 local_irq_restore(flags);
297
298 trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
299 stlbe->mas2, stlbe->mas7_3);
300}
301
302/*
303 * Acquire a mas0 with victim hint, as if we just took a TLB miss.
304 *
305 * We don't care about the address we're searching for, other than that it's
306 * in the right set and is not present in the TLB. Using a zero PID and a
307 * userspace address means we don't have to set and then restore MAS5, or
308 * calculate a proper MAS6 value.
309 */
310static u32 get_host_mas0(unsigned long eaddr)
311{
312 unsigned long flags;
313 u32 mas0;
314
315 local_irq_save(flags);
316 mtspr(SPRN_MAS6, 0);
317 asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
318 mas0 = mfspr(SPRN_MAS0);
319 local_irq_restore(flags);
320
321 return mas0;
305} 322}
306 323
324/* sesel is for tlb1 only */
307static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500, 325static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
308 int tlbsel, int esel, struct tlbe *stlbe) 326 int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
309{ 327{
328 u32 mas0;
329
310 if (tlbsel == 0) { 330 if (tlbsel == 0) {
311 __write_host_tlbe(stlbe, 331 mas0 = get_host_mas0(stlbe->mas2);
312 MAS0_TLBSEL(0) | 332 __write_host_tlbe(stlbe, mas0);
313 MAS0_ESEL(esel & (KVM_E500_TLB0_WAY_NUM - 1)));
314 } else { 333 } else {
315 __write_host_tlbe(stlbe, 334 __write_host_tlbe(stlbe,
316 MAS0_TLBSEL(1) | 335 MAS0_TLBSEL(1) |
317 MAS0_ESEL(to_htlb1_esel(esel))); 336 MAS0_ESEL(to_htlb1_esel(sesel)));
318 } 337 }
319 trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
320 stlbe->mas3, stlbe->mas7);
321} 338}
322 339
323void kvmppc_map_magic(struct kvm_vcpu *vcpu) 340void kvmppc_map_magic(struct kvm_vcpu *vcpu)
324{ 341{
325 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 342 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
326 struct tlbe magic; 343 struct kvm_book3e_206_tlb_entry magic;
327 ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; 344 ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
328 unsigned int stid; 345 unsigned int stid;
329 pfn_t pfn; 346 pfn_t pfn;
@@ -337,9 +354,9 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu)
337 magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) | 354 magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
338 MAS1_TSIZE(BOOK3E_PAGESZ_4K); 355 MAS1_TSIZE(BOOK3E_PAGESZ_4K);
339 magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M; 356 magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
340 magic.mas3 = (pfn << PAGE_SHIFT) | 357 magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
341 MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR; 358 MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
342 magic.mas7 = pfn >> (32 - PAGE_SHIFT); 359 magic.mas8 = 0;
343 360
344 __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index)); 361 __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
345 preempt_enable(); 362 preempt_enable();
@@ -357,10 +374,11 @@ void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
357{ 374{
358} 375}
359 376
360static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500, 377static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500,
361 int tlbsel, int esel) 378 int tlbsel, int esel)
362{ 379{
363 struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel]; 380 struct kvm_book3e_206_tlb_entry *gtlbe =
381 get_entry(vcpu_e500, tlbsel, esel);
364 struct vcpu_id_table *idt = vcpu_e500->idt; 382 struct vcpu_id_table *idt = vcpu_e500->idt;
365 unsigned int pr, tid, ts, pid; 383 unsigned int pr, tid, ts, pid;
366 u32 val, eaddr; 384 u32 val, eaddr;
@@ -414,25 +432,57 @@ static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
414 preempt_enable(); 432 preempt_enable();
415} 433}
416 434
435static int tlb0_set_base(gva_t addr, int sets, int ways)
436{
437 int set_base;
438
439 set_base = (addr >> PAGE_SHIFT) & (sets - 1);
440 set_base *= ways;
441
442 return set_base;
443}
444
445static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
446{
447 return tlb0_set_base(addr, vcpu_e500->gtlb_params[0].sets,
448 vcpu_e500->gtlb_params[0].ways);
449}
450
451static unsigned int get_tlb_esel(struct kvm_vcpu *vcpu, int tlbsel)
452{
453 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
454 int esel = get_tlb_esel_bit(vcpu);
455
456 if (tlbsel == 0) {
457 esel &= vcpu_e500->gtlb_params[0].ways - 1;
458 esel += gtlb0_set_base(vcpu_e500, vcpu->arch.shared->mas2);
459 } else {
460 esel &= vcpu_e500->gtlb_params[tlbsel].entries - 1;
461 }
462
463 return esel;
464}
465
417/* Search the guest TLB for a matching entry. */ 466/* Search the guest TLB for a matching entry. */
418static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500, 467static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
419 gva_t eaddr, int tlbsel, unsigned int pid, int as) 468 gva_t eaddr, int tlbsel, unsigned int pid, int as)
420{ 469{
421 int size = vcpu_e500->gtlb_size[tlbsel]; 470 int size = vcpu_e500->gtlb_params[tlbsel].entries;
422 int set_base; 471 unsigned int set_base, offset;
423 int i; 472 int i;
424 473
425 if (tlbsel == 0) { 474 if (tlbsel == 0) {
426 int mask = size / KVM_E500_TLB0_WAY_NUM - 1; 475 set_base = gtlb0_set_base(vcpu_e500, eaddr);
427 set_base = (eaddr >> PAGE_SHIFT) & mask; 476 size = vcpu_e500->gtlb_params[0].ways;
428 set_base *= KVM_E500_TLB0_WAY_NUM;
429 size = KVM_E500_TLB0_WAY_NUM;
430 } else { 477 } else {
431 set_base = 0; 478 set_base = 0;
432 } 479 }
433 480
481 offset = vcpu_e500->gtlb_offset[tlbsel];
482
434 for (i = 0; i < size; i++) { 483 for (i = 0; i < size; i++) {
435 struct tlbe *tlbe = &vcpu_e500->gtlb_arch[tlbsel][set_base + i]; 484 struct kvm_book3e_206_tlb_entry *tlbe =
485 &vcpu_e500->gtlb_arch[offset + set_base + i];
436 unsigned int tid; 486 unsigned int tid;
437 487
438 if (eaddr < get_tlb_eaddr(tlbe)) 488 if (eaddr < get_tlb_eaddr(tlbe))
@@ -457,27 +507,55 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
457 return -1; 507 return -1;
458} 508}
459 509
460static inline void kvmppc_e500_priv_setup(struct tlbe_priv *priv, 510static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
461 struct tlbe *gtlbe, 511 struct kvm_book3e_206_tlb_entry *gtlbe,
462 pfn_t pfn) 512 pfn_t pfn)
463{ 513{
464 priv->pfn = pfn; 514 ref->pfn = pfn;
465 priv->flags = E500_TLB_VALID; 515 ref->flags = E500_TLB_VALID;
466 516
467 if (tlbe_is_writable(gtlbe)) 517 if (tlbe_is_writable(gtlbe))
468 priv->flags |= E500_TLB_DIRTY; 518 ref->flags |= E500_TLB_DIRTY;
469} 519}
470 520
471static inline void kvmppc_e500_priv_release(struct tlbe_priv *priv) 521static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
472{ 522{
473 if (priv->flags & E500_TLB_VALID) { 523 if (ref->flags & E500_TLB_VALID) {
474 if (priv->flags & E500_TLB_DIRTY) 524 if (ref->flags & E500_TLB_DIRTY)
475 kvm_release_pfn_dirty(priv->pfn); 525 kvm_release_pfn_dirty(ref->pfn);
476 else 526 else
477 kvm_release_pfn_clean(priv->pfn); 527 kvm_release_pfn_clean(ref->pfn);
528
529 ref->flags = 0;
530 }
531}
532
533static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
534{
535 int tlbsel = 0;
536 int i;
537
538 for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
539 struct tlbe_ref *ref =
540 &vcpu_e500->gtlb_priv[tlbsel][i].ref;
541 kvmppc_e500_ref_release(ref);
542 }
543}
544
545static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
546{
547 int stlbsel = 1;
548 int i;
549
550 kvmppc_e500_id_table_reset_all(vcpu_e500);
478 551
479 priv->flags = 0; 552 for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
553 struct tlbe_ref *ref =
554 &vcpu_e500->tlb_refs[stlbsel][i];
555 kvmppc_e500_ref_release(ref);
480 } 556 }
557
558 clear_tlb_privs(vcpu_e500);
481} 559}
482 560
483static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu, 561static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
@@ -488,59 +566,54 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
488 int tlbsel; 566 int tlbsel;
489 567
490 /* since we only have two TLBs, only lower bit is used. */ 568 /* since we only have two TLBs, only lower bit is used. */
491 tlbsel = (vcpu_e500->mas4 >> 28) & 0x1; 569 tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
492 victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0; 570 victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
493 pidsel = (vcpu_e500->mas4 >> 16) & 0xf; 571 pidsel = (vcpu->arch.shared->mas4 >> 16) & 0xf;
494 tsized = (vcpu_e500->mas4 >> 7) & 0x1f; 572 tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;
495 573
496 vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) 574 vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
497 | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); 575 | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
498 vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0) 576 vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
499 | MAS1_TID(vcpu_e500->pid[pidsel]) 577 | MAS1_TID(vcpu_e500->pid[pidsel])
500 | MAS1_TSIZE(tsized); 578 | MAS1_TSIZE(tsized);
501 vcpu_e500->mas2 = (eaddr & MAS2_EPN) 579 vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
502 | (vcpu_e500->mas4 & MAS2_ATTRIB_MASK); 580 | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
503 vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3; 581 vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
504 vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1) 582 vcpu->arch.shared->mas6 = (vcpu->arch.shared->mas6 & MAS6_SPID1)
505 | (get_cur_pid(vcpu) << 16) 583 | (get_cur_pid(vcpu) << 16)
506 | (as ? MAS6_SAS : 0); 584 | (as ? MAS6_SAS : 0);
507 vcpu_e500->mas7 = 0;
508} 585}
509 586
510static inline void kvmppc_e500_setup_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500, 587/* TID must be supplied by the caller */
511 struct tlbe *gtlbe, int tsize, 588static inline void kvmppc_e500_setup_stlbe(
512 struct tlbe_priv *priv, 589 struct kvmppc_vcpu_e500 *vcpu_e500,
513 u64 gvaddr, struct tlbe *stlbe) 590 struct kvm_book3e_206_tlb_entry *gtlbe,
591 int tsize, struct tlbe_ref *ref, u64 gvaddr,
592 struct kvm_book3e_206_tlb_entry *stlbe)
514{ 593{
515 pfn_t pfn = priv->pfn; 594 pfn_t pfn = ref->pfn;
516 unsigned int stid;
517 595
518 stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe), 596 BUG_ON(!(ref->flags & E500_TLB_VALID));
519 get_tlb_tid(gtlbe),
520 get_cur_pr(&vcpu_e500->vcpu), 0);
521 597
522 /* Force TS=1 IPROT=0 for all guest mappings. */ 598 /* Force TS=1 IPROT=0 for all guest mappings. */
523 stlbe->mas1 = MAS1_TSIZE(tsize) 599 stlbe->mas1 = MAS1_TSIZE(tsize) | MAS1_TS | MAS1_VALID;
524 | MAS1_TID(stid) | MAS1_TS | MAS1_VALID;
525 stlbe->mas2 = (gvaddr & MAS2_EPN) 600 stlbe->mas2 = (gvaddr & MAS2_EPN)
526 | e500_shadow_mas2_attrib(gtlbe->mas2, 601 | e500_shadow_mas2_attrib(gtlbe->mas2,
527 vcpu_e500->vcpu.arch.shared->msr & MSR_PR); 602 vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
528 stlbe->mas3 = ((pfn << PAGE_SHIFT) & MAS3_RPN) 603 stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT)
529 | e500_shadow_mas3_attrib(gtlbe->mas3, 604 | e500_shadow_mas3_attrib(gtlbe->mas7_3,
530 vcpu_e500->vcpu.arch.shared->msr & MSR_PR); 605 vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
531 stlbe->mas7 = (pfn >> (32 - PAGE_SHIFT)) & MAS7_RPN;
532} 606}
533 607
534
535static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, 608static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
536 u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel, 609 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
537 struct tlbe *stlbe) 610 int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
611 struct tlbe_ref *ref)
538{ 612{
539 struct kvm_memory_slot *slot; 613 struct kvm_memory_slot *slot;
540 unsigned long pfn, hva; 614 unsigned long pfn, hva;
541 int pfnmap = 0; 615 int pfnmap = 0;
542 int tsize = BOOK3E_PAGESZ_4K; 616 int tsize = BOOK3E_PAGESZ_4K;
543 struct tlbe_priv *priv;
544 617
545 /* 618 /*
546 * Translate guest physical to true physical, acquiring 619 * Translate guest physical to true physical, acquiring
@@ -621,12 +694,31 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
621 pfn &= ~(tsize_pages - 1); 694 pfn &= ~(tsize_pages - 1);
622 break; 695 break;
623 } 696 }
697 } else if (vma && hva >= vma->vm_start &&
698 (vma->vm_flags & VM_HUGETLB)) {
699 unsigned long psize = vma_kernel_pagesize(vma);
700
701 tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
702 MAS1_TSIZE_SHIFT;
703
704 /*
705 * Take the largest page size that satisfies both host
706 * and guest mapping
707 */
708 tsize = min(__ilog2(psize) - 10, tsize);
709
710 /*
711 * e500 doesn't implement the lowest tsize bit,
712 * or 1K pages.
713 */
714 tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
624 } 715 }
625 716
626 up_read(&current->mm->mmap_sem); 717 up_read(&current->mm->mmap_sem);
627 } 718 }
628 719
629 if (likely(!pfnmap)) { 720 if (likely(!pfnmap)) {
721 unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
630 pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn); 722 pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn);
631 if (is_error_pfn(pfn)) { 723 if (is_error_pfn(pfn)) {
632 printk(KERN_ERR "Couldn't get real page for gfn %lx!\n", 724 printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
@@ -634,45 +726,52 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
634 kvm_release_pfn_clean(pfn); 726 kvm_release_pfn_clean(pfn);
635 return; 727 return;
636 } 728 }
729
730 /* Align guest and physical address to page map boundaries */
731 pfn &= ~(tsize_pages - 1);
732 gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
637 } 733 }
638 734
639 /* Drop old priv and setup new one. */ 735 /* Drop old ref and setup new one. */
640 priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; 736 kvmppc_e500_ref_release(ref);
641 kvmppc_e500_priv_release(priv); 737 kvmppc_e500_ref_setup(ref, gtlbe, pfn);
642 kvmppc_e500_priv_setup(priv, gtlbe, pfn);
643 738
644 kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, priv, gvaddr, stlbe); 739 kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, ref, gvaddr, stlbe);
645} 740}
646 741
647/* XXX only map the one-one case, for now use TLB0 */ 742/* XXX only map the one-one case, for now use TLB0 */
648static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, 743static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
649 int esel, struct tlbe *stlbe) 744 int esel,
745 struct kvm_book3e_206_tlb_entry *stlbe)
650{ 746{
651 struct tlbe *gtlbe; 747 struct kvm_book3e_206_tlb_entry *gtlbe;
748 struct tlbe_ref *ref;
652 749
653 gtlbe = &vcpu_e500->gtlb_arch[0][esel]; 750 gtlbe = get_entry(vcpu_e500, 0, esel);
751 ref = &vcpu_e500->gtlb_priv[0][esel].ref;
654 752
655 kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe), 753 kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
656 get_tlb_raddr(gtlbe) >> PAGE_SHIFT, 754 get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
657 gtlbe, 0, esel, stlbe); 755 gtlbe, 0, stlbe, ref);
658
659 return esel;
660} 756}
661 757
662/* Caller must ensure that the specified guest TLB entry is safe to insert into 758/* Caller must ensure that the specified guest TLB entry is safe to insert into
663 * the shadow TLB. */ 759 * the shadow TLB. */
664/* XXX for both one-one and one-to-many , for now use TLB1 */ 760/* XXX for both one-one and one-to-many , for now use TLB1 */
665static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, 761static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
666 u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, struct tlbe *stlbe) 762 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
763 struct kvm_book3e_206_tlb_entry *stlbe)
667{ 764{
765 struct tlbe_ref *ref;
668 unsigned int victim; 766 unsigned int victim;
669 767
670 victim = vcpu_e500->gtlb_nv[1]++; 768 victim = vcpu_e500->host_tlb1_nv++;
671 769
672 if (unlikely(vcpu_e500->gtlb_nv[1] >= tlb1_max_shadow_size())) 770 if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
673 vcpu_e500->gtlb_nv[1] = 0; 771 vcpu_e500->host_tlb1_nv = 0;
674 772
675 kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim, stlbe); 773 ref = &vcpu_e500->tlb_refs[1][victim];
774 kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref);
676 775
677 return victim; 776 return victim;
678} 777}
@@ -689,7 +788,8 @@ static inline int kvmppc_e500_gtlbe_invalidate(
689 struct kvmppc_vcpu_e500 *vcpu_e500, 788 struct kvmppc_vcpu_e500 *vcpu_e500,
690 int tlbsel, int esel) 789 int tlbsel, int esel)
691{ 790{
692 struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel]; 791 struct kvm_book3e_206_tlb_entry *gtlbe =
792 get_entry(vcpu_e500, tlbsel, esel);
693 793
694 if (unlikely(get_tlb_iprot(gtlbe))) 794 if (unlikely(get_tlb_iprot(gtlbe)))
695 return -1; 795 return -1;
@@ -704,10 +804,10 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
704 int esel; 804 int esel;
705 805
706 if (value & MMUCSR0_TLB0FI) 806 if (value & MMUCSR0_TLB0FI)
707 for (esel = 0; esel < vcpu_e500->gtlb_size[0]; esel++) 807 for (esel = 0; esel < vcpu_e500->gtlb_params[0].entries; esel++)
708 kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel); 808 kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
709 if (value & MMUCSR0_TLB1FI) 809 if (value & MMUCSR0_TLB1FI)
710 for (esel = 0; esel < vcpu_e500->gtlb_size[1]; esel++) 810 for (esel = 0; esel < vcpu_e500->gtlb_params[1].entries; esel++)
711 kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel); 811 kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);
712 812
713 /* Invalidate all vcpu id mappings */ 813 /* Invalidate all vcpu id mappings */
@@ -732,7 +832,8 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
732 832
733 if (ia) { 833 if (ia) {
734 /* invalidate all entries */ 834 /* invalidate all entries */
735 for (esel = 0; esel < vcpu_e500->gtlb_size[tlbsel]; esel++) 835 for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries;
836 esel++)
736 kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel); 837 kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
737 } else { 838 } else {
738 ea &= 0xfffff000; 839 ea &= 0xfffff000;
@@ -752,18 +853,17 @@ int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
752{ 853{
753 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 854 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
754 int tlbsel, esel; 855 int tlbsel, esel;
755 struct tlbe *gtlbe; 856 struct kvm_book3e_206_tlb_entry *gtlbe;
756 857
757 tlbsel = get_tlb_tlbsel(vcpu_e500); 858 tlbsel = get_tlb_tlbsel(vcpu);
758 esel = get_tlb_esel(vcpu_e500, tlbsel); 859 esel = get_tlb_esel(vcpu, tlbsel);
759 860
760 gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel]; 861 gtlbe = get_entry(vcpu_e500, tlbsel, esel);
761 vcpu_e500->mas0 &= ~MAS0_NV(~0); 862 vcpu->arch.shared->mas0 &= ~MAS0_NV(~0);
762 vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); 863 vcpu->arch.shared->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
763 vcpu_e500->mas1 = gtlbe->mas1; 864 vcpu->arch.shared->mas1 = gtlbe->mas1;
764 vcpu_e500->mas2 = gtlbe->mas2; 865 vcpu->arch.shared->mas2 = gtlbe->mas2;
765 vcpu_e500->mas3 = gtlbe->mas3; 866 vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
766 vcpu_e500->mas7 = gtlbe->mas7;
767 867
768 return EMULATE_DONE; 868 return EMULATE_DONE;
769} 869}
@@ -771,10 +871,10 @@ int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
771int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb) 871int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
772{ 872{
773 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 873 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
774 int as = !!get_cur_sas(vcpu_e500); 874 int as = !!get_cur_sas(vcpu);
775 unsigned int pid = get_cur_spid(vcpu_e500); 875 unsigned int pid = get_cur_spid(vcpu);
776 int esel, tlbsel; 876 int esel, tlbsel;
777 struct tlbe *gtlbe = NULL; 877 struct kvm_book3e_206_tlb_entry *gtlbe = NULL;
778 gva_t ea; 878 gva_t ea;
779 879
780 ea = kvmppc_get_gpr(vcpu, rb); 880 ea = kvmppc_get_gpr(vcpu, rb);
@@ -782,70 +882,90 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
782 for (tlbsel = 0; tlbsel < 2; tlbsel++) { 882 for (tlbsel = 0; tlbsel < 2; tlbsel++) {
783 esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as); 883 esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
784 if (esel >= 0) { 884 if (esel >= 0) {
785 gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel]; 885 gtlbe = get_entry(vcpu_e500, tlbsel, esel);
786 break; 886 break;
787 } 887 }
788 } 888 }
789 889
790 if (gtlbe) { 890 if (gtlbe) {
791 vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel) 891 esel &= vcpu_e500->gtlb_params[tlbsel].ways - 1;
892
893 vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
792 | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); 894 | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
793 vcpu_e500->mas1 = gtlbe->mas1; 895 vcpu->arch.shared->mas1 = gtlbe->mas1;
794 vcpu_e500->mas2 = gtlbe->mas2; 896 vcpu->arch.shared->mas2 = gtlbe->mas2;
795 vcpu_e500->mas3 = gtlbe->mas3; 897 vcpu->arch.shared->mas7_3 = gtlbe->mas7_3;
796 vcpu_e500->mas7 = gtlbe->mas7;
797 } else { 898 } else {
798 int victim; 899 int victim;
799 900
800 /* since we only have two TLBs, only lower bit is used. */ 901 /* since we only have two TLBs, only lower bit is used. */
801 tlbsel = vcpu_e500->mas4 >> 28 & 0x1; 902 tlbsel = vcpu->arch.shared->mas4 >> 28 & 0x1;
802 victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0; 903 victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
803 904
804 vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) 905 vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel)
906 | MAS0_ESEL(victim)
805 | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); 907 | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
806 vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0) 908 vcpu->arch.shared->mas1 =
807 | (vcpu_e500->mas6 & (MAS6_SAS ? MAS1_TS : 0)) 909 (vcpu->arch.shared->mas6 & MAS6_SPID0)
808 | (vcpu_e500->mas4 & MAS4_TSIZED(~0)); 910 | (vcpu->arch.shared->mas6 & (MAS6_SAS ? MAS1_TS : 0))
809 vcpu_e500->mas2 &= MAS2_EPN; 911 | (vcpu->arch.shared->mas4 & MAS4_TSIZED(~0));
810 vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK; 912 vcpu->arch.shared->mas2 &= MAS2_EPN;
811 vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3; 913 vcpu->arch.shared->mas2 |= vcpu->arch.shared->mas4 &
812 vcpu_e500->mas7 = 0; 914 MAS2_ATTRIB_MASK;
915 vcpu->arch.shared->mas7_3 &= MAS3_U0 | MAS3_U1 |
916 MAS3_U2 | MAS3_U3;
813 } 917 }
814 918
815 kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS); 919 kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
816 return EMULATE_DONE; 920 return EMULATE_DONE;
817} 921}
818 922
923/* sesel is for tlb1 only */
924static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
925 struct kvm_book3e_206_tlb_entry *gtlbe,
926 struct kvm_book3e_206_tlb_entry *stlbe,
927 int stlbsel, int sesel)
928{
929 int stid;
930
931 preempt_disable();
932 stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe),
933 get_tlb_tid(gtlbe),
934 get_cur_pr(&vcpu_e500->vcpu), 0);
935
936 stlbe->mas1 |= MAS1_TID(stid);
937 write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
938 preempt_enable();
939}
940
819int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) 941int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
820{ 942{
821 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 943 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
822 struct tlbe *gtlbe; 944 struct kvm_book3e_206_tlb_entry *gtlbe;
823 int tlbsel, esel; 945 int tlbsel, esel;
824 946
825 tlbsel = get_tlb_tlbsel(vcpu_e500); 947 tlbsel = get_tlb_tlbsel(vcpu);
826 esel = get_tlb_esel(vcpu_e500, tlbsel); 948 esel = get_tlb_esel(vcpu, tlbsel);
827 949
828 gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel]; 950 gtlbe = get_entry(vcpu_e500, tlbsel, esel);
829 951
830 if (get_tlb_v(gtlbe)) 952 if (get_tlb_v(gtlbe))
831 kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel); 953 inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
832 954
833 gtlbe->mas1 = vcpu_e500->mas1; 955 gtlbe->mas1 = vcpu->arch.shared->mas1;
834 gtlbe->mas2 = vcpu_e500->mas2; 956 gtlbe->mas2 = vcpu->arch.shared->mas2;
835 gtlbe->mas3 = vcpu_e500->mas3; 957 gtlbe->mas7_3 = vcpu->arch.shared->mas7_3;
836 gtlbe->mas7 = vcpu_e500->mas7;
837 958
838 trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2, 959 trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
839 gtlbe->mas3, gtlbe->mas7); 960 gtlbe->mas2, gtlbe->mas7_3);
840 961
841 /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ 962 /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
842 if (tlbe_is_host_safe(vcpu, gtlbe)) { 963 if (tlbe_is_host_safe(vcpu, gtlbe)) {
843 struct tlbe stlbe; 964 struct kvm_book3e_206_tlb_entry stlbe;
844 int stlbsel, sesel; 965 int stlbsel, sesel;
845 u64 eaddr; 966 u64 eaddr;
846 u64 raddr; 967 u64 raddr;
847 968
848 preempt_disable();
849 switch (tlbsel) { 969 switch (tlbsel) {
850 case 0: 970 case 0:
851 /* TLB0 */ 971 /* TLB0 */
@@ -853,7 +973,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
853 gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K); 973 gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);
854 974
855 stlbsel = 0; 975 stlbsel = 0;
856 sesel = kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe); 976 kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
977 sesel = 0; /* unused */
857 978
858 break; 979 break;
859 980
@@ -874,8 +995,8 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
874 default: 995 default:
875 BUG(); 996 BUG();
876 } 997 }
877 write_host_tlbe(vcpu_e500, stlbsel, sesel, &stlbe); 998
878 preempt_enable(); 999 write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
879 } 1000 }
880 1001
881 kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); 1002 kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
@@ -914,9 +1035,11 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
914 gva_t eaddr) 1035 gva_t eaddr)
915{ 1036{
916 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 1037 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
917 struct tlbe *gtlbe = 1038 struct kvm_book3e_206_tlb_entry *gtlbe;
918 &vcpu_e500->gtlb_arch[tlbsel_of(index)][esel_of(index)]; 1039 u64 pgmask;
919 u64 pgmask = get_tlb_bytes(gtlbe) - 1; 1040
1041 gtlbe = get_entry(vcpu_e500, tlbsel_of(index), esel_of(index));
1042 pgmask = get_tlb_bytes(gtlbe) - 1;
920 1043
921 return get_tlb_raddr(gtlbe) | (eaddr & pgmask); 1044 return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
922} 1045}
@@ -930,22 +1053,21 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
930{ 1053{
931 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 1054 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
932 struct tlbe_priv *priv; 1055 struct tlbe_priv *priv;
933 struct tlbe *gtlbe, stlbe; 1056 struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
934 int tlbsel = tlbsel_of(index); 1057 int tlbsel = tlbsel_of(index);
935 int esel = esel_of(index); 1058 int esel = esel_of(index);
936 int stlbsel, sesel; 1059 int stlbsel, sesel;
937 1060
938 gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel]; 1061 gtlbe = get_entry(vcpu_e500, tlbsel, esel);
939 1062
940 preempt_disable();
941 switch (tlbsel) { 1063 switch (tlbsel) {
942 case 0: 1064 case 0:
943 stlbsel = 0; 1065 stlbsel = 0;
944 sesel = esel; 1066 sesel = 0; /* unused */
945 priv = &vcpu_e500->gtlb_priv[stlbsel][sesel]; 1067 priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
946 1068
947 kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K, 1069 kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K,
948 priv, eaddr, &stlbe); 1070 &priv->ref, eaddr, &stlbe);
949 break; 1071 break;
950 1072
951 case 1: { 1073 case 1: {
@@ -962,8 +1084,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
962 break; 1084 break;
963 } 1085 }
964 1086
965 write_host_tlbe(vcpu_e500, stlbsel, sesel, &stlbe); 1087 write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
966 preempt_enable();
967} 1088}
968 1089
969int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu, 1090int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
@@ -993,85 +1114,279 @@ void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
993 1114
994void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500) 1115void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
995{ 1116{
996 struct tlbe *tlbe; 1117 struct kvm_book3e_206_tlb_entry *tlbe;
997 1118
998 /* Insert large initial mapping for guest. */ 1119 /* Insert large initial mapping for guest. */
999 tlbe = &vcpu_e500->gtlb_arch[1][0]; 1120 tlbe = get_entry(vcpu_e500, 1, 0);
1000 tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M); 1121 tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
1001 tlbe->mas2 = 0; 1122 tlbe->mas2 = 0;
1002 tlbe->mas3 = E500_TLB_SUPER_PERM_MASK; 1123 tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK;
1003 tlbe->mas7 = 0;
1004 1124
1005 /* 4K map for serial output. Used by kernel wrapper. */ 1125 /* 4K map for serial output. Used by kernel wrapper. */
1006 tlbe = &vcpu_e500->gtlb_arch[1][1]; 1126 tlbe = get_entry(vcpu_e500, 1, 1);
1007 tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K); 1127 tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
1008 tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G; 1128 tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
1009 tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK; 1129 tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
1010 tlbe->mas7 = 0; 1130}
1131
1132static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
1133{
1134 int i;
1135
1136 clear_tlb_refs(vcpu_e500);
1137 kfree(vcpu_e500->gtlb_priv[0]);
1138 kfree(vcpu_e500->gtlb_priv[1]);
1139
1140 if (vcpu_e500->shared_tlb_pages) {
1141 vfree((void *)(round_down((uintptr_t)vcpu_e500->gtlb_arch,
1142 PAGE_SIZE)));
1143
1144 for (i = 0; i < vcpu_e500->num_shared_tlb_pages; i++) {
1145 set_page_dirty_lock(vcpu_e500->shared_tlb_pages[i]);
1146 put_page(vcpu_e500->shared_tlb_pages[i]);
1147 }
1148
1149 vcpu_e500->num_shared_tlb_pages = 0;
1150 vcpu_e500->shared_tlb_pages = NULL;
1151 } else {
1152 kfree(vcpu_e500->gtlb_arch);
1153 }
1154
1155 vcpu_e500->gtlb_arch = NULL;
1156}
1157
1158int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
1159 struct kvm_config_tlb *cfg)
1160{
1161 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
1162 struct kvm_book3e_206_tlb_params params;
1163 char *virt;
1164 struct page **pages;
1165 struct tlbe_priv *privs[2] = {};
1166 size_t array_len;
1167 u32 sets;
1168 int num_pages, ret, i;
1169
1170 if (cfg->mmu_type != KVM_MMU_FSL_BOOKE_NOHV)
1171 return -EINVAL;
1172
1173 if (copy_from_user(&params, (void __user *)(uintptr_t)cfg->params,
1174 sizeof(params)))
1175 return -EFAULT;
1176
1177 if (params.tlb_sizes[1] > 64)
1178 return -EINVAL;
1179 if (params.tlb_ways[1] != params.tlb_sizes[1])
1180 return -EINVAL;
1181 if (params.tlb_sizes[2] != 0 || params.tlb_sizes[3] != 0)
1182 return -EINVAL;
1183 if (params.tlb_ways[2] != 0 || params.tlb_ways[3] != 0)
1184 return -EINVAL;
1185
1186 if (!is_power_of_2(params.tlb_ways[0]))
1187 return -EINVAL;
1188
1189 sets = params.tlb_sizes[0] >> ilog2(params.tlb_ways[0]);
1190 if (!is_power_of_2(sets))
1191 return -EINVAL;
1192
1193 array_len = params.tlb_sizes[0] + params.tlb_sizes[1];
1194 array_len *= sizeof(struct kvm_book3e_206_tlb_entry);
1195
1196 if (cfg->array_len < array_len)
1197 return -EINVAL;
1198
1199 num_pages = DIV_ROUND_UP(cfg->array + array_len - 1, PAGE_SIZE) -
1200 cfg->array / PAGE_SIZE;
1201 pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
1202 if (!pages)
1203 return -ENOMEM;
1204
1205 ret = get_user_pages_fast(cfg->array, num_pages, 1, pages);
1206 if (ret < 0)
1207 goto err_pages;
1208
1209 if (ret != num_pages) {
1210 num_pages = ret;
1211 ret = -EFAULT;
1212 goto err_put_page;
1213 }
1214
1215 virt = vmap(pages, num_pages, VM_MAP, PAGE_KERNEL);
1216 if (!virt)
1217 goto err_put_page;
1218
1219 privs[0] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[0],
1220 GFP_KERNEL);
1221 privs[1] = kzalloc(sizeof(struct tlbe_priv) * params.tlb_sizes[1],
1222 GFP_KERNEL);
1223
1224 if (!privs[0] || !privs[1])
1225 goto err_put_page;
1226
1227 free_gtlb(vcpu_e500);
1228
1229 vcpu_e500->gtlb_priv[0] = privs[0];
1230 vcpu_e500->gtlb_priv[1] = privs[1];
1231
1232 vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
1233 (virt + (cfg->array & (PAGE_SIZE - 1)));
1234
1235 vcpu_e500->gtlb_params[0].entries = params.tlb_sizes[0];
1236 vcpu_e500->gtlb_params[1].entries = params.tlb_sizes[1];
1237
1238 vcpu_e500->gtlb_offset[0] = 0;
1239 vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];
1240
1241 vcpu_e500->tlb0cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
1242 if (params.tlb_sizes[0] <= 2048)
1243 vcpu_e500->tlb0cfg |= params.tlb_sizes[0];
1244 vcpu_e500->tlb0cfg |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;
1245
1246 vcpu_e500->tlb1cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
1247 vcpu_e500->tlb1cfg |= params.tlb_sizes[1];
1248 vcpu_e500->tlb1cfg |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;
1249
1250 vcpu_e500->shared_tlb_pages = pages;
1251 vcpu_e500->num_shared_tlb_pages = num_pages;
1252
1253 vcpu_e500->gtlb_params[0].ways = params.tlb_ways[0];
1254 vcpu_e500->gtlb_params[0].sets = sets;
1255
1256 vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
1257 vcpu_e500->gtlb_params[1].sets = 1;
1258
1259 return 0;
1260
1261err_put_page:
1262 kfree(privs[0]);
1263 kfree(privs[1]);
1264
1265 for (i = 0; i < num_pages; i++)
1266 put_page(pages[i]);
1267
1268err_pages:
1269 kfree(pages);
1270 return ret;
1271}
1272
1273int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
1274 struct kvm_dirty_tlb *dirty)
1275{
1276 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
1277
1278 clear_tlb_refs(vcpu_e500);
1279 return 0;
1011} 1280}
1012 1281
1013int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) 1282int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
1014{ 1283{
1015 tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF; 1284 int entry_size = sizeof(struct kvm_book3e_206_tlb_entry);
1016 1285 int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE;
1017 vcpu_e500->gtlb_size[0] = KVM_E500_TLB0_SIZE; 1286
1018 vcpu_e500->gtlb_arch[0] = 1287 host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
1019 kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL); 1288 host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
1020 if (vcpu_e500->gtlb_arch[0] == NULL) 1289
1021 goto err_out; 1290 /*
1022 1291 * This should never happen on real e500 hardware, but is
1023 vcpu_e500->gtlb_size[1] = KVM_E500_TLB1_SIZE; 1292 * architecturally possible -- e.g. in some weird nested
1024 vcpu_e500->gtlb_arch[1] = 1293 * virtualization case.
1025 kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL); 1294 */
1026 if (vcpu_e500->gtlb_arch[1] == NULL) 1295 if (host_tlb_params[0].entries == 0 ||
1027 goto err_out_guest0; 1296 host_tlb_params[1].entries == 0) {
1028 1297 pr_err("%s: need to know host tlb size\n", __func__);
1029 vcpu_e500->gtlb_priv[0] = (struct tlbe_priv *) 1298 return -ENODEV;
1030 kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB0_SIZE, GFP_KERNEL); 1299 }
1031 if (vcpu_e500->gtlb_priv[0] == NULL) 1300
1032 goto err_out_guest1; 1301 host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
1033 vcpu_e500->gtlb_priv[1] = (struct tlbe_priv *) 1302 TLBnCFG_ASSOC_SHIFT;
1034 kzalloc(sizeof(struct tlbe_priv) * KVM_E500_TLB1_SIZE, GFP_KERNEL); 1303 host_tlb_params[1].ways = host_tlb_params[1].entries;
1035 1304
1036 if (vcpu_e500->gtlb_priv[1] == NULL) 1305 if (!is_power_of_2(host_tlb_params[0].entries) ||
1037 goto err_out_priv0; 1306 !is_power_of_2(host_tlb_params[0].ways) ||
1307 host_tlb_params[0].entries < host_tlb_params[0].ways ||
1308 host_tlb_params[0].ways == 0) {
1309 pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
1310 __func__, host_tlb_params[0].entries,
1311 host_tlb_params[0].ways);
1312 return -ENODEV;
1313 }
1314
1315 host_tlb_params[0].sets =
1316 host_tlb_params[0].entries / host_tlb_params[0].ways;
1317 host_tlb_params[1].sets = 1;
1318
1319 vcpu_e500->gtlb_params[0].entries = KVM_E500_TLB0_SIZE;
1320 vcpu_e500->gtlb_params[1].entries = KVM_E500_TLB1_SIZE;
1321
1322 vcpu_e500->gtlb_params[0].ways = KVM_E500_TLB0_WAY_NUM;
1323 vcpu_e500->gtlb_params[0].sets =
1324 KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM;
1325
1326 vcpu_e500->gtlb_params[1].ways = KVM_E500_TLB1_SIZE;
1327 vcpu_e500->gtlb_params[1].sets = 1;
1328
1329 vcpu_e500->gtlb_arch = kmalloc(entries * entry_size, GFP_KERNEL);
1330 if (!vcpu_e500->gtlb_arch)
1331 return -ENOMEM;
1332
1333 vcpu_e500->gtlb_offset[0] = 0;
1334 vcpu_e500->gtlb_offset[1] = KVM_E500_TLB0_SIZE;
1335
1336 vcpu_e500->tlb_refs[0] =
1337 kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
1338 GFP_KERNEL);
1339 if (!vcpu_e500->tlb_refs[0])
1340 goto err;
1341
1342 vcpu_e500->tlb_refs[1] =
1343 kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
1344 GFP_KERNEL);
1345 if (!vcpu_e500->tlb_refs[1])
1346 goto err;
1347
1348 vcpu_e500->gtlb_priv[0] = kzalloc(sizeof(struct tlbe_ref) *
1349 vcpu_e500->gtlb_params[0].entries,
1350 GFP_KERNEL);
1351 if (!vcpu_e500->gtlb_priv[0])
1352 goto err;
1353
1354 vcpu_e500->gtlb_priv[1] = kzalloc(sizeof(struct tlbe_ref) *
1355 vcpu_e500->gtlb_params[1].entries,
1356 GFP_KERNEL);
1357 if (!vcpu_e500->gtlb_priv[1])
1358 goto err;
1038 1359
1039 if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL) 1360 if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
1040 goto err_out_priv1; 1361 goto err;
1041 1362
1042 /* Init TLB configuration register */ 1363 /* Init TLB configuration register */
1043 vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL; 1364 vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) &
1044 vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_size[0]; 1365 ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
1045 vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL; 1366 vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[0].entries;
1046 vcpu_e500->tlb1cfg |= vcpu_e500->gtlb_size[1]; 1367 vcpu_e500->tlb0cfg |=
1368 vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT;
1369
1370 vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) &
1371 ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
1372 vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[1].entries;
1373 vcpu_e500->tlb0cfg |=
1374 vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT;
1047 1375
1048 return 0; 1376 return 0;
1049 1377
1050err_out_priv1: 1378err:
1051 kfree(vcpu_e500->gtlb_priv[1]); 1379 free_gtlb(vcpu_e500);
1052err_out_priv0: 1380 kfree(vcpu_e500->tlb_refs[0]);
1053 kfree(vcpu_e500->gtlb_priv[0]); 1381 kfree(vcpu_e500->tlb_refs[1]);
1054err_out_guest1:
1055 kfree(vcpu_e500->gtlb_arch[1]);
1056err_out_guest0:
1057 kfree(vcpu_e500->gtlb_arch[0]);
1058err_out:
1059 return -1; 1382 return -1;
1060} 1383}
1061 1384
1062void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) 1385void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
1063{ 1386{
1064 int stlbsel, i; 1387 free_gtlb(vcpu_e500);
1065
1066 /* release all privs */
1067 for (stlbsel = 0; stlbsel < 2; stlbsel++)
1068 for (i = 0; i < vcpu_e500->gtlb_size[stlbsel]; i++) {
1069 struct tlbe_priv *priv =
1070 &vcpu_e500->gtlb_priv[stlbsel][i];
1071 kvmppc_e500_priv_release(priv);
1072 }
1073
1074 kvmppc_e500_id_table_free(vcpu_e500); 1388 kvmppc_e500_id_table_free(vcpu_e500);
1075 kfree(vcpu_e500->gtlb_arch[1]); 1389
1076 kfree(vcpu_e500->gtlb_arch[0]); 1390 kfree(vcpu_e500->tlb_refs[0]);
1391 kfree(vcpu_e500->tlb_refs[1]);
1077} 1392}
diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h
index 59b88e99a235..5c6d2d7bf058 100644
--- a/arch/powerpc/kvm/e500_tlb.h
+++ b/arch/powerpc/kvm/e500_tlb.h
@@ -20,13 +20,9 @@
20#include <asm/tlb.h> 20#include <asm/tlb.h>
21#include <asm/kvm_e500.h> 21#include <asm/kvm_e500.h>
22 22
23#define KVM_E500_TLB0_WAY_SIZE_BIT 7 /* Fixed */ 23/* This geometry is the legacy default -- can be overridden by userspace */
24#define KVM_E500_TLB0_WAY_SIZE (1UL << KVM_E500_TLB0_WAY_SIZE_BIT) 24#define KVM_E500_TLB0_WAY_SIZE 128
25#define KVM_E500_TLB0_WAY_SIZE_MASK (KVM_E500_TLB0_WAY_SIZE - 1) 25#define KVM_E500_TLB0_WAY_NUM 2
26
27#define KVM_E500_TLB0_WAY_NUM_BIT 1 /* No greater than 7 */
28#define KVM_E500_TLB0_WAY_NUM (1UL << KVM_E500_TLB0_WAY_NUM_BIT)
29#define KVM_E500_TLB0_WAY_NUM_MASK (KVM_E500_TLB0_WAY_NUM - 1)
30 26
31#define KVM_E500_TLB0_SIZE (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM) 27#define KVM_E500_TLB0_SIZE (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM)
32#define KVM_E500_TLB1_SIZE 16 28#define KVM_E500_TLB1_SIZE 16
@@ -58,50 +54,54 @@ extern void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *);
58extern void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *); 54extern void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *);
59 55
60/* TLB helper functions */ 56/* TLB helper functions */
61static inline unsigned int get_tlb_size(const struct tlbe *tlbe) 57static inline unsigned int
58get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
62{ 59{
63 return (tlbe->mas1 >> 7) & 0x1f; 60 return (tlbe->mas1 >> 7) & 0x1f;
64} 61}
65 62
66static inline gva_t get_tlb_eaddr(const struct tlbe *tlbe) 63static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe)
67{ 64{
68 return tlbe->mas2 & 0xfffff000; 65 return tlbe->mas2 & 0xfffff000;
69} 66}
70 67
71static inline u64 get_tlb_bytes(const struct tlbe *tlbe) 68static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe)
72{ 69{
73 unsigned int pgsize = get_tlb_size(tlbe); 70 unsigned int pgsize = get_tlb_size(tlbe);
74 return 1ULL << 10 << pgsize; 71 return 1ULL << 10 << pgsize;
75} 72}
76 73
77static inline gva_t get_tlb_end(const struct tlbe *tlbe) 74static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe)
78{ 75{
79 u64 bytes = get_tlb_bytes(tlbe); 76 u64 bytes = get_tlb_bytes(tlbe);
80 return get_tlb_eaddr(tlbe) + bytes - 1; 77 return get_tlb_eaddr(tlbe) + bytes - 1;
81} 78}
82 79
83static inline u64 get_tlb_raddr(const struct tlbe *tlbe) 80static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe)
84{ 81{
85 u64 rpn = tlbe->mas7; 82 return tlbe->mas7_3 & ~0xfffULL;
86 return (rpn << 32) | (tlbe->mas3 & 0xfffff000);
87} 83}
88 84
89static inline unsigned int get_tlb_tid(const struct tlbe *tlbe) 85static inline unsigned int
86get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe)
90{ 87{
91 return (tlbe->mas1 >> 16) & 0xff; 88 return (tlbe->mas1 >> 16) & 0xff;
92} 89}
93 90
94static inline unsigned int get_tlb_ts(const struct tlbe *tlbe) 91static inline unsigned int
92get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe)
95{ 93{
96 return (tlbe->mas1 >> 12) & 0x1; 94 return (tlbe->mas1 >> 12) & 0x1;
97} 95}
98 96
99static inline unsigned int get_tlb_v(const struct tlbe *tlbe) 97static inline unsigned int
98get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe)
100{ 99{
101 return (tlbe->mas1 >> 31) & 0x1; 100 return (tlbe->mas1 >> 31) & 0x1;
102} 101}
103 102
104static inline unsigned int get_tlb_iprot(const struct tlbe *tlbe) 103static inline unsigned int
104get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe)
105{ 105{
106 return (tlbe->mas1 >> 30) & 0x1; 106 return (tlbe->mas1 >> 30) & 0x1;
107} 107}
@@ -121,59 +121,37 @@ static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
121 return !!(vcpu->arch.shared->msr & MSR_PR); 121 return !!(vcpu->arch.shared->msr & MSR_PR);
122} 122}
123 123
124static inline unsigned int get_cur_spid( 124static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu)
125 const struct kvmppc_vcpu_e500 *vcpu_e500)
126{ 125{
127 return (vcpu_e500->mas6 >> 16) & 0xff; 126 return (vcpu->arch.shared->mas6 >> 16) & 0xff;
128} 127}
129 128
130static inline unsigned int get_cur_sas( 129static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu)
131 const struct kvmppc_vcpu_e500 *vcpu_e500)
132{ 130{
133 return vcpu_e500->mas6 & 0x1; 131 return vcpu->arch.shared->mas6 & 0x1;
134} 132}
135 133
136static inline unsigned int get_tlb_tlbsel( 134static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
137 const struct kvmppc_vcpu_e500 *vcpu_e500)
138{ 135{
139 /* 136 /*
140 * Manual says that tlbsel has 2 bits wide. 137 * Manual says that tlbsel has 2 bits wide.
141 * Since we only have two TLBs, only lower bit is used. 138 * Since we only have two TLBs, only lower bit is used.
142 */ 139 */
143 return (vcpu_e500->mas0 >> 28) & 0x1; 140 return (vcpu->arch.shared->mas0 >> 28) & 0x1;
144}
145
146static inline unsigned int get_tlb_nv_bit(
147 const struct kvmppc_vcpu_e500 *vcpu_e500)
148{
149 return vcpu_e500->mas0 & 0xfff;
150} 141}
151 142
152static inline unsigned int get_tlb_esel_bit( 143static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu)
153 const struct kvmppc_vcpu_e500 *vcpu_e500)
154{ 144{
155 return (vcpu_e500->mas0 >> 16) & 0xfff; 145 return vcpu->arch.shared->mas0 & 0xfff;
156} 146}
157 147
158static inline unsigned int get_tlb_esel( 148static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu)
159 const struct kvmppc_vcpu_e500 *vcpu_e500,
160 int tlbsel)
161{ 149{
162 unsigned int esel = get_tlb_esel_bit(vcpu_e500); 150 return (vcpu->arch.shared->mas0 >> 16) & 0xfff;
163
164 if (tlbsel == 0) {
165 esel &= KVM_E500_TLB0_WAY_NUM_MASK;
166 esel |= ((vcpu_e500->mas2 >> 12) & KVM_E500_TLB0_WAY_SIZE_MASK)
167 << KVM_E500_TLB0_WAY_NUM_BIT;
168 } else {
169 esel &= KVM_E500_TLB1_SIZE - 1;
170 }
171
172 return esel;
173} 151}
174 152
175static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu, 153static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
176 const struct tlbe *tlbe) 154 const struct kvm_book3e_206_tlb_entry *tlbe)
177{ 155{
178 gpa_t gpa; 156 gpa_t gpa;
179 157
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 141dce3c6810..968f40101883 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -13,6 +13,7 @@
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 * 14 *
15 * Copyright IBM Corp. 2007 15 * Copyright IBM Corp. 2007
16 * Copyright 2011 Freescale Semiconductor, Inc.
16 * 17 *
17 * Authors: Hollis Blanchard <hollisb@us.ibm.com> 18 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
18 */ 19 */
@@ -69,54 +70,55 @@
69#define OP_STH 44 70#define OP_STH 44
70#define OP_STHU 45 71#define OP_STHU 45
71 72
72#ifdef CONFIG_PPC_BOOK3S
73static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
74{
75 return 1;
76}
77#else
78static int kvmppc_dec_enabled(struct kvm_vcpu *vcpu)
79{
80 return vcpu->arch.tcr & TCR_DIE;
81}
82#endif
83
84void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) 73void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
85{ 74{
86 unsigned long dec_nsec; 75 unsigned long dec_nsec;
76 unsigned long long dec_time;
87 77
88 pr_debug("mtDEC: %x\n", vcpu->arch.dec); 78 pr_debug("mtDEC: %x\n", vcpu->arch.dec);
79 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
80
89#ifdef CONFIG_PPC_BOOK3S 81#ifdef CONFIG_PPC_BOOK3S
90 /* mtdec lowers the interrupt line when positive. */ 82 /* mtdec lowers the interrupt line when positive. */
91 kvmppc_core_dequeue_dec(vcpu); 83 kvmppc_core_dequeue_dec(vcpu);
92 84
93 /* POWER4+ triggers a dec interrupt if the value is < 0 */ 85 /* POWER4+ triggers a dec interrupt if the value is < 0 */
94 if (vcpu->arch.dec & 0x80000000) { 86 if (vcpu->arch.dec & 0x80000000) {
95 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
96 kvmppc_core_queue_dec(vcpu); 87 kvmppc_core_queue_dec(vcpu);
97 return; 88 return;
98 } 89 }
99#endif 90#endif
100 if (kvmppc_dec_enabled(vcpu)) { 91
101 /* The decrementer ticks at the same rate as the timebase, so 92#ifdef CONFIG_BOOKE
102 * that's how we convert the guest DEC value to the number of 93 /* On BOOKE, DEC = 0 is as good as decrementer not enabled */
103 * host ticks. */ 94 if (vcpu->arch.dec == 0)
104 95 return;
105 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); 96#endif
106 dec_nsec = vcpu->arch.dec; 97
107 dec_nsec *= 1000; 98 /*
108 dec_nsec /= tb_ticks_per_usec; 99 * The decrementer ticks at the same rate as the timebase, so
109 hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec), 100 * that's how we convert the guest DEC value to the number of
110 HRTIMER_MODE_REL); 101 * host ticks.
111 vcpu->arch.dec_jiffies = get_tb(); 102 */
112 } else { 103
113 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); 104 dec_time = vcpu->arch.dec;
114 } 105 dec_time *= 1000;
106 do_div(dec_time, tb_ticks_per_usec);
107 dec_nsec = do_div(dec_time, NSEC_PER_SEC);
108 hrtimer_start(&vcpu->arch.dec_timer,
109 ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
110 vcpu->arch.dec_jiffies = get_tb();
115} 111}
116 112
117u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb) 113u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
118{ 114{
119 u64 jd = tb - vcpu->arch.dec_jiffies; 115 u64 jd = tb - vcpu->arch.dec_jiffies;
116
117#ifdef CONFIG_BOOKE
118 if (vcpu->arch.dec < jd)
119 return 0;
120#endif
121
120 return vcpu->arch.dec - jd; 122 return vcpu->arch.dec - jd;
121} 123}
122 124
@@ -159,7 +161,8 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
159 case OP_TRAP_64: 161 case OP_TRAP_64:
160 kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); 162 kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
161#else 163#else
162 kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR); 164 kvmppc_core_queue_program(vcpu,
165 vcpu->arch.shared->esr | ESR_PTR);
163#endif 166#endif
164 advance = 0; 167 advance = 0;
165 break; 168 break;
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 607fbdf24b84..00d7e345b3fe 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -39,7 +39,8 @@
39int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) 39int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
40{ 40{
41 return !(v->arch.shared->msr & MSR_WE) || 41 return !(v->arch.shared->msr & MSR_WE) ||
42 !!(v->arch.pending_exceptions); 42 !!(v->arch.pending_exceptions) ||
43 v->requests;
43} 44}
44 45
45int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) 46int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
@@ -66,7 +67,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
66 vcpu->arch.magic_page_pa = param1; 67 vcpu->arch.magic_page_pa = param1;
67 vcpu->arch.magic_page_ea = param2; 68 vcpu->arch.magic_page_ea = param2;
68 69
69 r2 = KVM_MAGIC_FEAT_SR; 70 r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;
70 71
71 r = HC_EV_SUCCESS; 72 r = HC_EV_SUCCESS;
72 break; 73 break;
@@ -171,8 +172,11 @@ void kvm_arch_check_processor_compat(void *rtn)
171 *(int *)rtn = kvmppc_core_check_processor_compat(); 172 *(int *)rtn = kvmppc_core_check_processor_compat();
172} 173}
173 174
174int kvm_arch_init_vm(struct kvm *kvm) 175int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
175{ 176{
177 if (type)
178 return -EINVAL;
179
176 return kvmppc_core_init_vm(kvm); 180 return kvmppc_core_init_vm(kvm);
177} 181}
178 182
@@ -208,17 +212,22 @@ int kvm_dev_ioctl_check_extension(long ext)
208 case KVM_CAP_PPC_BOOKE_SREGS: 212 case KVM_CAP_PPC_BOOKE_SREGS:
209#else 213#else
210 case KVM_CAP_PPC_SEGSTATE: 214 case KVM_CAP_PPC_SEGSTATE:
215 case KVM_CAP_PPC_HIOR:
211 case KVM_CAP_PPC_PAPR: 216 case KVM_CAP_PPC_PAPR:
212#endif 217#endif
213 case KVM_CAP_PPC_UNSET_IRQ: 218 case KVM_CAP_PPC_UNSET_IRQ:
214 case KVM_CAP_PPC_IRQ_LEVEL: 219 case KVM_CAP_PPC_IRQ_LEVEL:
215 case KVM_CAP_ENABLE_CAP: 220 case KVM_CAP_ENABLE_CAP:
221 case KVM_CAP_ONE_REG:
216 r = 1; 222 r = 1;
217 break; 223 break;
218#ifndef CONFIG_KVM_BOOK3S_64_HV 224#ifndef CONFIG_KVM_BOOK3S_64_HV
219 case KVM_CAP_PPC_PAIRED_SINGLES: 225 case KVM_CAP_PPC_PAIRED_SINGLES:
220 case KVM_CAP_PPC_OSI: 226 case KVM_CAP_PPC_OSI:
221 case KVM_CAP_PPC_GET_PVINFO: 227 case KVM_CAP_PPC_GET_PVINFO:
228#ifdef CONFIG_KVM_E500
229 case KVM_CAP_SW_TLB:
230#endif
222 r = 1; 231 r = 1;
223 break; 232 break;
224 case KVM_CAP_COALESCED_MMIO: 233 case KVM_CAP_COALESCED_MMIO:
@@ -238,7 +247,26 @@ int kvm_dev_ioctl_check_extension(long ext)
238 if (cpu_has_feature(CPU_FTR_ARCH_201)) 247 if (cpu_has_feature(CPU_FTR_ARCH_201))
239 r = 2; 248 r = 2;
240 break; 249 break;
250 case KVM_CAP_SYNC_MMU:
251 r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
252 break;
241#endif 253#endif
254 case KVM_CAP_NR_VCPUS:
255 /*
256 * Recommending a number of CPUs is somewhat arbitrary; we
257 * return the number of present CPUs for -HV (since a host
258 * will have secondary threads "offline"), and for other KVM
259 * implementations just count online CPUs.
260 */
261#ifdef CONFIG_KVM_BOOK3S_64_HV
262 r = num_present_cpus();
263#else
264 r = num_online_cpus();
265#endif
266 break;
267 case KVM_CAP_MAX_VCPUS:
268 r = KVM_MAX_VCPUS;
269 break;
242 default: 270 default:
243 r = 0; 271 r = 0;
244 break; 272 break;
@@ -253,6 +281,16 @@ long kvm_arch_dev_ioctl(struct file *filp,
253 return -EINVAL; 281 return -EINVAL;
254} 282}
255 283
284void kvm_arch_free_memslot(struct kvm_memory_slot *free,
285 struct kvm_memory_slot *dont)
286{
287}
288
289int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
290{
291 return 0;
292}
293
256int kvm_arch_prepare_memory_region(struct kvm *kvm, 294int kvm_arch_prepare_memory_region(struct kvm *kvm,
257 struct kvm_memory_slot *memslot, 295 struct kvm_memory_slot *memslot,
258 struct kvm_memory_slot old, 296 struct kvm_memory_slot old,
@@ -279,9 +317,10 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
279{ 317{
280 struct kvm_vcpu *vcpu; 318 struct kvm_vcpu *vcpu;
281 vcpu = kvmppc_core_vcpu_create(kvm, id); 319 vcpu = kvmppc_core_vcpu_create(kvm, id);
282 vcpu->arch.wqp = &vcpu->wq; 320 if (!IS_ERR(vcpu)) {
283 if (!IS_ERR(vcpu)) 321 vcpu->arch.wqp = &vcpu->wq;
284 kvmppc_create_vcpu_debugfs(vcpu, id); 322 kvmppc_create_vcpu_debugfs(vcpu, id);
323 }
285 return vcpu; 324 return vcpu;
286} 325}
287 326
@@ -305,18 +344,6 @@ int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
305 return kvmppc_core_pending_dec(vcpu); 344 return kvmppc_core_pending_dec(vcpu);
306} 345}
307 346
308static void kvmppc_decrementer_func(unsigned long data)
309{
310 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
311
312 kvmppc_core_queue_dec(vcpu);
313
314 if (waitqueue_active(vcpu->arch.wqp)) {
315 wake_up_interruptible(vcpu->arch.wqp);
316 vcpu->stat.halt_wakeup++;
317 }
318}
319
320/* 347/*
321 * low level hrtimer wake routine. Because this runs in hardirq context 348 * low level hrtimer wake routine. Because this runs in hardirq context
322 * we schedule a tasklet to do the real work. 349 * we schedule a tasklet to do the real work.
@@ -431,20 +458,20 @@ static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
431 458
432 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); 459 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
433 460
434 switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) { 461 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
435 case KVM_REG_GPR: 462 case KVM_MMIO_REG_GPR:
436 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); 463 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
437 break; 464 break;
438 case KVM_REG_FPR: 465 case KVM_MMIO_REG_FPR:
439 vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr; 466 vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
440 break; 467 break;
441#ifdef CONFIG_PPC_BOOK3S 468#ifdef CONFIG_PPC_BOOK3S
442 case KVM_REG_QPR: 469 case KVM_MMIO_REG_QPR:
443 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr; 470 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
444 break; 471 break;
445 case KVM_REG_FQPR: 472 case KVM_MMIO_REG_FQPR:
446 vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr; 473 vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
447 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr; 474 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
448 break; 475 break;
449#endif 476#endif
450 default: 477 default:
@@ -553,8 +580,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
553 vcpu->arch.hcall_needed = 0; 580 vcpu->arch.hcall_needed = 0;
554 } 581 }
555 582
556 kvmppc_core_deliver_interrupts(vcpu);
557
558 r = kvmppc_vcpu_run(run, vcpu); 583 r = kvmppc_vcpu_run(run, vcpu);
559 584
560 if (vcpu->sigset_active) 585 if (vcpu->sigset_active)
@@ -563,6 +588,21 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
563 return r; 588 return r;
564} 589}
565 590
591void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
592{
593 int me;
594 int cpu = vcpu->cpu;
595
596 me = get_cpu();
597 if (waitqueue_active(vcpu->arch.wqp)) {
598 wake_up_interruptible(vcpu->arch.wqp);
599 vcpu->stat.halt_wakeup++;
600 } else if (cpu != me && cpu != -1) {
601 smp_send_reschedule(vcpu->cpu);
602 }
603 put_cpu();
604}
605
566int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) 606int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
567{ 607{
568 if (irq->irq == KVM_INTERRUPT_UNSET) { 608 if (irq->irq == KVM_INTERRUPT_UNSET) {
@@ -571,13 +611,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
571 } 611 }
572 612
573 kvmppc_core_queue_external(vcpu, irq); 613 kvmppc_core_queue_external(vcpu, irq);
574 614 kvm_vcpu_kick(vcpu);
575 if (waitqueue_active(vcpu->arch.wqp)) {
576 wake_up_interruptible(vcpu->arch.wqp);
577 vcpu->stat.halt_wakeup++;
578 } else if (vcpu->cpu != -1) {
579 smp_send_reschedule(vcpu->cpu);
580 }
581 615
582 return 0; 616 return 0;
583} 617}
@@ -599,6 +633,19 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
599 r = 0; 633 r = 0;
600 vcpu->arch.papr_enabled = true; 634 vcpu->arch.papr_enabled = true;
601 break; 635 break;
636#ifdef CONFIG_KVM_E500
637 case KVM_CAP_SW_TLB: {
638 struct kvm_config_tlb cfg;
639 void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
640
641 r = -EFAULT;
642 if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
643 break;
644
645 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
646 break;
647 }
648#endif
602 default: 649 default:
603 r = -EINVAL; 650 r = -EINVAL;
604 break; 651 break;
@@ -648,6 +695,32 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
648 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); 695 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
649 break; 696 break;
650 } 697 }
698
699 case KVM_SET_ONE_REG:
700 case KVM_GET_ONE_REG:
701 {
702 struct kvm_one_reg reg;
703 r = -EFAULT;
704 if (copy_from_user(&reg, argp, sizeof(reg)))
705 goto out;
706 if (ioctl == KVM_SET_ONE_REG)
707 r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
708 else
709 r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
710 break;
711 }
712
713#ifdef CONFIG_KVM_E500
714 case KVM_DIRTY_TLB: {
715 struct kvm_dirty_tlb dirty;
716 r = -EFAULT;
717 if (copy_from_user(&dirty, argp, sizeof(dirty)))
718 goto out;
719 r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
720 break;
721 }
722#endif
723
651 default: 724 default:
652 r = -EINVAL; 725 r = -EINVAL;
653 } 726 }
@@ -656,6 +729,11 @@ out:
656 return r; 729 return r;
657} 730}
658 731
732int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
733{
734 return VM_FAULT_SIGBUS;
735}
736
659static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo) 737static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
660{ 738{
661 u32 inst_lis = 0x3c000000; 739 u32 inst_lis = 0x3c000000;
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h
index b135d3d397db..877186b7b1c3 100644
--- a/arch/powerpc/kvm/trace.h
+++ b/arch/powerpc/kvm/trace.h
@@ -118,11 +118,14 @@ TRACE_EVENT(kvm_book3s_exit,
118 ), 118 ),
119 119
120 TP_fast_assign( 120 TP_fast_assign(
121 struct kvmppc_book3s_shadow_vcpu *svcpu;
121 __entry->exit_nr = exit_nr; 122 __entry->exit_nr = exit_nr;
122 __entry->pc = kvmppc_get_pc(vcpu); 123 __entry->pc = kvmppc_get_pc(vcpu);
123 __entry->dar = kvmppc_get_fault_dar(vcpu); 124 __entry->dar = kvmppc_get_fault_dar(vcpu);
124 __entry->msr = vcpu->arch.shared->msr; 125 __entry->msr = vcpu->arch.shared->msr;
125 __entry->srr1 = to_svcpu(vcpu)->shadow_srr1; 126 svcpu = svcpu_get(vcpu);
127 __entry->srr1 = svcpu->shadow_srr1;
128 svcpu_put(svcpu);
126 ), 129 ),
127 130
128 TP_printk("exit=0x%x | pc=0x%lx | msr=0x%lx | dar=0x%lx | srr1=0x%lx", 131 TP_printk("exit=0x%x | pc=0x%lx | msr=0x%lx | dar=0x%lx | srr1=0x%lx",
@@ -337,6 +340,63 @@ TRACE_EVENT(kvm_book3s_slbmte,
337 340
338#endif /* CONFIG_PPC_BOOK3S */ 341#endif /* CONFIG_PPC_BOOK3S */
339 342
343
344/*************************************************************************
345 * Book3E trace points *
346 *************************************************************************/
347
348#ifdef CONFIG_BOOKE
349
350TRACE_EVENT(kvm_booke206_stlb_write,
351 TP_PROTO(__u32 mas0, __u32 mas8, __u32 mas1, __u64 mas2, __u64 mas7_3),
352 TP_ARGS(mas0, mas8, mas1, mas2, mas7_3),
353
354 TP_STRUCT__entry(
355 __field( __u32, mas0 )
356 __field( __u32, mas8 )
357 __field( __u32, mas1 )
358 __field( __u64, mas2 )
359 __field( __u64, mas7_3 )
360 ),
361
362 TP_fast_assign(
363 __entry->mas0 = mas0;
364 __entry->mas8 = mas8;
365 __entry->mas1 = mas1;
366 __entry->mas2 = mas2;
367 __entry->mas7_3 = mas7_3;
368 ),
369
370 TP_printk("mas0=%x mas8=%x mas1=%x mas2=%llx mas7_3=%llx",
371 __entry->mas0, __entry->mas8, __entry->mas1,
372 __entry->mas2, __entry->mas7_3)
373);
374
375TRACE_EVENT(kvm_booke206_gtlb_write,
376 TP_PROTO(__u32 mas0, __u32 mas1, __u64 mas2, __u64 mas7_3),
377 TP_ARGS(mas0, mas1, mas2, mas7_3),
378
379 TP_STRUCT__entry(
380 __field( __u32, mas0 )
381 __field( __u32, mas1 )
382 __field( __u64, mas2 )
383 __field( __u64, mas7_3 )
384 ),
385
386 TP_fast_assign(
387 __entry->mas0 = mas0;
388 __entry->mas1 = mas1;
389 __entry->mas2 = mas2;
390 __entry->mas7_3 = mas7_3;
391 ),
392
393 TP_printk("mas0=%x mas1=%x mas2=%llx mas7_3=%llx",
394 __entry->mas0, __entry->mas1,
395 __entry->mas2, __entry->mas7_3)
396);
397
398#endif
399
340#endif /* _TRACE_KVM_H */ 400#endif /* _TRACE_KVM_H */
341 401
342/* This part must be outside protection */ 402/* This part must be outside protection */
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c
index a6ebba56fdd4..bb7cfecf2788 100644
--- a/arch/powerpc/lib/locks.c
+++ b/arch/powerpc/lib/locks.c
@@ -19,11 +19,9 @@
19#include <linux/smp.h> 19#include <linux/smp.h>
20 20
21/* waiting for a spinlock... */ 21/* waiting for a spinlock... */
22#if defined(CONFIG_PPC_SPLPAR) || defined(CONFIG_PPC_ISERIES) 22#if defined(CONFIG_PPC_SPLPAR)
23#include <asm/hvcall.h> 23#include <asm/hvcall.h>
24#include <asm/iseries/hv_call.h>
25#include <asm/smp.h> 24#include <asm/smp.h>
26#include <asm/firmware.h>
27 25
28void __spin_yield(arch_spinlock_t *lock) 26void __spin_yield(arch_spinlock_t *lock)
29{ 27{
@@ -40,14 +38,8 @@ void __spin_yield(arch_spinlock_t *lock)
40 rmb(); 38 rmb();
41 if (lock->slock != lock_value) 39 if (lock->slock != lock_value)
42 return; /* something has changed */ 40 return; /* something has changed */
43 if (firmware_has_feature(FW_FEATURE_ISERIES)) 41 plpar_hcall_norets(H_CONFER,
44 HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc, 42 get_hard_smp_processor_id(holder_cpu), yield_count);
45 ((u64)holder_cpu << 32) | yield_count);
46#ifdef CONFIG_PPC_SPLPAR
47 else
48 plpar_hcall_norets(H_CONFER,
49 get_hard_smp_processor_id(holder_cpu), yield_count);
50#endif
51} 43}
52 44
53/* 45/*
@@ -71,14 +63,8 @@ void __rw_yield(arch_rwlock_t *rw)
71 rmb(); 63 rmb();
72 if (rw->lock != lock_value) 64 if (rw->lock != lock_value)
73 return; /* something has changed */ 65 return; /* something has changed */
74 if (firmware_has_feature(FW_FEATURE_ISERIES)) 66 plpar_hcall_norets(H_CONFER,
75 HvCall2(HvCallBaseYieldProcessor, HvCall_YieldToProc, 67 get_hard_smp_processor_id(holder_cpu), yield_count);
76 ((u64)holder_cpu << 32) | yield_count);
77#ifdef CONFIG_PPC_SPLPAR
78 else
79 plpar_hcall_norets(H_CONFER,
80 get_hard_smp_processor_id(holder_cpu), yield_count);
81#endif
82} 68}
83#endif 69#endif
84 70
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c
index 329be36c0a8d..6747eece84af 100644
--- a/arch/powerpc/mm/dma-noncoherent.c
+++ b/arch/powerpc/mm/dma-noncoherent.c
@@ -365,12 +365,11 @@ static inline void __dma_sync_page_highmem(struct page *page,
365 local_irq_save(flags); 365 local_irq_save(flags);
366 366
367 do { 367 do {
368 start = (unsigned long)kmap_atomic(page + seg_nr, 368 start = (unsigned long)kmap_atomic(page + seg_nr) + seg_offset;
369 KM_PPC_SYNC_PAGE) + seg_offset;
370 369
371 /* Sync this buffer segment */ 370 /* Sync this buffer segment */
372 __dma_sync((void *)start, seg_size, direction); 371 __dma_sync((void *)start, seg_size, direction);
373 kunmap_atomic((void *)start, KM_PPC_SYNC_PAGE); 372 kunmap_atomic((void *)start);
374 seg_nr++; 373 seg_nr++;
375 374
376 /* Calculate next buffer segment size */ 375 /* Calculate next buffer segment size */
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 2f0d1b032a89..19f2f9498b27 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -105,6 +105,82 @@ static int store_updates_sp(struct pt_regs *regs)
105 } 105 }
106 return 0; 106 return 0;
107} 107}
108/*
109 * do_page_fault error handling helpers
110 */
111
112#define MM_FAULT_RETURN 0
113#define MM_FAULT_CONTINUE -1
114#define MM_FAULT_ERR(sig) (sig)
115
116static int out_of_memory(struct pt_regs *regs)
117{
118 /*
119 * We ran out of memory, or some other thing happened to us that made
120 * us unable to handle the page fault gracefully.
121 */
122 up_read(&current->mm->mmap_sem);
123 if (!user_mode(regs))
124 return MM_FAULT_ERR(SIGKILL);
125 pagefault_out_of_memory();
126 return MM_FAULT_RETURN;
127}
128
129static int do_sigbus(struct pt_regs *regs, unsigned long address)
130{
131 siginfo_t info;
132
133 up_read(&current->mm->mmap_sem);
134
135 if (user_mode(regs)) {
136 info.si_signo = SIGBUS;
137 info.si_errno = 0;
138 info.si_code = BUS_ADRERR;
139 info.si_addr = (void __user *)address;
140 force_sig_info(SIGBUS, &info, current);
141 return MM_FAULT_RETURN;
142 }
143 return MM_FAULT_ERR(SIGBUS);
144}
145
146static int mm_fault_error(struct pt_regs *regs, unsigned long addr, int fault)
147{
148 /*
149 * Pagefault was interrupted by SIGKILL. We have no reason to
150 * continue the pagefault.
151 */
152 if (fatal_signal_pending(current)) {
153 /*
154 * If we have retry set, the mmap semaphore will have
155 * alrady been released in __lock_page_or_retry(). Else
156 * we release it now.
157 */
158 if (!(fault & VM_FAULT_RETRY))
159 up_read(&current->mm->mmap_sem);
160 /* Coming from kernel, we need to deal with uaccess fixups */
161 if (user_mode(regs))
162 return MM_FAULT_RETURN;
163 return MM_FAULT_ERR(SIGKILL);
164 }
165
166 /* No fault: be happy */
167 if (!(fault & VM_FAULT_ERROR))
168 return MM_FAULT_CONTINUE;
169
170 /* Out of memory */
171 if (fault & VM_FAULT_OOM)
172 return out_of_memory(regs);
173
174 /* Bus error. x86 handles HWPOISON here, we'll add this if/when
175 * we support the feature in HW
176 */
177 if (fault & VM_FAULT_SIGBUS)
178 return do_sigbus(regs, addr);
179
180 /* We don't understand the fault code, this is fatal */
181 BUG();
182 return MM_FAULT_CONTINUE;
183}
108 184
109/* 185/*
110 * For 600- and 800-family processors, the error_code parameter is DSISR 186 * For 600- and 800-family processors, the error_code parameter is DSISR
@@ -124,11 +200,12 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
124{ 200{
125 struct vm_area_struct * vma; 201 struct vm_area_struct * vma;
126 struct mm_struct *mm = current->mm; 202 struct mm_struct *mm = current->mm;
127 siginfo_t info; 203 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
128 int code = SEGV_MAPERR; 204 int code = SEGV_MAPERR;
129 int is_write = 0, ret; 205 int is_write = 0;
130 int trap = TRAP(regs); 206 int trap = TRAP(regs);
131 int is_exec = trap == 0x400; 207 int is_exec = trap == 0x400;
208 int fault;
132 209
133#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) 210#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
134 /* 211 /*
@@ -145,6 +222,9 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
145 is_write = error_code & ESR_DST; 222 is_write = error_code & ESR_DST;
146#endif /* CONFIG_4xx || CONFIG_BOOKE */ 223#endif /* CONFIG_4xx || CONFIG_BOOKE */
147 224
225 if (is_write)
226 flags |= FAULT_FLAG_WRITE;
227
148#ifdef CONFIG_PPC_ICSWX 228#ifdef CONFIG_PPC_ICSWX
149 /* 229 /*
150 * we need to do this early because this "data storage 230 * we need to do this early because this "data storage
@@ -152,13 +232,11 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
152 * look at it 232 * look at it
153 */ 233 */
154 if (error_code & ICSWX_DSI_UCT) { 234 if (error_code & ICSWX_DSI_UCT) {
155 int ret; 235 int rc = acop_handle_fault(regs, address, error_code);
156 236 if (rc)
157 ret = acop_handle_fault(regs, address, error_code); 237 return rc;
158 if (ret)
159 return ret;
160 } 238 }
161#endif 239#endif /* CONFIG_PPC_ICSWX */
162 240
163 if (notify_page_fault(regs)) 241 if (notify_page_fault(regs))
164 return 0; 242 return 0;
@@ -179,6 +257,10 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
179 } 257 }
180#endif 258#endif
181 259
260 /* We restore the interrupt state now */
261 if (!arch_irq_disabled_regs(regs))
262 local_irq_enable();
263
182 if (in_atomic() || mm == NULL) { 264 if (in_atomic() || mm == NULL) {
183 if (!user_mode(regs)) 265 if (!user_mode(regs))
184 return SIGSEGV; 266 return SIGSEGV;
@@ -212,7 +294,15 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
212 if (!user_mode(regs) && !search_exception_tables(regs->nip)) 294 if (!user_mode(regs) && !search_exception_tables(regs->nip))
213 goto bad_area_nosemaphore; 295 goto bad_area_nosemaphore;
214 296
297retry:
215 down_read(&mm->mmap_sem); 298 down_read(&mm->mmap_sem);
299 } else {
300 /*
301 * The above down_read_trylock() might have succeeded in
302 * which case we'll have missed the might_sleep() from
303 * down_read():
304 */
305 might_sleep();
216 } 306 }
217 307
218 vma = find_vma(mm, address); 308 vma = find_vma(mm, address);
@@ -327,30 +417,43 @@ good_area:
327 * make sure we exit gracefully rather than endlessly redo 417 * make sure we exit gracefully rather than endlessly redo
328 * the fault. 418 * the fault.
329 */ 419 */
330 ret = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0); 420 fault = handle_mm_fault(mm, vma, address, flags);
331 if (unlikely(ret & VM_FAULT_ERROR)) { 421 if (unlikely(fault & (VM_FAULT_RETRY|VM_FAULT_ERROR))) {
332 if (ret & VM_FAULT_OOM) 422 int rc = mm_fault_error(regs, address, fault);
333 goto out_of_memory; 423 if (rc >= MM_FAULT_RETURN)
334 else if (ret & VM_FAULT_SIGBUS) 424 return rc;
335 goto do_sigbus;
336 BUG();
337 } 425 }
338 if (ret & VM_FAULT_MAJOR) { 426
339 current->maj_flt++; 427 /*
340 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 428 * Major/minor page fault accounting is only done on the
341 regs, address); 429 * initial attempt. If we go through a retry, it is extremely
430 * likely that the page will be found in page cache at that point.
431 */
432 if (flags & FAULT_FLAG_ALLOW_RETRY) {
433 if (fault & VM_FAULT_MAJOR) {
434 current->maj_flt++;
435 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1,
436 regs, address);
342#ifdef CONFIG_PPC_SMLPAR 437#ifdef CONFIG_PPC_SMLPAR
343 if (firmware_has_feature(FW_FEATURE_CMO)) { 438 if (firmware_has_feature(FW_FEATURE_CMO)) {
344 preempt_disable(); 439 preempt_disable();
345 get_lppaca()->page_ins += (1 << PAGE_FACTOR); 440 get_lppaca()->page_ins += (1 << PAGE_FACTOR);
346 preempt_enable(); 441 preempt_enable();
442 }
443#endif /* CONFIG_PPC_SMLPAR */
444 } else {
445 current->min_flt++;
446 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
447 regs, address);
448 }
449 if (fault & VM_FAULT_RETRY) {
450 /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk
451 * of starvation. */
452 flags &= ~FAULT_FLAG_ALLOW_RETRY;
453 goto retry;
347 } 454 }
348#endif
349 } else {
350 current->min_flt++;
351 perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1,
352 regs, address);
353 } 455 }
456
354 up_read(&mm->mmap_sem); 457 up_read(&mm->mmap_sem);
355 return 0; 458 return 0;
356 459
@@ -371,28 +474,6 @@ bad_area_nosemaphore:
371 474
372 return SIGSEGV; 475 return SIGSEGV;
373 476
374/*
375 * We ran out of memory, or some other thing happened to us that made
376 * us unable to handle the page fault gracefully.
377 */
378out_of_memory:
379 up_read(&mm->mmap_sem);
380 if (!user_mode(regs))
381 return SIGKILL;
382 pagefault_out_of_memory();
383 return 0;
384
385do_sigbus:
386 up_read(&mm->mmap_sem);
387 if (user_mode(regs)) {
388 info.si_signo = SIGBUS;
389 info.si_errno = 0;
390 info.si_code = BUS_ADRERR;
391 info.si_addr = (void __user *)address;
392 force_sig_info(SIGBUS, &info, current);
393 return 0;
394 }
395 return SIGBUS;
396} 477}
397 478
398/* 479/*
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c
index 66a6fd38e9cd..07ba45b0f07c 100644
--- a/arch/powerpc/mm/fsl_booke_mmu.c
+++ b/arch/powerpc/mm/fsl_booke_mmu.c
@@ -149,12 +149,19 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys,
149unsigned long calc_cam_sz(unsigned long ram, unsigned long virt, 149unsigned long calc_cam_sz(unsigned long ram, unsigned long virt,
150 phys_addr_t phys) 150 phys_addr_t phys)
151{ 151{
152 unsigned int camsize = __ilog2(ram) & ~1U; 152 unsigned int camsize = __ilog2(ram);
153 unsigned int align = __ffs(virt | phys) & ~1U; 153 unsigned int align = __ffs(virt | phys);
154 unsigned long max_cam = (mfspr(SPRN_TLB1CFG) >> 16) & 0xf; 154 unsigned long max_cam;
155 155
156 /* Convert (4^max) kB to (2^max) bytes */ 156 if ((mfspr(SPRN_MMUCFG) & MMUCFG_MAVN) == MMUCFG_MAVN_V1) {
157 max_cam = max_cam * 2 + 10; 157 /* Convert (4^max) kB to (2^max) bytes */
158 max_cam = ((mfspr(SPRN_TLB1CFG) >> 16) & 0xf) * 2 + 10;
159 camsize &= ~1U;
160 align &= ~1U;
161 } else {
162 /* Convert (2^max) kB to (2^max) bytes */
163 max_cam = __ilog2(mfspr(SPRN_TLB1PS)) + 10;
164 }
158 165
159 if (camsize > align) 166 if (camsize > align)
160 camsize = align; 167 camsize = align;
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 2d282186cb45..3e8c37a4e395 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -55,6 +55,8 @@
55#include <asm/spu.h> 55#include <asm/spu.h>
56#include <asm/udbg.h> 56#include <asm/udbg.h>
57#include <asm/code-patching.h> 57#include <asm/code-patching.h>
58#include <asm/fadump.h>
59#include <asm/firmware.h>
58 60
59#ifdef DEBUG 61#ifdef DEBUG
60#define DBG(fmt...) udbg_printf(fmt) 62#define DBG(fmt...) udbg_printf(fmt)
@@ -625,6 +627,16 @@ static void __init htab_initialize(void)
625 /* Using a hypervisor which owns the htab */ 627 /* Using a hypervisor which owns the htab */
626 htab_address = NULL; 628 htab_address = NULL;
627 _SDR1 = 0; 629 _SDR1 = 0;
630#ifdef CONFIG_FA_DUMP
631 /*
632 * If firmware assisted dump is active firmware preserves
633 * the contents of htab along with entire partition memory.
634 * Clear the htab if firmware assisted dump is active so
635 * that we dont end up using old mappings.
636 */
637 if (is_fadump_active() && ppc_md.hpte_clear_all)
638 ppc_md.hpte_clear_all();
639#endif
628 } else { 640 } else {
629 /* Find storage for the HPT. Must be contiguous in 641 /* Find storage for the HPT. Must be contiguous in
630 * the absolute address space. On cell we want it to be 642 * the absolute address space. On cell we want it to be
@@ -745,12 +757,9 @@ void __init early_init_mmu(void)
745 */ 757 */
746 htab_initialize(); 758 htab_initialize();
747 759
748 /* Initialize stab / SLB management except on iSeries 760 /* Initialize stab / SLB management */
749 */
750 if (mmu_has_feature(MMU_FTR_SLB)) 761 if (mmu_has_feature(MMU_FTR_SLB))
751 slb_initialize(); 762 slb_initialize();
752 else if (!firmware_has_feature(FW_FEATURE_ISERIES))
753 stab_initialize(get_paca()->stab_real);
754} 763}
755 764
756#ifdef CONFIG_SMP 765#ifdef CONFIG_SMP
@@ -761,8 +770,7 @@ void __cpuinit early_init_mmu_secondary(void)
761 mtspr(SPRN_SDR1, _SDR1); 770 mtspr(SPRN_SDR1, _SDR1);
762 771
763 /* Initialize STAB/SLB. We use a virtual address as it works 772 /* Initialize STAB/SLB. We use a virtual address as it works
764 * in real mode on pSeries and we want a virtual address on 773 * in real mode on pSeries.
765 * iSeries anyway
766 */ 774 */
767 if (mmu_has_feature(MMU_FTR_SLB)) 775 if (mmu_has_feature(MMU_FTR_SLB))
768 slb_initialize(); 776 slb_initialize();
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index a8b3cc7d90fe..fb05b123218f 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -12,6 +12,7 @@
12#include <linux/io.h> 12#include <linux/io.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/hugetlb.h> 14#include <linux/hugetlb.h>
15#include <linux/export.h>
15#include <linux/of_fdt.h> 16#include <linux/of_fdt.h>
16#include <linux/memblock.h> 17#include <linux/memblock.h>
17#include <linux/bootmem.h> 18#include <linux/bootmem.h>
@@ -103,6 +104,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
103 *shift = hugepd_shift(*hpdp); 104 *shift = hugepd_shift(*hpdp);
104 return hugepte_offset(hpdp, ea, pdshift); 105 return hugepte_offset(hpdp, ea, pdshift);
105} 106}
107EXPORT_SYMBOL_GPL(find_linux_pte_or_hugepte);
106 108
107pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) 109pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
108{ 110{
@@ -310,7 +312,8 @@ void __init reserve_hugetlb_gpages(void)
310 int i; 312 int i;
311 313
312 strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE); 314 strlcpy(cmdline, boot_command_line, COMMAND_LINE_SIZE);
313 parse_args("hugetlb gpages", cmdline, NULL, 0, &do_gpage_early_setup); 315 parse_args("hugetlb gpages", cmdline, NULL, 0, 0, 0,
316 &do_gpage_early_setup);
314 317
315 /* 318 /*
316 * Walk gpage list in reverse, allocating larger page sizes first. 319 * Walk gpage list in reverse, allocating larger page sizes first.
@@ -910,9 +913,9 @@ void flush_dcache_icache_hugepage(struct page *page)
910 if (!PageHighMem(page)) { 913 if (!PageHighMem(page)) {
911 __flush_dcache_icache(page_address(page+i)); 914 __flush_dcache_icache(page_address(page+i));
912 } else { 915 } else {
913 start = kmap_atomic(page+i, KM_PPC_SYNC_ICACHE); 916 start = kmap_atomic(page+i);
914 __flush_dcache_icache(start); 917 __flush_dcache_icache(start);
915 kunmap_atomic(start, KM_PPC_SYNC_ICACHE); 918 kunmap_atomic(start);
916 } 919 }
917 } 920 }
918} 921}
diff --git a/arch/powerpc/mm/icswx.c b/arch/powerpc/mm/icswx.c
index 5d9a59eaad93..8cdbd8634a58 100644
--- a/arch/powerpc/mm/icswx.c
+++ b/arch/powerpc/mm/icswx.c
@@ -163,7 +163,7 @@ EXPORT_SYMBOL_GPL(drop_cop);
163 163
164static int acop_use_cop(int ct) 164static int acop_use_cop(int ct)
165{ 165{
166 /* todo */ 166 /* There is no alternate policy, yet */
167 return -1; 167 return -1;
168} 168}
169 169
@@ -227,11 +227,30 @@ int acop_handle_fault(struct pt_regs *regs, unsigned long address,
227 ct = (ccw >> 16) & 0x3f; 227 ct = (ccw >> 16) & 0x3f;
228 } 228 }
229 229
230 /*
231 * We could be here because another thread has enabled acop
232 * but the ACOP register has yet to be updated.
233 *
234 * This should have been taken care of by the IPI to sync all
235 * the threads (see smp_call_function(sync_cop, mm, 1)), but
236 * that could take forever if there are a significant amount
237 * of threads.
238 *
239 * Given the number of threads on some of these systems,
240 * perhaps this is the best way to sync ACOP rather than whack
241 * every thread with an IPI.
242 */
243 if ((acop_copro_type_bit(ct) & current->active_mm->context.acop) != 0) {
244 sync_cop(current->active_mm);
245 return 0;
246 }
247
248 /* check for alternate policy */
230 if (!acop_use_cop(ct)) 249 if (!acop_use_cop(ct))
231 return 0; 250 return 0;
232 251
233 /* at this point the CT is unknown to the system */ 252 /* at this point the CT is unknown to the system */
234 pr_warn("%s[%d]: Coprocessor %d is unavailable", 253 pr_warn("%s[%d]: Coprocessor %d is unavailable\n",
235 current->comm, current->pid, ct); 254 current->comm, current->pid, ct);
236 255
237 /* get inst if we don't already have it */ 256 /* get inst if we don't already have it */
diff --git a/arch/powerpc/mm/icswx.h b/arch/powerpc/mm/icswx.h
index 42176bd0884c..6dedc08e62c8 100644
--- a/arch/powerpc/mm/icswx.h
+++ b/arch/powerpc/mm/icswx.h
@@ -59,4 +59,10 @@ extern void free_cop_pid(int free_pid);
59 59
60extern int acop_handle_fault(struct pt_regs *regs, unsigned long address, 60extern int acop_handle_fault(struct pt_regs *regs, unsigned long address,
61 unsigned long error_code); 61 unsigned long error_code);
62
63static inline u64 acop_copro_type_bit(unsigned int type)
64{
65 return 1ULL << (63 - type);
66}
67
62#endif /* !_ARCH_POWERPC_MM_ICSWX_H_ */ 68#endif /* !_ARCH_POWERPC_MM_ICSWX_H_ */
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index d974b79a3068..baaafde7d135 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -458,9 +458,9 @@ void flush_dcache_icache_page(struct page *page)
458#endif 458#endif
459#ifdef CONFIG_BOOKE 459#ifdef CONFIG_BOOKE
460 { 460 {
461 void *start = kmap_atomic(page, KM_PPC_SYNC_ICACHE); 461 void *start = kmap_atomic(page);
462 __flush_dcache_icache(start); 462 __flush_dcache_icache(start);
463 kunmap_atomic(start, KM_PPC_SYNC_ICACHE); 463 kunmap_atomic(start);
464 } 464 }
465#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64) 465#elif defined(CONFIG_8xx) || defined(CONFIG_PPC64)
466 /* On 8xx there is no need to kmap since highmem is not supported */ 466 /* On 8xx there is no need to kmap since highmem is not supported */
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 51f87956f8f8..0907f92ce309 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -207,7 +207,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags,
207 */ 207 */
208 if (mem_init_done && (p < virt_to_phys(high_memory)) && 208 if (mem_init_done && (p < virt_to_phys(high_memory)) &&
209 !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) { 209 !(__allow_ioremap_reserved && memblock_is_region_reserved(p, size))) {
210 printk("__ioremap(): phys addr 0x%llx is RAM lr %p\n", 210 printk("__ioremap(): phys addr 0x%llx is RAM lr %pf\n",
211 (unsigned long long)p, __builtin_return_address(0)); 211 (unsigned long long)p, __builtin_return_address(0));
212 return NULL; 212 return NULL;
213 } 213 }
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c
index e22276cb67a4..a538c80db2df 100644
--- a/arch/powerpc/mm/slb.c
+++ b/arch/powerpc/mm/slb.c
@@ -21,7 +21,6 @@
21#include <asm/cputable.h> 21#include <asm/cputable.h>
22#include <asm/cacheflush.h> 22#include <asm/cacheflush.h>
23#include <asm/smp.h> 23#include <asm/smp.h>
24#include <asm/firmware.h>
25#include <linux/compiler.h> 24#include <linux/compiler.h>
26#include <asm/udbg.h> 25#include <asm/udbg.h>
27#include <asm/code-patching.h> 26#include <asm/code-patching.h>
@@ -307,11 +306,6 @@ void slb_initialize(void)
307 306
308 get_paca()->stab_rr = SLB_NUM_BOLTED; 307 get_paca()->stab_rr = SLB_NUM_BOLTED;
309 308
310 /* On iSeries the bolted entries have already been set up by
311 * the hypervisor from the lparMap data in head.S */
312 if (firmware_has_feature(FW_FEATURE_ISERIES))
313 return;
314
315 lflags = SLB_VSID_KERNEL | linear_llp; 309 lflags = SLB_VSID_KERNEL | linear_llp;
316 vflags = SLB_VSID_KERNEL | vmalloc_llp; 310 vflags = SLB_VSID_KERNEL | vmalloc_llp;
317 311
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S
index ef653dc95b65..b9ee79ce2200 100644
--- a/arch/powerpc/mm/slb_low.S
+++ b/arch/powerpc/mm/slb_low.S
@@ -217,21 +217,6 @@ slb_finish_load:
217 * free slot first but that took too long. Unfortunately we 217 * free slot first but that took too long. Unfortunately we
218 * dont have any LRU information to help us choose a slot. 218 * dont have any LRU information to help us choose a slot.
219 */ 219 */
220#ifdef CONFIG_PPC_ISERIES
221BEGIN_FW_FTR_SECTION
222 /*
223 * On iSeries, the "bolted" stack segment can be cast out on
224 * shared processor switch so we need to check for a miss on
225 * it and restore it to the right slot.
226 */
227 ld r9,PACAKSAVE(r13)
228 clrrdi r9,r9,28
229 clrrdi r3,r3,28
230 li r10,SLB_NUM_BOLTED-1 /* Stack goes in last bolted slot */
231 cmpld r9,r3
232 beq 3f
233END_FW_FTR_SECTION_IFSET(FW_FEATURE_ISERIES)
234#endif /* CONFIG_PPC_ISERIES */
235 220
2367: ld r10,PACASTABRR(r13) 2217: ld r10,PACASTABRR(r13)
237 addi r10,r10,1 222 addi r10,r10,1
@@ -282,7 +267,6 @@ _GLOBAL(slb_compare_rr_to_size)
282 267
283/* 268/*
284 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return. 269 * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
285 * We assume legacy iSeries will never have 1T segments.
286 * 270 *
287 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9 271 * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9
288 */ 272 */
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index 41e31642a86a..9106ebb118f5 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -21,8 +21,6 @@
21#include <asm/cputable.h> 21#include <asm/cputable.h>
22#include <asm/prom.h> 22#include <asm/prom.h>
23#include <asm/abs_addr.h> 23#include <asm/abs_addr.h>
24#include <asm/firmware.h>
25#include <asm/iseries/hv_call.h>
26 24
27struct stab_entry { 25struct stab_entry {
28 unsigned long esid_data; 26 unsigned long esid_data;
@@ -285,12 +283,5 @@ void stab_initialize(unsigned long stab)
285 /* Set ASR */ 283 /* Set ASR */
286 stabreal = get_paca()->stab_real | 0x1ul; 284 stabreal = get_paca()->stab_real | 0x1ul;
287 285
288#ifdef CONFIG_PPC_ISERIES
289 if (firmware_has_feature(FW_FEATURE_ISERIES)) {
290 HvCall1(HvCallBaseSetASR, stabreal);
291 return;
292 }
293#endif /* CONFIG_PPC_ISERIES */
294
295 mtspr(SPRN_ASR, stabreal); 286 mtspr(SPRN_ASR, stabreal);
296} 287}
diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c
index d65e68f3cb25..6f01624f317f 100644
--- a/arch/powerpc/oprofile/common.c
+++ b/arch/powerpc/oprofile/common.c
@@ -195,9 +195,6 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
195 if (!cur_cpu_spec->oprofile_cpu_type) 195 if (!cur_cpu_spec->oprofile_cpu_type)
196 return -ENODEV; 196 return -ENODEV;
197 197
198 if (firmware_has_feature(FW_FEATURE_ISERIES))
199 return -ENODEV;
200
201 switch (cur_cpu_spec->oprofile_type) { 198 switch (cur_cpu_spec->oprofile_type) {
202#ifdef CONFIG_PPC_BOOK3S_64 199#ifdef CONFIG_PPC_BOOK3S_64
203#ifdef CONFIG_OPROFILE_CELL 200#ifdef CONFIG_OPROFILE_CELL
diff --git a/arch/powerpc/perf/Makefile b/arch/powerpc/perf/Makefile
new file mode 100644
index 000000000000..af3fac23768c
--- /dev/null
+++ b/arch/powerpc/perf/Makefile
@@ -0,0 +1,14 @@
1subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
2
3obj-$(CONFIG_PERF_EVENTS) += callchain.o
4
5obj-$(CONFIG_PPC_PERF_CTRS) += core-book3s.o
6obj64-$(CONFIG_PPC_PERF_CTRS) += power4-pmu.o ppc970-pmu.o power5-pmu.o \
7 power5+-pmu.o power6-pmu.o power7-pmu.o
8obj32-$(CONFIG_PPC_PERF_CTRS) += mpc7450-pmu.o
9
10obj-$(CONFIG_FSL_EMB_PERF_EVENT) += core-fsl-emb.o
11obj-$(CONFIG_FSL_EMB_PERF_EVENT_E500) += e500-pmu.o
12
13obj-$(CONFIG_PPC64) += $(obj64-y)
14obj-$(CONFIG_PPC32) += $(obj32-y)
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/perf/callchain.c
index 564c1d8bdb5c..e8a18d1cc7c9 100644
--- a/arch/powerpc/kernel/perf_callchain.c
+++ b/arch/powerpc/perf/callchain.c
@@ -20,7 +20,7 @@
20#include <asm/ucontext.h> 20#include <asm/ucontext.h>
21#include <asm/vdso.h> 21#include <asm/vdso.h>
22#ifdef CONFIG_PPC64 22#ifdef CONFIG_PPC64
23#include "ppc32.h" 23#include "../kernel/ppc32.h"
24#endif 24#endif
25 25
26 26
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/perf/core-book3s.c
index 10a140f82cb8..02aee03e713c 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -116,14 +116,45 @@ static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
116 *addrp = mfspr(SPRN_SDAR); 116 *addrp = mfspr(SPRN_SDAR);
117} 117}
118 118
119static inline u32 perf_flags_from_msr(struct pt_regs *regs)
120{
121 if (regs->msr & MSR_PR)
122 return PERF_RECORD_MISC_USER;
123 if ((regs->msr & MSR_HV) && freeze_events_kernel != MMCR0_FCHV)
124 return PERF_RECORD_MISC_HYPERVISOR;
125 return PERF_RECORD_MISC_KERNEL;
126}
127
119static inline u32 perf_get_misc_flags(struct pt_regs *regs) 128static inline u32 perf_get_misc_flags(struct pt_regs *regs)
120{ 129{
121 unsigned long mmcra = regs->dsisr; 130 unsigned long mmcra = regs->dsisr;
122 unsigned long sihv = MMCRA_SIHV; 131 unsigned long sihv = MMCRA_SIHV;
123 unsigned long sipr = MMCRA_SIPR; 132 unsigned long sipr = MMCRA_SIPR;
124 133
134 /* Not a PMU interrupt: Make up flags from regs->msr */
125 if (TRAP(regs) != 0xf00) 135 if (TRAP(regs) != 0xf00)
126 return 0; /* not a PMU interrupt */ 136 return perf_flags_from_msr(regs);
137
138 /*
139 * If we don't support continuous sampling and this
140 * is not a marked event, same deal
141 */
142 if ((ppmu->flags & PPMU_NO_CONT_SAMPLING) &&
143 !(mmcra & MMCRA_SAMPLE_ENABLE))
144 return perf_flags_from_msr(regs);
145
146 /*
147 * If we don't have flags in MMCRA, rather than using
148 * the MSR, we intuit the flags from the address in
149 * SIAR which should give slightly more reliable
150 * results
151 */
152 if (ppmu->flags & PPMU_NO_SIPR) {
153 unsigned long siar = mfspr(SPRN_SIAR);
154 if (siar >= PAGE_OFFSET)
155 return PERF_RECORD_MISC_KERNEL;
156 return PERF_RECORD_MISC_USER;
157 }
127 158
128 if (ppmu->flags & PPMU_ALT_SIPR) { 159 if (ppmu->flags & PPMU_ALT_SIPR) {
129 sihv = POWER6_MMCRA_SIHV; 160 sihv = POWER6_MMCRA_SIHV;
@@ -865,6 +896,7 @@ static void power_pmu_start(struct perf_event *event, int ef_flags)
865{ 896{
866 unsigned long flags; 897 unsigned long flags;
867 s64 left; 898 s64 left;
899 unsigned long val;
868 900
869 if (!event->hw.idx || !event->hw.sample_period) 901 if (!event->hw.idx || !event->hw.sample_period)
870 return; 902 return;
@@ -880,7 +912,12 @@ static void power_pmu_start(struct perf_event *event, int ef_flags)
880 912
881 event->hw.state = 0; 913 event->hw.state = 0;
882 left = local64_read(&event->hw.period_left); 914 left = local64_read(&event->hw.period_left);
883 write_pmc(event->hw.idx, left); 915
916 val = 0;
917 if (left < 0x80000000L)
918 val = 0x80000000L - left;
919
920 write_pmc(event->hw.idx, val);
884 921
885 perf_event_update_userpage(event); 922 perf_event_update_userpage(event);
886 perf_pmu_enable(event->pmu); 923 perf_pmu_enable(event->pmu);
@@ -1078,6 +1115,10 @@ static int power_pmu_event_init(struct perf_event *event)
1078 if (!ppmu) 1115 if (!ppmu)
1079 return -ENOENT; 1116 return -ENOENT;
1080 1117
1118 /* does not support taken branch sampling */
1119 if (has_branch_stack(event))
1120 return -EOPNOTSUPP;
1121
1081 switch (event->attr.type) { 1122 switch (event->attr.type) {
1082 case PERF_TYPE_HARDWARE: 1123 case PERF_TYPE_HARDWARE:
1083 ev = event->attr.config; 1124 ev = event->attr.config;
@@ -1187,6 +1228,11 @@ static int power_pmu_event_init(struct perf_event *event)
1187 return err; 1228 return err;
1188} 1229}
1189 1230
1231static int power_pmu_event_idx(struct perf_event *event)
1232{
1233 return event->hw.idx;
1234}
1235
1190struct pmu power_pmu = { 1236struct pmu power_pmu = {
1191 .pmu_enable = power_pmu_enable, 1237 .pmu_enable = power_pmu_enable,
1192 .pmu_disable = power_pmu_disable, 1238 .pmu_disable = power_pmu_disable,
@@ -1199,6 +1245,7 @@ struct pmu power_pmu = {
1199 .start_txn = power_pmu_start_txn, 1245 .start_txn = power_pmu_start_txn,
1200 .cancel_txn = power_pmu_cancel_txn, 1246 .cancel_txn = power_pmu_cancel_txn,
1201 .commit_txn = power_pmu_commit_txn, 1247 .commit_txn = power_pmu_commit_txn,
1248 .event_idx = power_pmu_event_idx,
1202}; 1249};
1203 1250
1204/* 1251/*
@@ -1283,13 +1330,18 @@ unsigned long perf_misc_flags(struct pt_regs *regs)
1283 */ 1330 */
1284unsigned long perf_instruction_pointer(struct pt_regs *regs) 1331unsigned long perf_instruction_pointer(struct pt_regs *regs)
1285{ 1332{
1286 unsigned long ip; 1333 unsigned long mmcra = regs->dsisr;
1287 1334
1335 /* Not a PMU interrupt */
1288 if (TRAP(regs) != 0xf00) 1336 if (TRAP(regs) != 0xf00)
1289 return regs->nip; /* not a PMU interrupt */ 1337 return regs->nip;
1338
1339 /* Processor doesn't support sampling non marked events */
1340 if ((ppmu->flags & PPMU_NO_CONT_SAMPLING) &&
1341 !(mmcra & MMCRA_SAMPLE_ENABLE))
1342 return regs->nip;
1290 1343
1291 ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs); 1344 return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
1292 return ip;
1293} 1345}
1294 1346
1295static bool pmc_overflow(unsigned long val) 1347static bool pmc_overflow(unsigned long val)
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/perf/core-fsl-emb.c
index 0a6d2a9d569c..0a6d2a9d569c 100644
--- a/arch/powerpc/kernel/perf_event_fsl_emb.c
+++ b/arch/powerpc/perf/core-fsl-emb.c
diff --git a/arch/powerpc/kernel/e500-pmu.c b/arch/powerpc/perf/e500-pmu.c
index cb2e2949c8d1..cb2e2949c8d1 100644
--- a/arch/powerpc/kernel/e500-pmu.c
+++ b/arch/powerpc/perf/e500-pmu.c
diff --git a/arch/powerpc/kernel/mpc7450-pmu.c b/arch/powerpc/perf/mpc7450-pmu.c
index fe21b515ca44..fe21b515ca44 100644
--- a/arch/powerpc/kernel/mpc7450-pmu.c
+++ b/arch/powerpc/perf/mpc7450-pmu.c
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/perf/power4-pmu.c
index b4f1dda4d089..9103a1de864d 100644
--- a/arch/powerpc/kernel/power4-pmu.c
+++ b/arch/powerpc/perf/power4-pmu.c
@@ -607,6 +607,7 @@ static struct power_pmu power4_pmu = {
607 .n_generic = ARRAY_SIZE(p4_generic_events), 607 .n_generic = ARRAY_SIZE(p4_generic_events),
608 .generic_events = p4_generic_events, 608 .generic_events = p4_generic_events,
609 .cache_events = &power4_cache_events, 609 .cache_events = &power4_cache_events,
610 .flags = PPMU_NO_SIPR | PPMU_NO_CONT_SAMPLING,
610}; 611};
611 612
612static int __init init_power4_pmu(void) 613static int __init init_power4_pmu(void)
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/perf/power5+-pmu.c
index a8757baa28f3..a8757baa28f3 100644
--- a/arch/powerpc/kernel/power5+-pmu.c
+++ b/arch/powerpc/perf/power5+-pmu.c
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/perf/power5-pmu.c
index e7f06eb7a861..e7f06eb7a861 100644
--- a/arch/powerpc/kernel/power5-pmu.c
+++ b/arch/powerpc/perf/power5-pmu.c
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/perf/power6-pmu.c
index 0bbc901e7efc..31128e086fed 100644
--- a/arch/powerpc/kernel/power6-pmu.c
+++ b/arch/powerpc/perf/power6-pmu.c
@@ -131,7 +131,7 @@ static u32 marked_bus_events[16] = {
131 0x00000022, /* BFP set 2: byte 0 bits 1, 5 */ 131 0x00000022, /* BFP set 2: byte 0 bits 1, 5 */
132 0, 0 132 0, 0
133}; 133};
134 134
135/* 135/*
136 * Returns 1 if event counts things relating to marked instructions 136 * Returns 1 if event counts things relating to marked instructions
137 * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not. 137 * and thus needs the MMCRA_SAMPLE_ENABLE bit set, or 0 if not.
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/perf/power7-pmu.c
index 1251e4d7e262..1251e4d7e262 100644
--- a/arch/powerpc/kernel/power7-pmu.c
+++ b/arch/powerpc/perf/power7-pmu.c
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/perf/ppc970-pmu.c
index 8c2190206964..20139ceeacf6 100644
--- a/arch/powerpc/kernel/ppc970-pmu.c
+++ b/arch/powerpc/perf/ppc970-pmu.c
@@ -252,7 +252,7 @@ static int p970_get_alternatives(u64 event, unsigned int flags, u64 alt[])
252 alt[1] = event ^ 0x1000; 252 alt[1] = event ^ 0x1000;
253 return 2; 253 return 2;
254 } 254 }
255 255
256 return 1; 256 return 1;
257} 257}
258 258
@@ -487,6 +487,7 @@ static struct power_pmu ppc970_pmu = {
487 .n_generic = ARRAY_SIZE(ppc970_generic_events), 487 .n_generic = ARRAY_SIZE(ppc970_generic_events),
488 .generic_events = ppc970_generic_events, 488 .generic_events = ppc970_generic_events,
489 .cache_events = &ppc970_cache_events, 489 .cache_events = &ppc970_cache_events,
490 .flags = PPMU_NO_SIPR | PPMU_NO_CONT_SAMPLING,
490}; 491};
491 492
492static int __init init_ppc970_pmu(void) 493static int __init init_ppc970_pmu(void)
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index fcf6bf2ceee9..2e4e64abfab4 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -23,6 +23,7 @@ config BLUESTONE
23 default n 23 default n
24 select PPC44x_SIMPLE 24 select PPC44x_SIMPLE
25 select APM821xx 25 select APM821xx
26 select PPC4xx_PCI_EXPRESS
26 select IBM_EMAC_RGMII 27 select IBM_EMAC_RGMII
27 help 28 help
28 This option enables support for the APM APM821xx Evaluation board. 29 This option enables support for the APM APM821xx Evaluation board.
diff --git a/arch/powerpc/platforms/44x/currituck.c b/arch/powerpc/platforms/44x/currituck.c
index 3f6229b5dee0..583e67fee37e 100644
--- a/arch/powerpc/platforms/44x/currituck.c
+++ b/arch/powerpc/platforms/44x/currituck.c
@@ -83,7 +83,7 @@ static void __init ppc47x_init_irq(void)
83 * device-tree, just pass 0 to all arguments 83 * device-tree, just pass 0 to all arguments
84 */ 84 */
85 struct mpic *mpic = 85 struct mpic *mpic =
86 mpic_alloc(np, 0, 0, 0, 0, " MPIC "); 86 mpic_alloc(np, 0, MPIC_NO_RESET, 0, 0, " MPIC ");
87 BUG_ON(mpic == NULL); 87 BUG_ON(mpic == NULL);
88 mpic_init(mpic); 88 mpic_init(mpic);
89 ppc_md.get_irq = mpic_get_irq; 89 ppc_md.get_irq = mpic_get_irq;
diff --git a/arch/powerpc/platforms/44x/iss4xx.c b/arch/powerpc/platforms/44x/iss4xx.c
index 5b8cdbb82f80..a28a8629727e 100644
--- a/arch/powerpc/platforms/44x/iss4xx.c
+++ b/arch/powerpc/platforms/44x/iss4xx.c
@@ -71,8 +71,7 @@ static void __init iss4xx_init_irq(void)
71 /* The MPIC driver will get everything it needs from the 71 /* The MPIC driver will get everything it needs from the
72 * device-tree, just pass 0 to all arguments 72 * device-tree, just pass 0 to all arguments
73 */ 73 */
74 struct mpic *mpic = mpic_alloc(np, 0, 0, 0, 0, 74 struct mpic *mpic = mpic_alloc(np, 0, MPIC_NO_RESET, 0, 0, " MPIC ");
75 " MPIC ");
76 BUG_ON(mpic == NULL); 75 BUG_ON(mpic == NULL);
77 mpic_init(mpic); 76 mpic_init(mpic);
78 ppc_md.get_irq = mpic_get_irq; 77 ppc_md.get_irq = mpic_get_irq;
diff --git a/arch/powerpc/platforms/44x/ppc44x_simple.c b/arch/powerpc/platforms/44x/ppc44x_simple.c
index 8d2202763415..3ffb915446e3 100644
--- a/arch/powerpc/platforms/44x/ppc44x_simple.c
+++ b/arch/powerpc/platforms/44x/ppc44x_simple.c
@@ -52,7 +52,7 @@ machine_device_initcall(ppc44x_simple, ppc44x_device_probe);
52static char *board[] __initdata = { 52static char *board[] __initdata = {
53 "amcc,arches", 53 "amcc,arches",
54 "amcc,bamboo", 54 "amcc,bamboo",
55 "amcc,bluestone", 55 "apm,bluestone",
56 "amcc,glacier", 56 "amcc,glacier",
57 "ibm,ebony", 57 "ibm,ebony",
58 "amcc,eiger", 58 "amcc,eiger",
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
index 9f09319352c0..ca3a062ed1b9 100644
--- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
+++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c
@@ -21,7 +21,7 @@
21#include <asm/prom.h> 21#include <asm/prom.h>
22 22
23static struct device_node *cpld_pic_node; 23static struct device_node *cpld_pic_node;
24static struct irq_host *cpld_pic_host; 24static struct irq_domain *cpld_pic_host;
25 25
26/* 26/*
27 * Bits to ignore in the misc_status register 27 * Bits to ignore in the misc_status register
@@ -123,13 +123,13 @@ cpld_pic_cascade(unsigned int irq, struct irq_desc *desc)
123} 123}
124 124
125static int 125static int
126cpld_pic_host_match(struct irq_host *h, struct device_node *node) 126cpld_pic_host_match(struct irq_domain *h, struct device_node *node)
127{ 127{
128 return cpld_pic_node == node; 128 return cpld_pic_node == node;
129} 129}
130 130
131static int 131static int
132cpld_pic_host_map(struct irq_host *h, unsigned int virq, 132cpld_pic_host_map(struct irq_domain *h, unsigned int virq,
133 irq_hw_number_t hw) 133 irq_hw_number_t hw)
134{ 134{
135 irq_set_status_flags(virq, IRQ_LEVEL); 135 irq_set_status_flags(virq, IRQ_LEVEL);
@@ -137,8 +137,7 @@ cpld_pic_host_map(struct irq_host *h, unsigned int virq,
137 return 0; 137 return 0;
138} 138}
139 139
140static struct 140static const struct irq_domain_ops cpld_pic_host_ops = {
141irq_host_ops cpld_pic_host_ops = {
142 .match = cpld_pic_host_match, 141 .match = cpld_pic_host_match,
143 .map = cpld_pic_host_map, 142 .map = cpld_pic_host_map,
144}; 143};
@@ -191,8 +190,7 @@ mpc5121_ads_cpld_pic_init(void)
191 190
192 cpld_pic_node = of_node_get(np); 191 cpld_pic_node = of_node_get(np);
193 192
194 cpld_pic_host = 193 cpld_pic_host = irq_domain_add_linear(np, 16, &cpld_pic_host_ops, NULL);
195 irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 16, &cpld_pic_host_ops, 16);
196 if (!cpld_pic_host) { 194 if (!cpld_pic_host) {
197 printk(KERN_ERR "CPLD PIC: failed to allocate irq host!\n"); 195 printk(KERN_ERR "CPLD PIC: failed to allocate irq host!\n");
198 goto end; 196 goto end;
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c
index 96f85e5e0cd3..17d91b7da315 100644
--- a/arch/powerpc/platforms/52xx/media5200.c
+++ b/arch/powerpc/platforms/52xx/media5200.c
@@ -45,7 +45,7 @@ static struct of_device_id mpc5200_gpio_ids[] __initdata = {
45struct media5200_irq { 45struct media5200_irq {
46 void __iomem *regs; 46 void __iomem *regs;
47 spinlock_t lock; 47 spinlock_t lock;
48 struct irq_host *irqhost; 48 struct irq_domain *irqhost;
49}; 49};
50struct media5200_irq media5200_irq; 50struct media5200_irq media5200_irq;
51 51
@@ -112,7 +112,7 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc)
112 raw_spin_unlock(&desc->lock); 112 raw_spin_unlock(&desc->lock);
113} 113}
114 114
115static int media5200_irq_map(struct irq_host *h, unsigned int virq, 115static int media5200_irq_map(struct irq_domain *h, unsigned int virq,
116 irq_hw_number_t hw) 116 irq_hw_number_t hw)
117{ 117{
118 pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw); 118 pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw);
@@ -122,7 +122,7 @@ static int media5200_irq_map(struct irq_host *h, unsigned int virq,
122 return 0; 122 return 0;
123} 123}
124 124
125static int media5200_irq_xlate(struct irq_host *h, struct device_node *ct, 125static int media5200_irq_xlate(struct irq_domain *h, struct device_node *ct,
126 const u32 *intspec, unsigned int intsize, 126 const u32 *intspec, unsigned int intsize,
127 irq_hw_number_t *out_hwirq, 127 irq_hw_number_t *out_hwirq,
128 unsigned int *out_flags) 128 unsigned int *out_flags)
@@ -136,7 +136,7 @@ static int media5200_irq_xlate(struct irq_host *h, struct device_node *ct,
136 return 0; 136 return 0;
137} 137}
138 138
139static struct irq_host_ops media5200_irq_ops = { 139static const struct irq_domain_ops media5200_irq_ops = {
140 .map = media5200_irq_map, 140 .map = media5200_irq_map,
141 .xlate = media5200_irq_xlate, 141 .xlate = media5200_irq_xlate,
142}; 142};
@@ -173,15 +173,12 @@ static void __init media5200_init_irq(void)
173 173
174 spin_lock_init(&media5200_irq.lock); 174 spin_lock_init(&media5200_irq.lock);
175 175
176 media5200_irq.irqhost = irq_alloc_host(fpga_np, IRQ_HOST_MAP_LINEAR, 176 media5200_irq.irqhost = irq_domain_add_linear(fpga_np,
177 MEDIA5200_NUM_IRQS, 177 MEDIA5200_NUM_IRQS, &media5200_irq_ops, &media5200_irq);
178 &media5200_irq_ops, -1);
179 if (!media5200_irq.irqhost) 178 if (!media5200_irq.irqhost)
180 goto out; 179 goto out;
181 pr_debug("%s: allocated irqhost\n", __func__); 180 pr_debug("%s: allocated irqhost\n", __func__);
182 181
183 media5200_irq.irqhost->host_data = &media5200_irq;
184
185 irq_set_handler_data(cascade_virq, &media5200_irq); 182 irq_set_handler_data(cascade_virq, &media5200_irq);
186 irq_set_chained_handler(cascade_virq, media5200_irq_cascade); 183 irq_set_chained_handler(cascade_virq, media5200_irq_cascade);
187 184
diff --git a/arch/powerpc/platforms/52xx/mpc5200_simple.c b/arch/powerpc/platforms/52xx/mpc5200_simple.c
index 846b789fb195..c0aa04068d69 100644
--- a/arch/powerpc/platforms/52xx/mpc5200_simple.c
+++ b/arch/powerpc/platforms/52xx/mpc5200_simple.c
@@ -50,6 +50,7 @@ static void __init mpc5200_simple_setup_arch(void)
50 50
51/* list of the supported boards */ 51/* list of the supported boards */
52static const char *board[] __initdata = { 52static const char *board[] __initdata = {
53 "anonymous,a4m072",
53 "anon,charon", 54 "anon,charon",
54 "intercontrol,digsy-mtc", 55 "intercontrol,digsy-mtc",
55 "manroland,mucmc52", 56 "manroland,mucmc52",
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_common.c b/arch/powerpc/platforms/52xx/mpc52xx_common.c
index 369fd5457a3f..d7e94f49532a 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_common.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_common.c
@@ -98,13 +98,11 @@ struct mpc52xx_gpio_wkup __iomem *wkup_gpio;
98 * of the localplus bus to the of_platform 98 * of the localplus bus to the of_platform
99 * bus. 99 * bus.
100 */ 100 */
101void __init 101void __init mpc52xx_declare_of_platform_devices(void)
102mpc52xx_declare_of_platform_devices(void)
103{ 102{
104 /* Find every child of the SOC node and add it to of_platform */ 103 /* Find all the 'platform' devices and register them. */
105 if (of_platform_bus_probe(NULL, mpc52xx_bus_ids, NULL)) 104 if (of_platform_populate(NULL, mpc52xx_bus_ids, NULL, NULL))
106 printk(KERN_ERR __FILE__ ": " 105 pr_err(__FILE__ ": Error while populating devices from DT\n");
107 "Error while probing of_platform bus\n");
108} 106}
109 107
110/* 108/*
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
index f94f06e52762..028470b95886 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c
@@ -81,7 +81,7 @@ MODULE_LICENSE("GPL");
81 * @regs: virtual address of GPT registers 81 * @regs: virtual address of GPT registers
82 * @lock: spinlock to coordinate between different functions. 82 * @lock: spinlock to coordinate between different functions.
83 * @gc: gpio_chip instance structure; used when GPIO is enabled 83 * @gc: gpio_chip instance structure; used when GPIO is enabled
84 * @irqhost: Pointer to irq_host instance; used when IRQ mode is supported 84 * @irqhost: Pointer to irq_domain instance; used when IRQ mode is supported
85 * @wdt_mode: only relevant for gpt0: bit 0 (MPC52xx_GPT_CAN_WDT) indicates 85 * @wdt_mode: only relevant for gpt0: bit 0 (MPC52xx_GPT_CAN_WDT) indicates
86 * if the gpt may be used as wdt, bit 1 (MPC52xx_GPT_IS_WDT) indicates 86 * if the gpt may be used as wdt, bit 1 (MPC52xx_GPT_IS_WDT) indicates
87 * if the timer is actively used as wdt which blocks gpt functions 87 * if the timer is actively used as wdt which blocks gpt functions
@@ -91,7 +91,7 @@ struct mpc52xx_gpt_priv {
91 struct device *dev; 91 struct device *dev;
92 struct mpc52xx_gpt __iomem *regs; 92 struct mpc52xx_gpt __iomem *regs;
93 spinlock_t lock; 93 spinlock_t lock;
94 struct irq_host *irqhost; 94 struct irq_domain *irqhost;
95 u32 ipb_freq; 95 u32 ipb_freq;
96 u8 wdt_mode; 96 u8 wdt_mode;
97 97
@@ -204,7 +204,7 @@ void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc)
204 } 204 }
205} 205}
206 206
207static int mpc52xx_gpt_irq_map(struct irq_host *h, unsigned int virq, 207static int mpc52xx_gpt_irq_map(struct irq_domain *h, unsigned int virq,
208 irq_hw_number_t hw) 208 irq_hw_number_t hw)
209{ 209{
210 struct mpc52xx_gpt_priv *gpt = h->host_data; 210 struct mpc52xx_gpt_priv *gpt = h->host_data;
@@ -216,7 +216,7 @@ static int mpc52xx_gpt_irq_map(struct irq_host *h, unsigned int virq,
216 return 0; 216 return 0;
217} 217}
218 218
219static int mpc52xx_gpt_irq_xlate(struct irq_host *h, struct device_node *ct, 219static int mpc52xx_gpt_irq_xlate(struct irq_domain *h, struct device_node *ct,
220 const u32 *intspec, unsigned int intsize, 220 const u32 *intspec, unsigned int intsize,
221 irq_hw_number_t *out_hwirq, 221 irq_hw_number_t *out_hwirq,
222 unsigned int *out_flags) 222 unsigned int *out_flags)
@@ -236,7 +236,7 @@ static int mpc52xx_gpt_irq_xlate(struct irq_host *h, struct device_node *ct,
236 return 0; 236 return 0;
237} 237}
238 238
239static struct irq_host_ops mpc52xx_gpt_irq_ops = { 239static const struct irq_domain_ops mpc52xx_gpt_irq_ops = {
240 .map = mpc52xx_gpt_irq_map, 240 .map = mpc52xx_gpt_irq_map,
241 .xlate = mpc52xx_gpt_irq_xlate, 241 .xlate = mpc52xx_gpt_irq_xlate,
242}; 242};
@@ -252,14 +252,12 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node)
252 if (!cascade_virq) 252 if (!cascade_virq)
253 return; 253 return;
254 254
255 gpt->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 1, 255 gpt->irqhost = irq_domain_add_linear(node, 1, &mpc52xx_gpt_irq_ops, gpt);
256 &mpc52xx_gpt_irq_ops, -1);
257 if (!gpt->irqhost) { 256 if (!gpt->irqhost) {
258 dev_err(gpt->dev, "irq_alloc_host() failed\n"); 257 dev_err(gpt->dev, "irq_domain_add_linear() failed\n");
259 return; 258 return;
260 } 259 }
261 260
262 gpt->irqhost->host_data = gpt;
263 irq_set_handler_data(cascade_virq, gpt); 261 irq_set_handler_data(cascade_virq, gpt);
264 irq_set_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade); 262 irq_set_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade);
265 263
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
index 1a9a49570579..8520b58a5e9a 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c
@@ -132,7 +132,7 @@ static struct of_device_id mpc52xx_sdma_ids[] __initdata = {
132 132
133static struct mpc52xx_intr __iomem *intr; 133static struct mpc52xx_intr __iomem *intr;
134static struct mpc52xx_sdma __iomem *sdma; 134static struct mpc52xx_sdma __iomem *sdma;
135static struct irq_host *mpc52xx_irqhost = NULL; 135static struct irq_domain *mpc52xx_irqhost = NULL;
136 136
137static unsigned char mpc52xx_map_senses[4] = { 137static unsigned char mpc52xx_map_senses[4] = {
138 IRQ_TYPE_LEVEL_HIGH, 138 IRQ_TYPE_LEVEL_HIGH,
@@ -301,7 +301,7 @@ static int mpc52xx_is_extirq(int l1, int l2)
301/** 301/**
302 * mpc52xx_irqhost_xlate - translate virq# from device tree interrupts property 302 * mpc52xx_irqhost_xlate - translate virq# from device tree interrupts property
303 */ 303 */
304static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct, 304static int mpc52xx_irqhost_xlate(struct irq_domain *h, struct device_node *ct,
305 const u32 *intspec, unsigned int intsize, 305 const u32 *intspec, unsigned int intsize,
306 irq_hw_number_t *out_hwirq, 306 irq_hw_number_t *out_hwirq,
307 unsigned int *out_flags) 307 unsigned int *out_flags)
@@ -335,7 +335,7 @@ static int mpc52xx_irqhost_xlate(struct irq_host *h, struct device_node *ct,
335/** 335/**
336 * mpc52xx_irqhost_map - Hook to map from virq to an irq_chip structure 336 * mpc52xx_irqhost_map - Hook to map from virq to an irq_chip structure
337 */ 337 */
338static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq, 338static int mpc52xx_irqhost_map(struct irq_domain *h, unsigned int virq,
339 irq_hw_number_t irq) 339 irq_hw_number_t irq)
340{ 340{
341 int l1irq; 341 int l1irq;
@@ -384,7 +384,7 @@ static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq,
384 return 0; 384 return 0;
385} 385}
386 386
387static struct irq_host_ops mpc52xx_irqhost_ops = { 387static const struct irq_domain_ops mpc52xx_irqhost_ops = {
388 .xlate = mpc52xx_irqhost_xlate, 388 .xlate = mpc52xx_irqhost_xlate,
389 .map = mpc52xx_irqhost_map, 389 .map = mpc52xx_irqhost_map,
390}; 390};
@@ -444,9 +444,9 @@ void __init mpc52xx_init_irq(void)
444 * As last step, add an irq host to translate the real 444 * As last step, add an irq host to translate the real
445 * hw irq information provided by the ofw to linux virq 445 * hw irq information provided by the ofw to linux virq
446 */ 446 */
447 mpc52xx_irqhost = irq_alloc_host(picnode, IRQ_HOST_MAP_LINEAR, 447 mpc52xx_irqhost = irq_domain_add_linear(picnode,
448 MPC52xx_IRQ_HIGHTESTHWIRQ, 448 MPC52xx_IRQ_HIGHTESTHWIRQ,
449 &mpc52xx_irqhost_ops, -1); 449 &mpc52xx_irqhost_ops, NULL);
450 450
451 if (!mpc52xx_irqhost) 451 if (!mpc52xx_irqhost)
452 panic(__FILE__ ": Cannot allocate the IRQ host\n"); 452 panic(__FILE__ ": Cannot allocate the IRQ host\n");
diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
index 8ccf9ed62fe2..328d221fd1c0 100644
--- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
+++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c
@@ -29,7 +29,7 @@ static DEFINE_RAW_SPINLOCK(pci_pic_lock);
29 29
30struct pq2ads_pci_pic { 30struct pq2ads_pci_pic {
31 struct device_node *node; 31 struct device_node *node;
32 struct irq_host *host; 32 struct irq_domain *host;
33 33
34 struct { 34 struct {
35 u32 stat; 35 u32 stat;
@@ -103,7 +103,7 @@ static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc)
103 } 103 }
104} 104}
105 105
106static int pci_pic_host_map(struct irq_host *h, unsigned int virq, 106static int pci_pic_host_map(struct irq_domain *h, unsigned int virq,
107 irq_hw_number_t hw) 107 irq_hw_number_t hw)
108{ 108{
109 irq_set_status_flags(virq, IRQ_LEVEL); 109 irq_set_status_flags(virq, IRQ_LEVEL);
@@ -112,14 +112,14 @@ static int pci_pic_host_map(struct irq_host *h, unsigned int virq,
112 return 0; 112 return 0;
113} 113}
114 114
115static struct irq_host_ops pci_pic_host_ops = { 115static const struct irq_domain_ops pci_pic_host_ops = {
116 .map = pci_pic_host_map, 116 .map = pci_pic_host_map,
117}; 117};
118 118
119int __init pq2ads_pci_init_irq(void) 119int __init pq2ads_pci_init_irq(void)
120{ 120{
121 struct pq2ads_pci_pic *priv; 121 struct pq2ads_pci_pic *priv;
122 struct irq_host *host; 122 struct irq_domain *host;
123 struct device_node *np; 123 struct device_node *np;
124 int ret = -ENODEV; 124 int ret = -ENODEV;
125 int irq; 125 int irq;
@@ -156,17 +156,13 @@ int __init pq2ads_pci_init_irq(void)
156 out_be32(&priv->regs->mask, ~0); 156 out_be32(&priv->regs->mask, ~0);
157 mb(); 157 mb();
158 158
159 host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, NUM_IRQS, 159 host = irq_domain_add_linear(np, NUM_IRQS, &pci_pic_host_ops, priv);
160 &pci_pic_host_ops, NUM_IRQS);
161 if (!host) { 160 if (!host) {
162 ret = -ENOMEM; 161 ret = -ENOMEM;
163 goto out_unmap_regs; 162 goto out_unmap_regs;
164 } 163 }
165 164
166 host->host_data = priv;
167
168 priv->host = host; 165 priv->host = host;
169 host->host_data = priv;
170 irq_set_handler_data(irq, priv); 166 irq_set_handler_data(irq, priv);
171 irq_set_chained_handler(irq, pq2ads_pci_irq_demux); 167 irq_set_chained_handler(irq, pq2ads_pci_irq_demux);
172 168
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig
index d7946be298b6..f000d81c4e31 100644
--- a/arch/powerpc/platforms/85xx/Kconfig
+++ b/arch/powerpc/platforms/85xx/Kconfig
@@ -6,6 +6,7 @@ menuconfig FSL_SOC_BOOKE
6 select MPIC 6 select MPIC
7 select PPC_PCI_CHOICE 7 select PPC_PCI_CHOICE
8 select FSL_PCI if PCI 8 select FSL_PCI if PCI
9 select SERIAL_8250_EXTENDED if SERIAL_8250
9 select SERIAL_8250_SHARE_IRQ if SERIAL_8250 10 select SERIAL_8250_SHARE_IRQ if SERIAL_8250
10 default y 11 default y
11 12
@@ -13,6 +14,15 @@ if FSL_SOC_BOOKE
13 14
14if PPC32 15if PPC32
15 16
17config FSL_85XX_CACHE_SRAM
18 bool
19 select PPC_LIB_RHEAP
20 help
21 When selected, this option enables cache-sram support
22 for memory allocation on P1/P2 QorIQ platforms.
23 cache-sram-size and cache-sram-offset kernel boot
24 parameters should be passed when this option is enabled.
25
16config MPC8540_ADS 26config MPC8540_ADS
17 bool "Freescale MPC8540 ADS" 27 bool "Freescale MPC8540 ADS"
18 select DEFAULT_UIMAGE 28 select DEFAULT_UIMAGE
@@ -30,6 +40,7 @@ config MPC85xx_CDS
30 bool "Freescale MPC85xx CDS" 40 bool "Freescale MPC85xx CDS"
31 select DEFAULT_UIMAGE 41 select DEFAULT_UIMAGE
32 select PPC_I8259 42 select PPC_I8259
43 select HAS_RAPIDIO
33 help 44 help
34 This option enables support for the MPC85xx CDS board 45 This option enables support for the MPC85xx CDS board
35 46
@@ -80,7 +91,6 @@ config P1010_RDB
80config P1022_DS 91config P1022_DS
81 bool "Freescale P1022 DS" 92 bool "Freescale P1022 DS"
82 select DEFAULT_UIMAGE 93 select DEFAULT_UIMAGE
83 select PHYS_64BIT # The DTS has 36-bit addresses
84 select SWIOTLB 94 select SWIOTLB
85 help 95 help
86 This option enables support for the Freescale P1022DS reference board. 96 This option enables support for the Freescale P1022DS reference board.
@@ -171,6 +181,21 @@ config SBC8560
171 help 181 help
172 This option enables support for the Wind River SBC8560 board 182 This option enables support for the Wind River SBC8560 board
173 183
184config GE_IMP3A
185 bool "GE Intelligent Platforms IMP3A"
186 select DEFAULT_UIMAGE
187 select SWIOTLB
188 select MMIO_NVRAM
189 select GENERIC_GPIO
190 select ARCH_REQUIRE_GPIOLIB
191 select GE_FPGA
192 help
193 This option enables support for the GE Intelligent Platforms IMP3A
194 board.
195
196 This board is a 3U CompactPCI Single Board Computer with a Freescale
197 P2020 processor.
198
174config P2041_RDB 199config P2041_RDB
175 bool "Freescale P2041 RDB" 200 bool "Freescale P2041 RDB"
176 select DEFAULT_UIMAGE 201 select DEFAULT_UIMAGE
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile
index 9cb2d4320dcc..2125d4ca068a 100644
--- a/arch/powerpc/platforms/85xx/Makefile
+++ b/arch/powerpc/platforms/85xx/Makefile
@@ -27,3 +27,4 @@ obj-$(CONFIG_SBC8548) += sbc8548.o
27obj-$(CONFIG_SOCRATES) += socrates.o socrates_fpga_pic.o 27obj-$(CONFIG_SOCRATES) += socrates.o socrates_fpga_pic.o
28obj-$(CONFIG_KSI8560) += ksi8560.o 28obj-$(CONFIG_KSI8560) += ksi8560.o
29obj-$(CONFIG_XES_MPC85xx) += xes_mpc85xx.o 29obj-$(CONFIG_XES_MPC85xx) += xes_mpc85xx.o
30obj-$(CONFIG_GE_IMP3A) += ge_imp3a.o
diff --git a/arch/powerpc/platforms/85xx/corenet_ds.c b/arch/powerpc/platforms/85xx/corenet_ds.c
index 07e3e6c47371..df69e99e511c 100644
--- a/arch/powerpc/platforms/85xx/corenet_ds.c
+++ b/arch/powerpc/platforms/85xx/corenet_ds.c
@@ -36,8 +36,8 @@
36void __init corenet_ds_pic_init(void) 36void __init corenet_ds_pic_init(void)
37{ 37{
38 struct mpic *mpic; 38 struct mpic *mpic;
39 unsigned int flags = MPIC_BIG_ENDIAN | 39 unsigned int flags = MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU |
40 MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU; 40 MPIC_NO_RESET;
41 41
42 if (ppc_md.get_irq == mpic_get_coreint_irq) 42 if (ppc_md.get_irq == mpic_get_coreint_irq)
43 flags |= MPIC_ENABLE_COREINT; 43 flags |= MPIC_ENABLE_COREINT;
diff --git a/arch/powerpc/platforms/85xx/ge_imp3a.c b/arch/powerpc/platforms/85xx/ge_imp3a.c
new file mode 100644
index 000000000000..d50056f424f6
--- /dev/null
+++ b/arch/powerpc/platforms/85xx/ge_imp3a.c
@@ -0,0 +1,246 @@
1/*
2 * GE IMP3A Board Setup
3 *
4 * Author Martyn Welch <martyn.welch@ge.com>
5 *
6 * Copyright 2010 GE Intelligent Platforms Embedded Systems, Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 * Based on: mpc85xx_ds.c (MPC85xx DS Board Setup)
14 * Copyright 2007 Freescale Semiconductor Inc.
15 */
16
17#include <linux/stddef.h>
18#include <linux/kernel.h>
19#include <linux/pci.h>
20#include <linux/kdev_t.h>
21#include <linux/delay.h>
22#include <linux/seq_file.h>
23#include <linux/interrupt.h>
24#include <linux/of_platform.h>
25#include <linux/memblock.h>
26
27#include <asm/system.h>
28#include <asm/time.h>
29#include <asm/machdep.h>
30#include <asm/pci-bridge.h>
31#include <mm/mmu_decl.h>
32#include <asm/prom.h>
33#include <asm/udbg.h>
34#include <asm/mpic.h>
35#include <asm/swiotlb.h>
36#include <asm/nvram.h>
37
38#include <sysdev/fsl_soc.h>
39#include <sysdev/fsl_pci.h>
40#include "smp.h"
41
42#include "mpc85xx.h"
43#include <sysdev/ge/ge_pic.h>
44
45void __iomem *imp3a_regs;
46
47void __init ge_imp3a_pic_init(void)
48{
49 struct mpic *mpic;
50 struct device_node *np;
51 struct device_node *cascade_node = NULL;
52 unsigned long root = of_get_flat_dt_root();
53
54 if (of_flat_dt_is_compatible(root, "fsl,MPC8572DS-CAMP")) {
55 mpic = mpic_alloc(NULL, 0,
56 MPIC_NO_RESET |
57 MPIC_BIG_ENDIAN |
58 MPIC_SINGLE_DEST_CPU,
59 0, 256, " OpenPIC ");
60 } else {
61 mpic = mpic_alloc(NULL, 0,
62 MPIC_BIG_ENDIAN |
63 MPIC_SINGLE_DEST_CPU,
64 0, 256, " OpenPIC ");
65 }
66
67 BUG_ON(mpic == NULL);
68 mpic_init(mpic);
69 /*
70 * There is a simple interrupt handler in the main FPGA, this needs
71 * to be cascaded into the MPIC
72 */
73 for_each_node_by_type(np, "interrupt-controller")
74 if (of_device_is_compatible(np, "gef,fpga-pic-1.00")) {
75 cascade_node = np;
76 break;
77 }
78
79 if (cascade_node == NULL) {
80 printk(KERN_WARNING "IMP3A: No FPGA PIC\n");
81 return;
82 }
83
84 gef_pic_init(cascade_node);
85 of_node_put(cascade_node);
86}
87
88#ifdef CONFIG_PCI
89static int primary_phb_addr;
90#endif /* CONFIG_PCI */
91
92/*
93 * Setup the architecture
94 */
95static void __init ge_imp3a_setup_arch(void)
96{
97 struct device_node *regs;
98#ifdef CONFIG_PCI
99 struct device_node *np;
100 struct pci_controller *hose;
101#endif
102 dma_addr_t max = 0xffffffff;
103
104 if (ppc_md.progress)
105 ppc_md.progress("ge_imp3a_setup_arch()", 0);
106
107#ifdef CONFIG_PCI
108 for_each_node_by_type(np, "pci") {
109 if (of_device_is_compatible(np, "fsl,mpc8540-pci") ||
110 of_device_is_compatible(np, "fsl,mpc8548-pcie") ||
111 of_device_is_compatible(np, "fsl,p2020-pcie")) {
112 struct resource rsrc;
113 of_address_to_resource(np, 0, &rsrc);
114 if ((rsrc.start & 0xfffff) == primary_phb_addr)
115 fsl_add_bridge(np, 1);
116 else
117 fsl_add_bridge(np, 0);
118
119 hose = pci_find_hose_for_OF_device(np);
120 max = min(max, hose->dma_window_base_cur +
121 hose->dma_window_size);
122 }
123 }
124#endif
125
126 mpc85xx_smp_init();
127
128#ifdef CONFIG_SWIOTLB
129 if (memblock_end_of_DRAM() > max) {
130 ppc_swiotlb_enable = 1;
131 set_pci_dma_ops(&swiotlb_dma_ops);
132 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_swiotlb;
133 }
134#endif
135
136 /* Remap basic board registers */
137 regs = of_find_compatible_node(NULL, NULL, "ge,imp3a-fpga-regs");
138 if (regs) {
139 imp3a_regs = of_iomap(regs, 0);
140 if (imp3a_regs == NULL)
141 printk(KERN_WARNING "Unable to map board registers\n");
142 of_node_put(regs);
143 }
144
145#if defined(CONFIG_MMIO_NVRAM)
146 mmio_nvram_init();
147#endif
148
149 printk(KERN_INFO "GE Intelligent Platforms IMP3A 3U cPCI SBC\n");
150}
151
152/* Return the PCB revision */
153static unsigned int ge_imp3a_get_pcb_rev(void)
154{
155 unsigned int reg;
156
157 reg = ioread16(imp3a_regs);
158 return (reg >> 8) & 0xff;
159}
160
161/* Return the board (software) revision */
162static unsigned int ge_imp3a_get_board_rev(void)
163{
164 unsigned int reg;
165
166 reg = ioread16(imp3a_regs + 0x2);
167 return reg & 0xff;
168}
169
170/* Return the FPGA revision */
171static unsigned int ge_imp3a_get_fpga_rev(void)
172{
173 unsigned int reg;
174
175 reg = ioread16(imp3a_regs + 0x2);
176 return (reg >> 8) & 0xff;
177}
178
179/* Return compactPCI Geographical Address */
180static unsigned int ge_imp3a_get_cpci_geo_addr(void)
181{
182 unsigned int reg;
183
184 reg = ioread16(imp3a_regs + 0x6);
185 return (reg & 0x0f00) >> 8;
186}
187
188/* Return compactPCI System Controller Status */
189static unsigned int ge_imp3a_get_cpci_is_syscon(void)
190{
191 unsigned int reg;
192
193 reg = ioread16(imp3a_regs + 0x6);
194 return reg & (1 << 12);
195}
196
197static void ge_imp3a_show_cpuinfo(struct seq_file *m)
198{
199 seq_printf(m, "Vendor\t\t: GE Intelligent Platforms\n");
200
201 seq_printf(m, "Revision\t: %u%c\n", ge_imp3a_get_pcb_rev(),
202 ('A' + ge_imp3a_get_board_rev() - 1));
203
204 seq_printf(m, "FPGA Revision\t: %u\n", ge_imp3a_get_fpga_rev());
205
206 seq_printf(m, "cPCI geo. addr\t: %u\n", ge_imp3a_get_cpci_geo_addr());
207
208 seq_printf(m, "cPCI syscon\t: %s\n",
209 ge_imp3a_get_cpci_is_syscon() ? "yes" : "no");
210}
211
212/*
213 * Called very early, device-tree isn't unflattened
214 */
215static int __init ge_imp3a_probe(void)
216{
217 unsigned long root = of_get_flat_dt_root();
218
219 if (of_flat_dt_is_compatible(root, "ge,IMP3A")) {
220#ifdef CONFIG_PCI
221 primary_phb_addr = 0x9000;
222#endif
223 return 1;
224 }
225
226 return 0;
227}
228
229machine_device_initcall(ge_imp3a, mpc85xx_common_publish_devices);
230
231machine_arch_initcall(ge_imp3a, swiotlb_setup_bus_notifier);
232
233define_machine(ge_imp3a) {
234 .name = "GE_IMP3A",
235 .probe = ge_imp3a_probe,
236 .setup_arch = ge_imp3a_setup_arch,
237 .init_IRQ = ge_imp3a_pic_init,
238 .show_cpuinfo = ge_imp3a_show_cpuinfo,
239#ifdef CONFIG_PCI
240 .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
241#endif
242 .get_irq = mpic_get_irq,
243 .restart = fsl_rstcr_restart,
244 .calibrate_decr = generic_calibrate_decr,
245 .progress = udbg_progress,
246};
diff --git a/arch/powerpc/platforms/85xx/ksi8560.c b/arch/powerpc/platforms/85xx/ksi8560.c
index 20f75d7819c6..60120e55da41 100644
--- a/arch/powerpc/platforms/85xx/ksi8560.c
+++ b/arch/powerpc/platforms/85xx/ksi8560.c
@@ -57,8 +57,7 @@ static void machine_restart(char *cmd)
57 57
58static void __init ksi8560_pic_init(void) 58static void __init ksi8560_pic_init(void)
59{ 59{
60 struct mpic *mpic = mpic_alloc(NULL, 0, 60 struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
61 MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
62 0, 256, " OpenPIC "); 61 0, 256, " OpenPIC ");
63 BUG_ON(mpic == NULL); 62 BUG_ON(mpic == NULL);
64 mpic_init(mpic); 63 mpic_init(mpic);
diff --git a/arch/powerpc/platforms/85xx/mpc8536_ds.c b/arch/powerpc/platforms/85xx/mpc8536_ds.c
index cf266826682e..f58872688d8f 100644
--- a/arch/powerpc/platforms/85xx/mpc8536_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc8536_ds.c
@@ -36,9 +36,7 @@
36 36
37void __init mpc8536_ds_pic_init(void) 37void __init mpc8536_ds_pic_init(void)
38{ 38{
39 struct mpic *mpic = mpic_alloc(NULL, 0, 39 struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
40 MPIC_WANTS_RESET |
41 MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS,
42 0, 256, " OpenPIC "); 40 0, 256, " OpenPIC ");
43 BUG_ON(mpic == NULL); 41 BUG_ON(mpic == NULL);
44 mpic_init(mpic); 42 mpic_init(mpic);
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
index 3bebb5173bfc..d19f675cb369 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c
@@ -50,8 +50,7 @@ static int mpc85xx_exclude_device(struct pci_controller *hose,
50 50
51static void __init mpc85xx_ads_pic_init(void) 51static void __init mpc85xx_ads_pic_init(void)
52{ 52{
53 struct mpic *mpic = mpic_alloc(NULL, 0, 53 struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
54 MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
55 0, 256, " OpenPIC "); 54 0, 256, " OpenPIC ");
56 BUG_ON(mpic == NULL); 55 BUG_ON(mpic == NULL);
57 mpic_init(mpic); 56 mpic_init(mpic);
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
index 40f03da616a9..ab5f0bf19454 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c
@@ -3,7 +3,7 @@
3 * 3 *
4 * Maintained by Kumar Gala (see MAINTAINERS for contact information) 4 * Maintained by Kumar Gala (see MAINTAINERS for contact information)
5 * 5 *
6 * Copyright 2005 Freescale Semiconductor Inc. 6 * Copyright 2005, 2011-2012 Freescale Semiconductor Inc.
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify it 8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the 9 * under the terms of the GNU General Public License as published by the
@@ -48,17 +48,24 @@
48 48
49#include "mpc85xx.h" 49#include "mpc85xx.h"
50 50
51/* CADMUS info */ 51/*
52/* xxx - galak, move into device tree */ 52 * The CDS board contains an FPGA/CPLD called "Cadmus", which collects
53#define CADMUS_BASE (0xf8004000) 53 * various logic and performs system control functions.
54#define CADMUS_SIZE (256) 54 * Here is the FPGA/CPLD register map.
55#define CM_VER (0) 55 */
56#define CM_CSR (1) 56struct cadmus_reg {
57#define CM_RST (2) 57 u8 cm_ver; /* Board version */
58 58 u8 cm_csr; /* General control/status */
59 u8 cm_rst; /* Reset control */
60 u8 cm_hsclk; /* High speed clock */
61 u8 cm_hsxclk; /* High speed clock extended */
62 u8 cm_led; /* LED data */
63 u8 cm_pci; /* PCI control/status */
64 u8 cm_dma; /* DMA control */
65 u8 res[248]; /* Total 256 bytes */
66};
59 67
60static int cds_pci_slot = 2; 68static struct cadmus_reg *cadmus;
61static volatile u8 *cadmus;
62 69
63#ifdef CONFIG_PCI 70#ifdef CONFIG_PCI
64 71
@@ -158,6 +165,33 @@ DECLARE_PCI_FIXUP_EARLY(0x1957, 0x3fff, skip_fake_bridge);
158DECLARE_PCI_FIXUP_EARLY(0x3fff, 0x1957, skip_fake_bridge); 165DECLARE_PCI_FIXUP_EARLY(0x3fff, 0x1957, skip_fake_bridge);
159DECLARE_PCI_FIXUP_EARLY(0xff3f, 0x5719, skip_fake_bridge); 166DECLARE_PCI_FIXUP_EARLY(0xff3f, 0x5719, skip_fake_bridge);
160 167
168#define PCI_DEVICE_ID_IDT_TSI310 0x01a7
169
170/*
171 * Fix Tsi310 PCI-X bridge resource.
172 * Force the bridge to open a window from 0x0000-0x1fff in PCI I/O space.
173 * This allows legacy I/O(i8259, etc) on the VIA southbridge to be accessed.
174 */
175void mpc85xx_cds_fixup_bus(struct pci_bus *bus)
176{
177 struct pci_dev *dev = bus->self;
178 struct resource *res = bus->resource[0];
179
180 if (dev != NULL &&
181 dev->vendor == PCI_VENDOR_ID_IBM &&
182 dev->device == PCI_DEVICE_ID_IDT_TSI310) {
183 if (res) {
184 res->start = 0;
185 res->end = 0x1fff;
186 res->flags = IORESOURCE_IO;
187 pr_info("mpc85xx_cds: PCI bridge resource fixup applied\n");
188 pr_info("mpc85xx_cds: %pR\n", res);
189 }
190 }
191
192 fsl_pcibios_fixup_bus(bus);
193}
194
161#ifdef CONFIG_PPC_I8259 195#ifdef CONFIG_PPC_I8259
162static void mpc85xx_8259_cascade_handler(unsigned int irq, 196static void mpc85xx_8259_cascade_handler(unsigned int irq,
163 struct irq_desc *desc) 197 struct irq_desc *desc)
@@ -188,8 +222,7 @@ static struct irqaction mpc85xxcds_8259_irqaction = {
188static void __init mpc85xx_cds_pic_init(void) 222static void __init mpc85xx_cds_pic_init(void)
189{ 223{
190 struct mpic *mpic; 224 struct mpic *mpic;
191 mpic = mpic_alloc(NULL, 0, 225 mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
192 MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
193 0, 256, " OpenPIC "); 226 0, 256, " OpenPIC ");
194 BUG_ON(mpic == NULL); 227 BUG_ON(mpic == NULL);
195 mpic_init(mpic); 228 mpic_init(mpic);
@@ -249,20 +282,30 @@ machine_device_initcall(mpc85xx_cds, mpc85xx_cds_8259_attach);
249 */ 282 */
250static void __init mpc85xx_cds_setup_arch(void) 283static void __init mpc85xx_cds_setup_arch(void)
251{ 284{
252#ifdef CONFIG_PCI
253 struct device_node *np; 285 struct device_node *np;
254#endif 286 int cds_pci_slot;
255 287
256 if (ppc_md.progress) 288 if (ppc_md.progress)
257 ppc_md.progress("mpc85xx_cds_setup_arch()", 0); 289 ppc_md.progress("mpc85xx_cds_setup_arch()", 0);
258 290
259 cadmus = ioremap(CADMUS_BASE, CADMUS_SIZE); 291 np = of_find_compatible_node(NULL, NULL, "fsl,mpc8548cds-fpga");
260 cds_pci_slot = ((cadmus[CM_CSR] >> 6) & 0x3) + 1; 292 if (!np) {
293 pr_err("Could not find FPGA node.\n");
294 return;
295 }
296
297 cadmus = of_iomap(np, 0);
298 of_node_put(np);
299 if (!cadmus) {
300 pr_err("Fail to map FPGA area.\n");
301 return;
302 }
261 303
262 if (ppc_md.progress) { 304 if (ppc_md.progress) {
263 char buf[40]; 305 char buf[40];
306 cds_pci_slot = ((in_8(&cadmus->cm_csr) >> 6) & 0x3) + 1;
264 snprintf(buf, 40, "CDS Version = 0x%x in slot %d\n", 307 snprintf(buf, 40, "CDS Version = 0x%x in slot %d\n",
265 cadmus[CM_VER], cds_pci_slot); 308 in_8(&cadmus->cm_ver), cds_pci_slot);
266 ppc_md.progress(buf, 0); 309 ppc_md.progress(buf, 0);
267 } 310 }
268 311
@@ -292,7 +335,8 @@ static void mpc85xx_cds_show_cpuinfo(struct seq_file *m)
292 svid = mfspr(SPRN_SVR); 335 svid = mfspr(SPRN_SVR);
293 336
294 seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n"); 337 seq_printf(m, "Vendor\t\t: Freescale Semiconductor\n");
295 seq_printf(m, "Machine\t\t: MPC85xx CDS (0x%x)\n", cadmus[CM_VER]); 338 seq_printf(m, "Machine\t\t: MPC85xx CDS (0x%x)\n",
339 in_8(&cadmus->cm_ver));
296 seq_printf(m, "PVR\t\t: 0x%x\n", pvid); 340 seq_printf(m, "PVR\t\t: 0x%x\n", pvid);
297 seq_printf(m, "SVR\t\t: 0x%x\n", svid); 341 seq_printf(m, "SVR\t\t: 0x%x\n", svid);
298 342
@@ -323,7 +367,7 @@ define_machine(mpc85xx_cds) {
323 .get_irq = mpic_get_irq, 367 .get_irq = mpic_get_irq,
324#ifdef CONFIG_PCI 368#ifdef CONFIG_PCI
325 .restart = mpc85xx_cds_restart, 369 .restart = mpc85xx_cds_restart,
326 .pcibios_fixup_bus = fsl_pcibios_fixup_bus, 370 .pcibios_fixup_bus = mpc85xx_cds_fixup_bus,
327#else 371#else
328 .restart = fsl_rstcr_restart, 372 .restart = fsl_rstcr_restart,
329#endif 373#endif
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
index eefbb91e1d61..6e23e3e34bd9 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c
@@ -72,13 +72,13 @@ void __init mpc85xx_ds_pic_init(void)
72 72
73 if (of_flat_dt_is_compatible(root, "fsl,MPC8572DS-CAMP")) { 73 if (of_flat_dt_is_compatible(root, "fsl,MPC8572DS-CAMP")) {
74 mpic = mpic_alloc(NULL, 0, 74 mpic = mpic_alloc(NULL, 0,
75 MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS | 75 MPIC_NO_RESET |
76 MPIC_BIG_ENDIAN |
76 MPIC_SINGLE_DEST_CPU, 77 MPIC_SINGLE_DEST_CPU,
77 0, 256, " OpenPIC "); 78 0, 256, " OpenPIC ");
78 } else { 79 } else {
79 mpic = mpic_alloc(NULL, 0, 80 mpic = mpic_alloc(NULL, 0,
80 MPIC_WANTS_RESET | 81 MPIC_BIG_ENDIAN |
81 MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
82 MPIC_SINGLE_DEST_CPU, 82 MPIC_SINGLE_DEST_CPU,
83 0, 256, " OpenPIC "); 83 0, 256, " OpenPIC ");
84 } 84 }
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
index 1d15a0cd2c82..f33662b46b8d 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (C) Freescale Semicondutor, Inc. 2006-2010. All rights reserved. 2 * Copyright (C) 2006-2010, 2012 Freescale Semicondutor, Inc.
3 * All rights reserved.
3 * 4 *
4 * Author: Andy Fleming <afleming@freescale.com> 5 * Author: Andy Fleming <afleming@freescale.com>
5 * 6 *
@@ -51,6 +52,7 @@
51#include <asm/qe_ic.h> 52#include <asm/qe_ic.h>
52#include <asm/mpic.h> 53#include <asm/mpic.h>
53#include <asm/swiotlb.h> 54#include <asm/swiotlb.h>
55#include <asm/fsl_guts.h>
54#include "smp.h" 56#include "smp.h"
55 57
56#include "mpc85xx.h" 58#include "mpc85xx.h"
@@ -268,34 +270,27 @@ static void __init mpc85xx_mds_qe_init(void)
268 mpc85xx_mds_reset_ucc_phys(); 270 mpc85xx_mds_reset_ucc_phys();
269 271
270 if (machine_is(p1021_mds)) { 272 if (machine_is(p1021_mds)) {
271#define MPC85xx_PMUXCR_OFFSET 0x60
272#define MPC85xx_PMUXCR_QE0 0x00008000
273#define MPC85xx_PMUXCR_QE3 0x00001000
274#define MPC85xx_PMUXCR_QE9 0x00000040
275#define MPC85xx_PMUXCR_QE12 0x00000008
276 static __be32 __iomem *pmuxcr;
277 273
278 np = of_find_node_by_name(NULL, "global-utilities"); 274 struct ccsr_guts_85xx __iomem *guts;
279 275
276 np = of_find_node_by_name(NULL, "global-utilities");
280 if (np) { 277 if (np) {
281 pmuxcr = of_iomap(np, 0) + MPC85xx_PMUXCR_OFFSET; 278 guts = of_iomap(np, 0);
282 279 if (!guts)
283 if (!pmuxcr) 280 pr_err("mpc85xx-rdb: could not map global utilities register\n");
284 printk(KERN_EMERG "Error: Alternate function" 281 else{
285 " signal multiplex control register not"
286 " mapped!\n");
287 else
288 /* P1021 has pins muxed for QE and other functions. To 282 /* P1021 has pins muxed for QE and other functions. To
289 * enable QE UEC mode, we need to set bit QE0 for UCC1 283 * enable QE UEC mode, we need to set bit QE0 for UCC1
290 * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9 284 * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9
291 * and QE12 for QE MII management signals in PMUXCR 285 * and QE12 for QE MII management signals in PMUXCR
292 * register. 286 * register.
293 */ 287 */
294 setbits32(pmuxcr, MPC85xx_PMUXCR_QE0 | 288 setbits32(&guts->pmuxcr, MPC85xx_PMUXCR_QE(0) |
295 MPC85xx_PMUXCR_QE3 | 289 MPC85xx_PMUXCR_QE(3) |
296 MPC85xx_PMUXCR_QE9 | 290 MPC85xx_PMUXCR_QE(9) |
297 MPC85xx_PMUXCR_QE12); 291 MPC85xx_PMUXCR_QE(12));
298 292 iounmap(guts);
293 }
299 of_node_put(np); 294 of_node_put(np);
300 } 295 }
301 296
@@ -434,9 +429,8 @@ machine_arch_initcall(p1021_mds, swiotlb_setup_bus_notifier);
434 429
435static void __init mpc85xx_mds_pic_init(void) 430static void __init mpc85xx_mds_pic_init(void)
436{ 431{
437 struct mpic *mpic = mpic_alloc(NULL, 0, 432 struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
438 MPIC_WANTS_RESET | MPIC_BIG_ENDIAN | 433 MPIC_SINGLE_DEST_CPU,
439 MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU,
440 0, 256, " OpenPIC "); 434 0, 256, " OpenPIC ");
441 BUG_ON(mpic == NULL); 435 BUG_ON(mpic == NULL);
442 436
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
index ccf520e890be..db214cd4c822 100644
--- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * MPC85xx RDB Board Setup 2 * MPC85xx RDB Board Setup
3 * 3 *
4 * Copyright 2009 Freescale Semiconductor Inc. 4 * Copyright 2009,2012 Freescale Semiconductor Inc.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the 7 * under the terms of the GNU General Public License as published by the
@@ -26,6 +26,9 @@
26#include <asm/prom.h> 26#include <asm/prom.h>
27#include <asm/udbg.h> 27#include <asm/udbg.h>
28#include <asm/mpic.h> 28#include <asm/mpic.h>
29#include <asm/qe.h>
30#include <asm/qe_ic.h>
31#include <asm/fsl_guts.h>
29 32
30#include <sysdev/fsl_soc.h> 33#include <sysdev/fsl_soc.h>
31#include <sysdev/fsl_pci.h> 34#include <sysdev/fsl_pci.h>
@@ -47,21 +50,36 @@ void __init mpc85xx_rdb_pic_init(void)
47 struct mpic *mpic; 50 struct mpic *mpic;
48 unsigned long root = of_get_flat_dt_root(); 51 unsigned long root = of_get_flat_dt_root();
49 52
53#ifdef CONFIG_QUICC_ENGINE
54 struct device_node *np;
55#endif
56
50 if (of_flat_dt_is_compatible(root, "fsl,MPC85XXRDB-CAMP")) { 57 if (of_flat_dt_is_compatible(root, "fsl,MPC85XXRDB-CAMP")) {
51 mpic = mpic_alloc(NULL, 0, 58 mpic = mpic_alloc(NULL, 0, MPIC_NO_RESET |
52 MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS | 59 MPIC_BIG_ENDIAN |
53 MPIC_SINGLE_DEST_CPU, 60 MPIC_SINGLE_DEST_CPU,
54 0, 256, " OpenPIC "); 61 0, 256, " OpenPIC ");
55 } else { 62 } else {
56 mpic = mpic_alloc(NULL, 0, 63 mpic = mpic_alloc(NULL, 0,
57 MPIC_WANTS_RESET | 64 MPIC_BIG_ENDIAN |
58 MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
59 MPIC_SINGLE_DEST_CPU, 65 MPIC_SINGLE_DEST_CPU,
60 0, 256, " OpenPIC "); 66 0, 256, " OpenPIC ");
61 } 67 }
62 68
63 BUG_ON(mpic == NULL); 69 BUG_ON(mpic == NULL);
64 mpic_init(mpic); 70 mpic_init(mpic);
71
72#ifdef CONFIG_QUICC_ENGINE
73 np = of_find_compatible_node(NULL, NULL, "fsl,qe-ic");
74 if (np) {
75 qe_ic_init(np, 0, qe_ic_cascade_low_mpic,
76 qe_ic_cascade_high_mpic);
77 of_node_put(np);
78
79 } else
80 pr_err("%s: Could not find qe-ic node\n", __func__);
81#endif
82
65} 83}
66 84
67/* 85/*
@@ -69,7 +87,7 @@ void __init mpc85xx_rdb_pic_init(void)
69 */ 87 */
70static void __init mpc85xx_rdb_setup_arch(void) 88static void __init mpc85xx_rdb_setup_arch(void)
71{ 89{
72#ifdef CONFIG_PCI 90#if defined(CONFIG_PCI) || defined(CONFIG_QUICC_ENGINE)
73 struct device_node *np; 91 struct device_node *np;
74#endif 92#endif
75 93
@@ -85,11 +103,73 @@ static void __init mpc85xx_rdb_setup_arch(void)
85#endif 103#endif
86 104
87 mpc85xx_smp_init(); 105 mpc85xx_smp_init();
106
107#ifdef CONFIG_QUICC_ENGINE
108 np = of_find_compatible_node(NULL, NULL, "fsl,qe");
109 if (!np) {
110 pr_err("%s: Could not find Quicc Engine node\n", __func__);
111 goto qe_fail;
112 }
113
114 qe_reset();
115 of_node_put(np);
116
117 np = of_find_node_by_name(NULL, "par_io");
118 if (np) {
119 struct device_node *ucc;
120
121 par_io_init(np);
122 of_node_put(np);
123
124 for_each_node_by_name(ucc, "ucc")
125 par_io_of_config(ucc);
126
127 }
128#if defined(CONFIG_UCC_GETH) || defined(CONFIG_SERIAL_QE)
129 if (machine_is(p1025_rdb)) {
130
131 struct ccsr_guts_85xx __iomem *guts;
132
133 np = of_find_node_by_name(NULL, "global-utilities");
134 if (np) {
135 guts = of_iomap(np, 0);
136 if (!guts) {
137
138 pr_err("mpc85xx-rdb: could not map global utilities register\n");
139
140 } else {
141 /* P1025 has pins muxed for QE and other functions. To
142 * enable QE UEC mode, we need to set bit QE0 for UCC1
143 * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9
144 * and QE12 for QE MII management singals in PMUXCR
145 * register.
146 */
147 setbits32(&guts->pmuxcr, MPC85xx_PMUXCR_QE(0) |
148 MPC85xx_PMUXCR_QE(3) |
149 MPC85xx_PMUXCR_QE(9) |
150 MPC85xx_PMUXCR_QE(12));
151 iounmap(guts);
152 }
153 of_node_put(np);
154 }
155
156 }
157#endif
158
159qe_fail:
160#endif /* CONFIG_QUICC_ENGINE */
161
88 printk(KERN_INFO "MPC85xx RDB board from Freescale Semiconductor\n"); 162 printk(KERN_INFO "MPC85xx RDB board from Freescale Semiconductor\n");
89} 163}
90 164
91machine_device_initcall(p2020_rdb, mpc85xx_common_publish_devices); 165machine_device_initcall(p2020_rdb, mpc85xx_common_publish_devices);
166machine_device_initcall(p2020_rdb_pc, mpc85xx_common_publish_devices);
167machine_device_initcall(p1020_mbg_pc, mpc85xx_common_publish_devices);
92machine_device_initcall(p1020_rdb, mpc85xx_common_publish_devices); 168machine_device_initcall(p1020_rdb, mpc85xx_common_publish_devices);
169machine_device_initcall(p1020_rdb_pc, mpc85xx_common_publish_devices);
170machine_device_initcall(p1020_utm_pc, mpc85xx_common_publish_devices);
171machine_device_initcall(p1021_rdb_pc, mpc85xx_common_publish_devices);
172machine_device_initcall(p1025_rdb, mpc85xx_common_publish_devices);
93 173
94/* 174/*
95 * Called very early, device-tree isn't unflattened 175 * Called very early, device-tree isn't unflattened
@@ -112,6 +192,52 @@ static int __init p1020_rdb_probe(void)
112 return 0; 192 return 0;
113} 193}
114 194
195static int __init p1020_rdb_pc_probe(void)
196{
197 unsigned long root = of_get_flat_dt_root();
198
199 return of_flat_dt_is_compatible(root, "fsl,P1020RDB-PC");
200}
201
202static int __init p1021_rdb_pc_probe(void)
203{
204 unsigned long root = of_get_flat_dt_root();
205
206 if (of_flat_dt_is_compatible(root, "fsl,P1021RDB-PC"))
207 return 1;
208 return 0;
209}
210
211static int __init p2020_rdb_pc_probe(void)
212{
213 unsigned long root = of_get_flat_dt_root();
214
215 if (of_flat_dt_is_compatible(root, "fsl,P2020RDB-PC"))
216 return 1;
217 return 0;
218}
219
220static int __init p1025_rdb_probe(void)
221{
222 unsigned long root = of_get_flat_dt_root();
223
224 return of_flat_dt_is_compatible(root, "fsl,P1025RDB");
225}
226
227static int __init p1020_mbg_pc_probe(void)
228{
229 unsigned long root = of_get_flat_dt_root();
230
231 return of_flat_dt_is_compatible(root, "fsl,P1020MBG-PC");
232}
233
234static int __init p1020_utm_pc_probe(void)
235{
236 unsigned long root = of_get_flat_dt_root();
237
238 return of_flat_dt_is_compatible(root, "fsl,P1020UTM-PC");
239}
240
115define_machine(p2020_rdb) { 241define_machine(p2020_rdb) {
116 .name = "P2020 RDB", 242 .name = "P2020 RDB",
117 .probe = p2020_rdb_probe, 243 .probe = p2020_rdb_probe,
@@ -139,3 +265,87 @@ define_machine(p1020_rdb) {
139 .calibrate_decr = generic_calibrate_decr, 265 .calibrate_decr = generic_calibrate_decr,
140 .progress = udbg_progress, 266 .progress = udbg_progress,
141}; 267};
268
269define_machine(p1021_rdb_pc) {
270 .name = "P1021 RDB-PC",
271 .probe = p1021_rdb_pc_probe,
272 .setup_arch = mpc85xx_rdb_setup_arch,
273 .init_IRQ = mpc85xx_rdb_pic_init,
274#ifdef CONFIG_PCI
275 .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
276#endif
277 .get_irq = mpic_get_irq,
278 .restart = fsl_rstcr_restart,
279 .calibrate_decr = generic_calibrate_decr,
280 .progress = udbg_progress,
281};
282
283define_machine(p2020_rdb_pc) {
284 .name = "P2020RDB-PC",
285 .probe = p2020_rdb_pc_probe,
286 .setup_arch = mpc85xx_rdb_setup_arch,
287 .init_IRQ = mpc85xx_rdb_pic_init,
288#ifdef CONFIG_PCI
289 .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
290#endif
291 .get_irq = mpic_get_irq,
292 .restart = fsl_rstcr_restart,
293 .calibrate_decr = generic_calibrate_decr,
294 .progress = udbg_progress,
295};
296
297define_machine(p1025_rdb) {
298 .name = "P1025 RDB",
299 .probe = p1025_rdb_probe,
300 .setup_arch = mpc85xx_rdb_setup_arch,
301 .init_IRQ = mpc85xx_rdb_pic_init,
302#ifdef CONFIG_PCI
303 .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
304#endif
305 .get_irq = mpic_get_irq,
306 .restart = fsl_rstcr_restart,
307 .calibrate_decr = generic_calibrate_decr,
308 .progress = udbg_progress,
309};
310
311define_machine(p1020_mbg_pc) {
312 .name = "P1020 MBG-PC",
313 .probe = p1020_mbg_pc_probe,
314 .setup_arch = mpc85xx_rdb_setup_arch,
315 .init_IRQ = mpc85xx_rdb_pic_init,
316#ifdef CONFIG_PCI
317 .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
318#endif
319 .get_irq = mpic_get_irq,
320 .restart = fsl_rstcr_restart,
321 .calibrate_decr = generic_calibrate_decr,
322 .progress = udbg_progress,
323};
324
325define_machine(p1020_utm_pc) {
326 .name = "P1020 UTM-PC",
327 .probe = p1020_utm_pc_probe,
328 .setup_arch = mpc85xx_rdb_setup_arch,
329 .init_IRQ = mpc85xx_rdb_pic_init,
330#ifdef CONFIG_PCI
331 .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
332#endif
333 .get_irq = mpic_get_irq,
334 .restart = fsl_rstcr_restart,
335 .calibrate_decr = generic_calibrate_decr,
336 .progress = udbg_progress,
337};
338
339define_machine(p1020_rdb_pc) {
340 .name = "P1020RDB-PC",
341 .probe = p1020_rdb_pc_probe,
342 .setup_arch = mpc85xx_rdb_setup_arch,
343 .init_IRQ = mpc85xx_rdb_pic_init,
344#ifdef CONFIG_PCI
345 .pcibios_fixup_bus = fsl_pcibios_fixup_bus,
346#endif
347 .get_irq = mpic_get_irq,
348 .restart = fsl_rstcr_restart,
349 .calibrate_decr = generic_calibrate_decr,
350 .progress = udbg_progress,
351};
diff --git a/arch/powerpc/platforms/85xx/p1010rdb.c b/arch/powerpc/platforms/85xx/p1010rdb.c
index 538bc3f57e9d..d8bd6563d9ca 100644
--- a/arch/powerpc/platforms/85xx/p1010rdb.c
+++ b/arch/powerpc/platforms/85xx/p1010rdb.c
@@ -32,9 +32,8 @@
32 32
33void __init p1010_rdb_pic_init(void) 33void __init p1010_rdb_pic_init(void)
34{ 34{
35 struct mpic *mpic = mpic_alloc(NULL, 0, 35 struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
36 MPIC_WANTS_RESET | MPIC_BIG_ENDIAN | 36 MPIC_SINGLE_DEST_CPU,
37 MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU,
38 0, 256, " OpenPIC "); 37 0, 256, " OpenPIC ");
39 38
40 BUG_ON(mpic == NULL); 39 BUG_ON(mpic == NULL);
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
index bb3d84f4046f..0fe88e39945e 100644
--- a/arch/powerpc/platforms/85xx/p1022_ds.c
+++ b/arch/powerpc/platforms/85xx/p1022_ds.c
@@ -25,6 +25,7 @@
25 25
26#include <sysdev/fsl_soc.h> 26#include <sysdev/fsl_soc.h>
27#include <sysdev/fsl_pci.h> 27#include <sysdev/fsl_pci.h>
28#include <asm/udbg.h>
28#include <asm/fsl_guts.h> 29#include <asm/fsl_guts.h>
29#include "smp.h" 30#include "smp.h"
30 31
@@ -32,6 +33,10 @@
32 33
33#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) 34#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
34 35
36#define PMUXCR_ELBCDIU_MASK 0xc0000000
37#define PMUXCR_ELBCDIU_NOR16 0x80000000
38#define PMUXCR_ELBCDIU_DIU 0x40000000
39
35/* 40/*
36 * Board-specific initialization of the DIU. This code should probably be 41 * Board-specific initialization of the DIU. This code should probably be
37 * executed when the DIU is opened, rather than in arch code, but the DIU 42 * executed when the DIU is opened, rather than in arch code, but the DIU
@@ -49,11 +54,22 @@
49#define CLKDVDR_PXCLK_MASK 0x00FF0000 54#define CLKDVDR_PXCLK_MASK 0x00FF0000
50 55
51/* Some ngPIXIS register definitions */ 56/* Some ngPIXIS register definitions */
57#define PX_CTL 3
58#define PX_BRDCFG0 8
59#define PX_BRDCFG1 9
60
61#define PX_BRDCFG0_ELBC_SPI_MASK 0xc0
62#define PX_BRDCFG0_ELBC_SPI_ELBC 0x00
63#define PX_BRDCFG0_ELBC_SPI_NULL 0xc0
64#define PX_BRDCFG0_ELBC_DIU 0x02
65
52#define PX_BRDCFG1_DVIEN 0x80 66#define PX_BRDCFG1_DVIEN 0x80
53#define PX_BRDCFG1_DFPEN 0x40 67#define PX_BRDCFG1_DFPEN 0x40
54#define PX_BRDCFG1_BACKLIGHT 0x20 68#define PX_BRDCFG1_BACKLIGHT 0x20
55#define PX_BRDCFG1_DDCEN 0x10 69#define PX_BRDCFG1_DDCEN 0x10
56 70
71#define PX_CTL_ALTACC 0x80
72
57/* 73/*
58 * DIU Area Descriptor 74 * DIU Area Descriptor
59 * 75 *
@@ -132,44 +148,117 @@ static void p1022ds_set_gamma_table(enum fsl_diu_monitor_port port,
132 */ 148 */
133static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port) 149static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port)
134{ 150{
135 struct device_node *np; 151 struct device_node *guts_node;
136 void __iomem *pixis; 152 struct device_node *indirect_node = NULL;
137 u8 __iomem *brdcfg1; 153 struct ccsr_guts_85xx __iomem *guts;
138 154 u8 __iomem *lbc_lcs0_ba = NULL;
139 np = of_find_compatible_node(NULL, NULL, "fsl,p1022ds-fpga"); 155 u8 __iomem *lbc_lcs1_ba = NULL;
140 if (!np) 156 u8 b;
141 /* older device trees used "fsl,p1022ds-pixis" */ 157
142 np = of_find_compatible_node(NULL, NULL, "fsl,p1022ds-pixis"); 158 /* Map the global utilities registers. */
143 if (!np) { 159 guts_node = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts");
144 pr_err("p1022ds: missing ngPIXIS node\n"); 160 if (!guts_node) {
161 pr_err("p1022ds: missing global utilties device node\n");
145 return; 162 return;
146 } 163 }
147 164
148 pixis = of_iomap(np, 0); 165 guts = of_iomap(guts_node, 0);
149 if (!pixis) { 166 if (!guts) {
150 pr_err("p1022ds: could not map ngPIXIS registers\n"); 167 pr_err("p1022ds: could not map global utilties device\n");
151 return; 168 goto exit;
152 } 169 }
153 brdcfg1 = pixis + 9; /* BRDCFG1 is at offset 9 in the ngPIXIS */ 170
171 indirect_node = of_find_compatible_node(NULL, NULL,
172 "fsl,p1022ds-indirect-pixis");
173 if (!indirect_node) {
174 pr_err("p1022ds: missing pixis indirect mode node\n");
175 goto exit;
176 }
177
178 lbc_lcs0_ba = of_iomap(indirect_node, 0);
179 if (!lbc_lcs0_ba) {
180 pr_err("p1022ds: could not map localbus chip select 0\n");
181 goto exit;
182 }
183
184 lbc_lcs1_ba = of_iomap(indirect_node, 1);
185 if (!lbc_lcs1_ba) {
186 pr_err("p1022ds: could not map localbus chip select 1\n");
187 goto exit;
188 }
189
190 /* Make sure we're in indirect mode first. */
191 if ((in_be32(&guts->pmuxcr) & PMUXCR_ELBCDIU_MASK) !=
192 PMUXCR_ELBCDIU_DIU) {
193 struct device_node *pixis_node;
194 void __iomem *pixis;
195
196 pixis_node =
197 of_find_compatible_node(NULL, NULL, "fsl,p1022ds-fpga");
198 if (!pixis_node) {
199 pr_err("p1022ds: missing pixis node\n");
200 goto exit;
201 }
202
203 pixis = of_iomap(pixis_node, 0);
204 of_node_put(pixis_node);
205 if (!pixis) {
206 pr_err("p1022ds: could not map pixis registers\n");
207 goto exit;
208 }
209
210 /* Enable indirect PIXIS mode. */
211 setbits8(pixis + PX_CTL, PX_CTL_ALTACC);
212 iounmap(pixis);
213
214 /* Switch the board mux to the DIU */
215 out_8(lbc_lcs0_ba, PX_BRDCFG0); /* BRDCFG0 */
216 b = in_8(lbc_lcs1_ba);
217 b |= PX_BRDCFG0_ELBC_DIU;
218 out_8(lbc_lcs1_ba, b);
219
220 /* Set the chip mux to DIU mode. */
221 clrsetbits_be32(&guts->pmuxcr, PMUXCR_ELBCDIU_MASK,
222 PMUXCR_ELBCDIU_DIU);
223 in_be32(&guts->pmuxcr);
224 }
225
154 226
155 switch (port) { 227 switch (port) {
156 case FSL_DIU_PORT_DVI: 228 case FSL_DIU_PORT_DVI:
157 printk(KERN_INFO "%s:%u\n", __func__, __LINE__);
158 /* Enable the DVI port, disable the DFP and the backlight */ 229 /* Enable the DVI port, disable the DFP and the backlight */
159 clrsetbits_8(brdcfg1, PX_BRDCFG1_DFPEN | PX_BRDCFG1_BACKLIGHT, 230 out_8(lbc_lcs0_ba, PX_BRDCFG1);
160 PX_BRDCFG1_DVIEN); 231 b = in_8(lbc_lcs1_ba);
232 b &= ~(PX_BRDCFG1_DFPEN | PX_BRDCFG1_BACKLIGHT);
233 b |= PX_BRDCFG1_DVIEN;
234 out_8(lbc_lcs1_ba, b);
161 break; 235 break;
162 case FSL_DIU_PORT_LVDS: 236 case FSL_DIU_PORT_LVDS:
163 printk(KERN_INFO "%s:%u\n", __func__, __LINE__); 237 /*
238 * LVDS also needs backlight enabled, otherwise the display
239 * will be blank.
240 */
164 /* Enable the DFP port, disable the DVI and the backlight */ 241 /* Enable the DFP port, disable the DVI and the backlight */
165 clrsetbits_8(brdcfg1, PX_BRDCFG1_DVIEN | PX_BRDCFG1_BACKLIGHT, 242 out_8(lbc_lcs0_ba, PX_BRDCFG1);
166 PX_BRDCFG1_DFPEN); 243 b = in_8(lbc_lcs1_ba);
244 b &= ~PX_BRDCFG1_DVIEN;
245 b |= PX_BRDCFG1_DFPEN | PX_BRDCFG1_BACKLIGHT;
246 out_8(lbc_lcs1_ba, b);
167 break; 247 break;
168 default: 248 default:
169 pr_err("p1022ds: unsupported monitor port %i\n", port); 249 pr_err("p1022ds: unsupported monitor port %i\n", port);
170 } 250 }
171 251
172 iounmap(pixis); 252exit:
253 if (lbc_lcs1_ba)
254 iounmap(lbc_lcs1_ba);
255 if (lbc_lcs0_ba)
256 iounmap(lbc_lcs0_ba);
257 if (guts)
258 iounmap(guts);
259
260 of_node_put(indirect_node);
261 of_node_put(guts_node);
173} 262}
174 263
175/** 264/**
@@ -241,15 +330,56 @@ p1022ds_valid_monitor_port(enum fsl_diu_monitor_port port)
241 330
242void __init p1022_ds_pic_init(void) 331void __init p1022_ds_pic_init(void)
243{ 332{
244 struct mpic *mpic = mpic_alloc(NULL, 0, 333 struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
245 MPIC_WANTS_RESET |
246 MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS |
247 MPIC_SINGLE_DEST_CPU, 334 MPIC_SINGLE_DEST_CPU,
248 0, 256, " OpenPIC "); 335 0, 256, " OpenPIC ");
249 BUG_ON(mpic == NULL); 336 BUG_ON(mpic == NULL);
250 mpic_init(mpic); 337 mpic_init(mpic);
251} 338}
252 339
340#if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE)
341
342/*
343 * Disables a node in the device tree.
344 *
345 * This function is called before kmalloc() is available, so the 'new' object
346 * should be allocated in the global area. The easiest way is to do that is
347 * to allocate one static local variable for each call to this function.
348 */
349static void __init disable_one_node(struct device_node *np, struct property *new)
350{
351 struct property *old;
352
353 old = of_find_property(np, new->name, NULL);
354 if (old)
355 prom_update_property(np, new, old);
356 else
357 prom_add_property(np, new);
358}
359
360/* TRUE if there is a "video=fslfb" command-line parameter. */
361static bool fslfb;
362
363/*
364 * Search for a "video=fslfb" command-line parameter, and set 'fslfb' to
365 * true if we find it.
366 *
367 * We need to use early_param() instead of __setup() because the normal
368 * __setup() gets called to late. However, early_param() gets called very
369 * early, before the device tree is unflattened, so all we can do now is set a
370 * global variable. Later on, p1022_ds_setup_arch() will use that variable
371 * to determine if we need to update the device tree.
372 */
373static int __init early_video_setup(char *options)
374{
375 fslfb = (strncmp(options, "fslfb:", 6) == 0);
376
377 return 0;
378}
379early_param("video", early_video_setup);
380
381#endif
382
253/* 383/*
254 * Setup the architecture 384 * Setup the architecture
255 */ 385 */
@@ -287,6 +417,34 @@ static void __init p1022_ds_setup_arch(void)
287 diu_ops.set_monitor_port = p1022ds_set_monitor_port; 417 diu_ops.set_monitor_port = p1022ds_set_monitor_port;
288 diu_ops.set_pixel_clock = p1022ds_set_pixel_clock; 418 diu_ops.set_pixel_clock = p1022ds_set_pixel_clock;
289 diu_ops.valid_monitor_port = p1022ds_valid_monitor_port; 419 diu_ops.valid_monitor_port = p1022ds_valid_monitor_port;
420
421 /*
422 * Disable the NOR flash node if there is video=fslfb... command-line
423 * parameter. When the DIU is active, NOR flash is unavailable, so we
424 * have to disable the node before the MTD driver loads.
425 */
426 if (fslfb) {
427 struct device_node *np =
428 of_find_compatible_node(NULL, NULL, "fsl,p1022-elbc");
429
430 if (np) {
431 np = of_find_compatible_node(np, NULL, "cfi-flash");
432 if (np) {
433 static struct property nor_status = {
434 .name = "status",
435 .value = "disabled",
436 .length = sizeof("disabled"),
437 };
438
439 pr_info("p1022ds: disabling %s node",
440 np->full_name);
441 disable_one_node(np, &nor_status);
442 of_node_put(np);
443 }
444 }
445
446 }
447
290#endif 448#endif
291 449
292 mpc85xx_smp_init(); 450 mpc85xx_smp_init();
diff --git a/arch/powerpc/platforms/85xx/p1023_rds.c b/arch/powerpc/platforms/85xx/p1023_rds.c
index d951e7027bb6..6b07398e4369 100644
--- a/arch/powerpc/platforms/85xx/p1023_rds.c
+++ b/arch/powerpc/platforms/85xx/p1023_rds.c
@@ -93,9 +93,8 @@ machine_device_initcall(p1023_rds, mpc85xx_common_publish_devices);
93 93
94static void __init mpc85xx_rds_pic_init(void) 94static void __init mpc85xx_rds_pic_init(void)
95{ 95{
96 struct mpic *mpic = mpic_alloc(NULL, 0, 96 struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
97 MPIC_WANTS_RESET | MPIC_BIG_ENDIAN | 97 MPIC_SINGLE_DEST_CPU,
98 MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU,
99 0, 256, " OpenPIC "); 98 0, 256, " OpenPIC ");
100 99
101 BUG_ON(mpic == NULL); 100 BUG_ON(mpic == NULL);
diff --git a/arch/powerpc/platforms/85xx/sbc8548.c b/arch/powerpc/platforms/85xx/sbc8548.c
index 184a50784617..1677b8a22677 100644
--- a/arch/powerpc/platforms/85xx/sbc8548.c
+++ b/arch/powerpc/platforms/85xx/sbc8548.c
@@ -54,8 +54,7 @@ static int sbc_rev;
54 54
55static void __init sbc8548_pic_init(void) 55static void __init sbc8548_pic_init(void)
56{ 56{
57 struct mpic *mpic = mpic_alloc(NULL, 0, 57 struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
58 MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
59 0, 256, " OpenPIC "); 58 0, 256, " OpenPIC ");
60 BUG_ON(mpic == NULL); 59 BUG_ON(mpic == NULL);
61 mpic_init(mpic); 60 mpic_init(mpic);
diff --git a/arch/powerpc/platforms/85xx/sbc8560.c b/arch/powerpc/platforms/85xx/sbc8560.c
index 940752e93051..3c3bbcc27566 100644
--- a/arch/powerpc/platforms/85xx/sbc8560.c
+++ b/arch/powerpc/platforms/85xx/sbc8560.c
@@ -41,8 +41,7 @@
41 41
42static void __init sbc8560_pic_init(void) 42static void __init sbc8560_pic_init(void)
43{ 43{
44 struct mpic *mpic = mpic_alloc(NULL, 0, 44 struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
45 MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
46 0, 256, " OpenPIC "); 45 0, 256, " OpenPIC ");
47 BUG_ON(mpic == NULL); 46 BUG_ON(mpic == NULL);
48 mpic_init(mpic); 47 mpic_init(mpic);
diff --git a/arch/powerpc/platforms/85xx/socrates.c b/arch/powerpc/platforms/85xx/socrates.c
index 18f635906b27..b71919217756 100644
--- a/arch/powerpc/platforms/85xx/socrates.c
+++ b/arch/powerpc/platforms/85xx/socrates.c
@@ -48,8 +48,7 @@ static void __init socrates_pic_init(void)
48{ 48{
49 struct device_node *np; 49 struct device_node *np;
50 50
51 struct mpic *mpic = mpic_alloc(NULL, 0, 51 struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
52 MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
53 0, 256, " OpenPIC "); 52 0, 256, " OpenPIC ");
54 BUG_ON(mpic == NULL); 53 BUG_ON(mpic == NULL);
55 mpic_init(mpic); 54 mpic_init(mpic);
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
index 12cb9bb2cc68..3bbbf7489487 100644
--- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
+++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c
@@ -51,7 +51,7 @@ static struct socrates_fpga_irq_info fpga_irqs[SOCRATES_FPGA_NUM_IRQS] = {
51static DEFINE_RAW_SPINLOCK(socrates_fpga_pic_lock); 51static DEFINE_RAW_SPINLOCK(socrates_fpga_pic_lock);
52 52
53static void __iomem *socrates_fpga_pic_iobase; 53static void __iomem *socrates_fpga_pic_iobase;
54static struct irq_host *socrates_fpga_pic_irq_host; 54static struct irq_domain *socrates_fpga_pic_irq_host;
55static unsigned int socrates_fpga_irqs[3]; 55static unsigned int socrates_fpga_irqs[3];
56 56
57static inline uint32_t socrates_fpga_pic_read(int reg) 57static inline uint32_t socrates_fpga_pic_read(int reg)
@@ -227,7 +227,7 @@ static struct irq_chip socrates_fpga_pic_chip = {
227 .irq_set_type = socrates_fpga_pic_set_type, 227 .irq_set_type = socrates_fpga_pic_set_type,
228}; 228};
229 229
230static int socrates_fpga_pic_host_map(struct irq_host *h, unsigned int virq, 230static int socrates_fpga_pic_host_map(struct irq_domain *h, unsigned int virq,
231 irq_hw_number_t hwirq) 231 irq_hw_number_t hwirq)
232{ 232{
233 /* All interrupts are LEVEL sensitive */ 233 /* All interrupts are LEVEL sensitive */
@@ -238,7 +238,7 @@ static int socrates_fpga_pic_host_map(struct irq_host *h, unsigned int virq,
238 return 0; 238 return 0;
239} 239}
240 240
241static int socrates_fpga_pic_host_xlate(struct irq_host *h, 241static int socrates_fpga_pic_host_xlate(struct irq_domain *h,
242 struct device_node *ct, const u32 *intspec, unsigned int intsize, 242 struct device_node *ct, const u32 *intspec, unsigned int intsize,
243 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 243 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
244{ 244{
@@ -269,7 +269,7 @@ static int socrates_fpga_pic_host_xlate(struct irq_host *h,
269 return 0; 269 return 0;
270} 270}
271 271
272static struct irq_host_ops socrates_fpga_pic_host_ops = { 272static const struct irq_domain_ops socrates_fpga_pic_host_ops = {
273 .map = socrates_fpga_pic_host_map, 273 .map = socrates_fpga_pic_host_map,
274 .xlate = socrates_fpga_pic_host_xlate, 274 .xlate = socrates_fpga_pic_host_xlate,
275}; 275};
@@ -279,10 +279,9 @@ void socrates_fpga_pic_init(struct device_node *pic)
279 unsigned long flags; 279 unsigned long flags;
280 int i; 280 int i;
281 281
282 /* Setup an irq_host structure */ 282 /* Setup an irq_domain structure */
283 socrates_fpga_pic_irq_host = irq_alloc_host(pic, IRQ_HOST_MAP_LINEAR, 283 socrates_fpga_pic_irq_host = irq_domain_add_linear(pic,
284 SOCRATES_FPGA_NUM_IRQS, &socrates_fpga_pic_host_ops, 284 SOCRATES_FPGA_NUM_IRQS, &socrates_fpga_pic_host_ops, NULL);
285 SOCRATES_FPGA_NUM_IRQS);
286 if (socrates_fpga_pic_irq_host == NULL) { 285 if (socrates_fpga_pic_irq_host == NULL) {
287 pr_err("FPGA PIC: Unable to allocate host\n"); 286 pr_err("FPGA PIC: Unable to allocate host\n");
288 return; 287 return;
diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c
index e9e5234b4e76..27ca3a7b04ab 100644
--- a/arch/powerpc/platforms/85xx/stx_gp3.c
+++ b/arch/powerpc/platforms/85xx/stx_gp3.c
@@ -48,8 +48,7 @@
48 48
49static void __init stx_gp3_pic_init(void) 49static void __init stx_gp3_pic_init(void)
50{ 50{
51 struct mpic *mpic = mpic_alloc(NULL, 0, 51 struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
52 MPIC_WANTS_RESET | MPIC_BIG_ENDIAN,
53 0, 256, " OpenPIC "); 52 0, 256, " OpenPIC ");
54 BUG_ON(mpic == NULL); 53 BUG_ON(mpic == NULL);
55 mpic_init(mpic); 54 mpic_init(mpic);
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c
index bf7c89fb75bb..d7504cefe016 100644
--- a/arch/powerpc/platforms/85xx/tqm85xx.c
+++ b/arch/powerpc/platforms/85xx/tqm85xx.c
@@ -47,7 +47,7 @@
47static void __init tqm85xx_pic_init(void) 47static void __init tqm85xx_pic_init(void)
48{ 48{
49 struct mpic *mpic = mpic_alloc(NULL, 0, 49 struct mpic *mpic = mpic_alloc(NULL, 0,
50 MPIC_WANTS_RESET | MPIC_BIG_ENDIAN, 50 MPIC_BIG_ENDIAN,
51 0, 256, " OpenPIC "); 51 0, 256, " OpenPIC ");
52 BUG_ON(mpic == NULL); 52 BUG_ON(mpic == NULL);
53 mpic_init(mpic); 53 mpic_init(mpic);
diff --git a/arch/powerpc/platforms/85xx/xes_mpc85xx.c b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
index 3a69f8b77de6..503c21596c63 100644
--- a/arch/powerpc/platforms/85xx/xes_mpc85xx.c
+++ b/arch/powerpc/platforms/85xx/xes_mpc85xx.c
@@ -43,9 +43,7 @@
43 43
44void __init xes_mpc85xx_pic_init(void) 44void __init xes_mpc85xx_pic_init(void)
45{ 45{
46 struct mpic *mpic = mpic_alloc(NULL, 0, 46 struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN,
47 MPIC_WANTS_RESET |
48 MPIC_BIG_ENDIAN | MPIC_BROKEN_FRR_NIRQS,
49 0, 256, " OpenPIC "); 47 0, 256, " OpenPIC ");
50 BUG_ON(mpic == NULL); 48 BUG_ON(mpic == NULL);
51 mpic_init(mpic); 49 mpic_init(mpic);
diff --git a/arch/powerpc/platforms/86xx/Kconfig b/arch/powerpc/platforms/86xx/Kconfig
index 8d6599d54ea6..7a6279e38213 100644
--- a/arch/powerpc/platforms/86xx/Kconfig
+++ b/arch/powerpc/platforms/86xx/Kconfig
@@ -39,6 +39,7 @@ config GEF_PPC9A
39 select MMIO_NVRAM 39 select MMIO_NVRAM
40 select GENERIC_GPIO 40 select GENERIC_GPIO
41 select ARCH_REQUIRE_GPIOLIB 41 select ARCH_REQUIRE_GPIOLIB
42 select GE_FPGA
42 help 43 help
43 This option enables support for the GE PPC9A. 44 This option enables support for the GE PPC9A.
44 45
@@ -48,6 +49,7 @@ config GEF_SBC310
48 select MMIO_NVRAM 49 select MMIO_NVRAM
49 select GENERIC_GPIO 50 select GENERIC_GPIO
50 select ARCH_REQUIRE_GPIOLIB 51 select ARCH_REQUIRE_GPIOLIB
52 select GE_FPGA
51 help 53 help
52 This option enables support for the GE SBC310. 54 This option enables support for the GE SBC310.
53 55
@@ -57,6 +59,7 @@ config GEF_SBC610
57 select MMIO_NVRAM 59 select MMIO_NVRAM
58 select GENERIC_GPIO 60 select GENERIC_GPIO
59 select ARCH_REQUIRE_GPIOLIB 61 select ARCH_REQUIRE_GPIOLIB
62 select GE_FPGA
60 select HAS_RAPIDIO 63 select HAS_RAPIDIO
61 help 64 help
62 This option enables support for the GE SBC610. 65 This option enables support for the GE SBC610.
diff --git a/arch/powerpc/platforms/86xx/Makefile b/arch/powerpc/platforms/86xx/Makefile
index 4b0d7b1aa005..ede815d6489d 100644
--- a/arch/powerpc/platforms/86xx/Makefile
+++ b/arch/powerpc/platforms/86xx/Makefile
@@ -7,7 +7,6 @@ obj-$(CONFIG_SMP) += mpc86xx_smp.o
7obj-$(CONFIG_MPC8641_HPCN) += mpc86xx_hpcn.o 7obj-$(CONFIG_MPC8641_HPCN) += mpc86xx_hpcn.o
8obj-$(CONFIG_SBC8641D) += sbc8641d.o 8obj-$(CONFIG_SBC8641D) += sbc8641d.o
9obj-$(CONFIG_MPC8610_HPCD) += mpc8610_hpcd.o 9obj-$(CONFIG_MPC8610_HPCD) += mpc8610_hpcd.o
10gef-gpio-$(CONFIG_GPIOLIB) += gef_gpio.o 10obj-$(CONFIG_GEF_SBC610) += gef_sbc610.o
11obj-$(CONFIG_GEF_SBC610) += gef_sbc610.o gef_pic.o $(gef-gpio-y) 11obj-$(CONFIG_GEF_SBC310) += gef_sbc310.o
12obj-$(CONFIG_GEF_SBC310) += gef_sbc310.o gef_pic.o $(gef-gpio-y) 12obj-$(CONFIG_GEF_PPC9A) += gef_ppc9a.o
13obj-$(CONFIG_GEF_PPC9A) += gef_ppc9a.o gef_pic.o $(gef-gpio-y)
diff --git a/arch/powerpc/platforms/86xx/gef_gpio.c b/arch/powerpc/platforms/86xx/gef_gpio.c
deleted file mode 100644
index 2a703365e664..000000000000
--- a/arch/powerpc/platforms/86xx/gef_gpio.c
+++ /dev/null
@@ -1,171 +0,0 @@
1/*
2 * Driver for GE FPGA based GPIO
3 *
4 * Author: Martyn Welch <martyn.welch@ge.com>
5 *
6 * 2008 (c) GE Intelligent Platforms Embedded Systems, Inc.
7 *
8 * This file is licensed under the terms of the GNU General Public License
9 * version 2. This program is licensed "as is" without any warranty of any
10 * kind, whether express or implied.
11 */
12
13/* TODO
14 *
15 * Configuration of output modes (totem-pole/open-drain)
16 * Interrupt configuration - interrupts are always generated the FPGA relies on
17 * the I/O interrupt controllers mask to stop them propergating
18 */
19
20#include <linux/kernel.h>
21#include <linux/compiler.h>
22#include <linux/init.h>
23#include <linux/io.h>
24#include <linux/of.h>
25#include <linux/of_device.h>
26#include <linux/of_platform.h>
27#include <linux/of_gpio.h>
28#include <linux/gpio.h>
29#include <linux/slab.h>
30#include <linux/module.h>
31
32#define GEF_GPIO_DIRECT 0x00
33#define GEF_GPIO_IN 0x04
34#define GEF_GPIO_OUT 0x08
35#define GEF_GPIO_TRIG 0x0C
36#define GEF_GPIO_POLAR_A 0x10
37#define GEF_GPIO_POLAR_B 0x14
38#define GEF_GPIO_INT_STAT 0x18
39#define GEF_GPIO_OVERRUN 0x1C
40#define GEF_GPIO_MODE 0x20
41
42static void _gef_gpio_set(void __iomem *reg, unsigned int offset, int value)
43{
44 unsigned int data;
45
46 data = ioread32be(reg);
47 /* value: 0=low; 1=high */
48 if (value & 0x1)
49 data = data | (0x1 << offset);
50 else
51 data = data & ~(0x1 << offset);
52
53 iowrite32be(data, reg);
54}
55
56
57static int gef_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
58{
59 unsigned int data;
60 struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
61
62 data = ioread32be(mmchip->regs + GEF_GPIO_DIRECT);
63 data = data | (0x1 << offset);
64 iowrite32be(data, mmchip->regs + GEF_GPIO_DIRECT);
65
66 return 0;
67}
68
69static int gef_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value)
70{
71 unsigned int data;
72 struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
73
74 /* Set direction before switching to input */
75 _gef_gpio_set(mmchip->regs + GEF_GPIO_OUT, offset, value);
76
77 data = ioread32be(mmchip->regs + GEF_GPIO_DIRECT);
78 data = data & ~(0x1 << offset);
79 iowrite32be(data, mmchip->regs + GEF_GPIO_DIRECT);
80
81 return 0;
82}
83
84static int gef_gpio_get(struct gpio_chip *chip, unsigned offset)
85{
86 unsigned int data;
87 int state = 0;
88 struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
89
90 data = ioread32be(mmchip->regs + GEF_GPIO_IN);
91 state = (int)((data >> offset) & 0x1);
92
93 return state;
94}
95
96static void gef_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
97{
98 struct of_mm_gpio_chip *mmchip = to_of_mm_gpio_chip(chip);
99
100 _gef_gpio_set(mmchip->regs + GEF_GPIO_OUT, offset, value);
101}
102
103static int __init gef_gpio_init(void)
104{
105 struct device_node *np;
106 int retval;
107 struct of_mm_gpio_chip *gef_gpio_chip;
108
109 for_each_compatible_node(np, NULL, "gef,sbc610-gpio") {
110
111 pr_debug("%s: Initialising GEF GPIO\n", np->full_name);
112
113 /* Allocate chip structure */
114 gef_gpio_chip = kzalloc(sizeof(*gef_gpio_chip), GFP_KERNEL);
115 if (!gef_gpio_chip) {
116 pr_err("%s: Unable to allocate structure\n",
117 np->full_name);
118 continue;
119 }
120
121 /* Setup pointers to chip functions */
122 gef_gpio_chip->gc.of_gpio_n_cells = 2;
123 gef_gpio_chip->gc.ngpio = 19;
124 gef_gpio_chip->gc.direction_input = gef_gpio_dir_in;
125 gef_gpio_chip->gc.direction_output = gef_gpio_dir_out;
126 gef_gpio_chip->gc.get = gef_gpio_get;
127 gef_gpio_chip->gc.set = gef_gpio_set;
128
129 /* This function adds a memory mapped GPIO chip */
130 retval = of_mm_gpiochip_add(np, gef_gpio_chip);
131 if (retval) {
132 kfree(gef_gpio_chip);
133 pr_err("%s: Unable to add GPIO\n", np->full_name);
134 }
135 }
136
137 for_each_compatible_node(np, NULL, "gef,sbc310-gpio") {
138
139 pr_debug("%s: Initialising GEF GPIO\n", np->full_name);
140
141 /* Allocate chip structure */
142 gef_gpio_chip = kzalloc(sizeof(*gef_gpio_chip), GFP_KERNEL);
143 if (!gef_gpio_chip) {
144 pr_err("%s: Unable to allocate structure\n",
145 np->full_name);
146 continue;
147 }
148
149 /* Setup pointers to chip functions */
150 gef_gpio_chip->gc.of_gpio_n_cells = 2;
151 gef_gpio_chip->gc.ngpio = 6;
152 gef_gpio_chip->gc.direction_input = gef_gpio_dir_in;
153 gef_gpio_chip->gc.direction_output = gef_gpio_dir_out;
154 gef_gpio_chip->gc.get = gef_gpio_get;
155 gef_gpio_chip->gc.set = gef_gpio_set;
156
157 /* This function adds a memory mapped GPIO chip */
158 retval = of_mm_gpiochip_add(np, gef_gpio_chip);
159 if (retval) {
160 kfree(gef_gpio_chip);
161 pr_err("%s: Unable to add GPIO\n", np->full_name);
162 }
163 }
164
165 return 0;
166};
167arch_initcall(gef_gpio_init);
168
169MODULE_DESCRIPTION("GE I/O FPGA GPIO driver");
170MODULE_AUTHOR("Martyn Welch <martyn.welch@ge.com");
171MODULE_LICENSE("GPL");
diff --git a/arch/powerpc/platforms/86xx/gef_ppc9a.c b/arch/powerpc/platforms/86xx/gef_ppc9a.c
index 60ce07e39100..ed58b6cfd60c 100644
--- a/arch/powerpc/platforms/86xx/gef_ppc9a.c
+++ b/arch/powerpc/platforms/86xx/gef_ppc9a.c
@@ -37,9 +37,9 @@
37 37
38#include <sysdev/fsl_pci.h> 38#include <sysdev/fsl_pci.h>
39#include <sysdev/fsl_soc.h> 39#include <sysdev/fsl_soc.h>
40#include <sysdev/ge/ge_pic.h>
40 41
41#include "mpc86xx.h" 42#include "mpc86xx.h"
42#include "gef_pic.h"
43 43
44#undef DEBUG 44#undef DEBUG
45 45
diff --git a/arch/powerpc/platforms/86xx/gef_sbc310.c b/arch/powerpc/platforms/86xx/gef_sbc310.c
index 3ecee25bf3ed..710db69bd523 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc310.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc310.c
@@ -37,9 +37,9 @@
37 37
38#include <sysdev/fsl_pci.h> 38#include <sysdev/fsl_pci.h>
39#include <sysdev/fsl_soc.h> 39#include <sysdev/fsl_soc.h>
40#include <sysdev/ge/ge_pic.h>
40 41
41#include "mpc86xx.h" 42#include "mpc86xx.h"
42#include "gef_pic.h"
43 43
44#undef DEBUG 44#undef DEBUG
45 45
diff --git a/arch/powerpc/platforms/86xx/gef_sbc610.c b/arch/powerpc/platforms/86xx/gef_sbc610.c
index 5090d608d9ee..4a13d2f4ac20 100644
--- a/arch/powerpc/platforms/86xx/gef_sbc610.c
+++ b/arch/powerpc/platforms/86xx/gef_sbc610.c
@@ -37,9 +37,9 @@
37 37
38#include <sysdev/fsl_pci.h> 38#include <sysdev/fsl_pci.h>
39#include <sysdev/fsl_soc.h> 39#include <sysdev/fsl_soc.h>
40#include <sysdev/ge/ge_pic.h>
40 41
41#include "mpc86xx.h" 42#include "mpc86xx.h"
42#include "gef_pic.h"
43 43
44#undef DEBUG 44#undef DEBUG
45 45
diff --git a/arch/powerpc/platforms/86xx/pic.c b/arch/powerpc/platforms/86xx/pic.c
index 52bbfa031531..22cc3571ae19 100644
--- a/arch/powerpc/platforms/86xx/pic.c
+++ b/arch/powerpc/platforms/86xx/pic.c
@@ -37,9 +37,8 @@ void __init mpc86xx_init_irq(void)
37 int cascade_irq; 37 int cascade_irq;
38#endif 38#endif
39 39
40 struct mpic *mpic = mpic_alloc(NULL, 0, 40 struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
41 MPIC_WANTS_RESET | MPIC_BIG_ENDIAN | 41 MPIC_SINGLE_DEST_CPU,
42 MPIC_BROKEN_FRR_NIRQS | MPIC_SINGLE_DEST_CPU,
43 0, 256, " MPIC "); 42 0, 256, " MPIC ");
44 BUG_ON(mpic == NULL); 43 BUG_ON(mpic == NULL);
45 44
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index ee56a9ea6a79..1fb0b3cddeb3 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -26,6 +26,7 @@ config MPC86XADS
26config MPC885ADS 26config MPC885ADS
27 bool "MPC885ADS" 27 bool "MPC885ADS"
28 select CPM1 28 select CPM1
29 select OF_DYNAMIC
29 help 30 help
30 Freescale Semiconductor MPC885 Application Development System (ADS). 31 Freescale Semiconductor MPC885 Application Development System (ADS).
31 Also known as DUET. 32 Also known as DUET.
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index 0cfb46d54b8c..a35ca44ade66 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -2,7 +2,6 @@ menu "Platform support"
2 2
3source "arch/powerpc/platforms/powernv/Kconfig" 3source "arch/powerpc/platforms/powernv/Kconfig"
4source "arch/powerpc/platforms/pseries/Kconfig" 4source "arch/powerpc/platforms/pseries/Kconfig"
5source "arch/powerpc/platforms/iseries/Kconfig"
6source "arch/powerpc/platforms/chrp/Kconfig" 5source "arch/powerpc/platforms/chrp/Kconfig"
7source "arch/powerpc/platforms/512x/Kconfig" 6source "arch/powerpc/platforms/512x/Kconfig"
8source "arch/powerpc/platforms/52xx/Kconfig" 7source "arch/powerpc/platforms/52xx/Kconfig"
@@ -87,6 +86,14 @@ config MPIC_WEIRD
87 bool 86 bool
88 default n 87 default n
89 88
89config MPIC_MSGR
90 bool "MPIC message register support"
91 depends on MPIC
92 default n
93 help
94 Enables support for the MPIC message registers. These
95 registers are used for inter-processor communication.
96
90config PPC_I8259 97config PPC_I8259
91 bool 98 bool
92 default n 99 default n
@@ -138,7 +145,7 @@ config MPIC_BROKEN_REGREAD
138 of the register contents in software. 145 of the register contents in software.
139 146
140config IBMVIO 147config IBMVIO
141 depends on PPC_PSERIES || PPC_ISERIES 148 depends on PPC_PSERIES
142 bool 149 bool
143 default y 150 default y
144 151
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index 2635a22bade2..879b4a448498 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_FSL_SOC_BOOKE) += 85xx/
16obj-$(CONFIG_PPC_86xx) += 86xx/ 16obj-$(CONFIG_PPC_86xx) += 86xx/
17obj-$(CONFIG_PPC_POWERNV) += powernv/ 17obj-$(CONFIG_PPC_POWERNV) += powernv/
18obj-$(CONFIG_PPC_PSERIES) += pseries/ 18obj-$(CONFIG_PPC_PSERIES) += pseries/
19obj-$(CONFIG_PPC_ISERIES) += iseries/
20obj-$(CONFIG_PPC_MAPLE) += maple/ 19obj-$(CONFIG_PPC_MAPLE) += maple/
21obj-$(CONFIG_PPC_PASEMI) += pasemi/ 20obj-$(CONFIG_PPC_PASEMI) += pasemi/
22obj-$(CONFIG_PPC_CELL) += cell/ 21obj-$(CONFIG_PPC_CELL) += cell/
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c
index 40a6e34793b4..db360fc4cf0e 100644
--- a/arch/powerpc/platforms/cell/axon_msi.c
+++ b/arch/powerpc/platforms/cell/axon_msi.c
@@ -67,7 +67,7 @@
67 67
68 68
69struct axon_msic { 69struct axon_msic {
70 struct irq_host *irq_host; 70 struct irq_domain *irq_domain;
71 __le32 *fifo_virt; 71 __le32 *fifo_virt;
72 dma_addr_t fifo_phys; 72 dma_addr_t fifo_phys;
73 dcr_host_t dcr_host; 73 dcr_host_t dcr_host;
@@ -152,7 +152,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc)
152 152
153static struct axon_msic *find_msi_translator(struct pci_dev *dev) 153static struct axon_msic *find_msi_translator(struct pci_dev *dev)
154{ 154{
155 struct irq_host *irq_host; 155 struct irq_domain *irq_domain;
156 struct device_node *dn, *tmp; 156 struct device_node *dn, *tmp;
157 const phandle *ph; 157 const phandle *ph;
158 struct axon_msic *msic = NULL; 158 struct axon_msic *msic = NULL;
@@ -184,14 +184,14 @@ static struct axon_msic *find_msi_translator(struct pci_dev *dev)
184 goto out_error; 184 goto out_error;
185 } 185 }
186 186
187 irq_host = irq_find_host(dn); 187 irq_domain = irq_find_host(dn);
188 if (!irq_host) { 188 if (!irq_domain) {
189 dev_dbg(&dev->dev, "axon_msi: no irq_host found for node %s\n", 189 dev_dbg(&dev->dev, "axon_msi: no irq_domain found for node %s\n",
190 dn->full_name); 190 dn->full_name);
191 goto out_error; 191 goto out_error;
192 } 192 }
193 193
194 msic = irq_host->host_data; 194 msic = irq_domain->host_data;
195 195
196out_error: 196out_error:
197 of_node_put(dn); 197 of_node_put(dn);
@@ -280,7 +280,7 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
280 BUILD_BUG_ON(NR_IRQS > 65536); 280 BUILD_BUG_ON(NR_IRQS > 65536);
281 281
282 list_for_each_entry(entry, &dev->msi_list, list) { 282 list_for_each_entry(entry, &dev->msi_list, list) {
283 virq = irq_create_direct_mapping(msic->irq_host); 283 virq = irq_create_direct_mapping(msic->irq_domain);
284 if (virq == NO_IRQ) { 284 if (virq == NO_IRQ) {
285 dev_warn(&dev->dev, 285 dev_warn(&dev->dev,
286 "axon_msi: virq allocation failed!\n"); 286 "axon_msi: virq allocation failed!\n");
@@ -318,7 +318,7 @@ static struct irq_chip msic_irq_chip = {
318 .name = "AXON-MSI", 318 .name = "AXON-MSI",
319}; 319};
320 320
321static int msic_host_map(struct irq_host *h, unsigned int virq, 321static int msic_host_map(struct irq_domain *h, unsigned int virq,
322 irq_hw_number_t hw) 322 irq_hw_number_t hw)
323{ 323{
324 irq_set_chip_data(virq, h->host_data); 324 irq_set_chip_data(virq, h->host_data);
@@ -327,7 +327,7 @@ static int msic_host_map(struct irq_host *h, unsigned int virq,
327 return 0; 327 return 0;
328} 328}
329 329
330static struct irq_host_ops msic_host_ops = { 330static const struct irq_domain_ops msic_host_ops = {
331 .map = msic_host_map, 331 .map = msic_host_map,
332}; 332};
333 333
@@ -337,7 +337,7 @@ static void axon_msi_shutdown(struct platform_device *device)
337 u32 tmp; 337 u32 tmp;
338 338
339 pr_devel("axon_msi: disabling %s\n", 339 pr_devel("axon_msi: disabling %s\n",
340 msic->irq_host->of_node->full_name); 340 msic->irq_domain->of_node->full_name);
341 tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); 341 tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG);
342 tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; 342 tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE;
343 msic_dcr_write(msic, MSIC_CTRL_REG, tmp); 343 msic_dcr_write(msic, MSIC_CTRL_REG, tmp);
@@ -392,16 +392,13 @@ static int axon_msi_probe(struct platform_device *device)
392 } 392 }
393 memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES); 393 memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
394 394
395 msic->irq_host = irq_alloc_host(dn, IRQ_HOST_MAP_NOMAP, 395 msic->irq_domain = irq_domain_add_nomap(dn, &msic_host_ops, msic);
396 NR_IRQS, &msic_host_ops, 0); 396 if (!msic->irq_domain) {
397 if (!msic->irq_host) { 397 printk(KERN_ERR "axon_msi: couldn't allocate irq_domain for %s\n",
398 printk(KERN_ERR "axon_msi: couldn't allocate irq_host for %s\n",
399 dn->full_name); 398 dn->full_name);
400 goto out_free_fifo; 399 goto out_free_fifo;
401 } 400 }
402 401
403 msic->irq_host->host_data = msic;
404
405 irq_set_handler_data(virq, msic); 402 irq_set_handler_data(virq, msic);
406 irq_set_chained_handler(virq, axon_msi_cascade); 403 irq_set_chained_handler(virq, axon_msi_cascade);
407 pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq); 404 pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq);
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
index 2516c1cf8467..943c9d39aa16 100644
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ b/arch/powerpc/platforms/cell/beat_htab.c
@@ -95,7 +95,6 @@ static long beat_lpar_hpte_insert(unsigned long hpte_group,
95 unsigned long lpar_rc; 95 unsigned long lpar_rc;
96 u64 hpte_v, hpte_r, slot; 96 u64 hpte_v, hpte_r, slot;
97 97
98 /* same as iseries */
99 if (vflags & HPTE_V_SECONDARY) 98 if (vflags & HPTE_V_SECONDARY)
100 return -1; 99 return -1;
101 100
@@ -319,7 +318,6 @@ static long beat_lpar_hpte_insert_v3(unsigned long hpte_group,
319 unsigned long lpar_rc; 318 unsigned long lpar_rc;
320 u64 hpte_v, hpte_r, slot; 319 u64 hpte_v, hpte_r, slot;
321 320
322 /* same as iseries */
323 if (vflags & HPTE_V_SECONDARY) 321 if (vflags & HPTE_V_SECONDARY)
324 return -1; 322 return -1;
325 323
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c
index 55015e1f6939..e5c3a2c6090d 100644
--- a/arch/powerpc/platforms/cell/beat_interrupt.c
+++ b/arch/powerpc/platforms/cell/beat_interrupt.c
@@ -34,7 +34,7 @@ static DEFINE_RAW_SPINLOCK(beatic_irq_mask_lock);
34static uint64_t beatic_irq_mask_enable[(MAX_IRQS+255)/64]; 34static uint64_t beatic_irq_mask_enable[(MAX_IRQS+255)/64];
35static uint64_t beatic_irq_mask_ack[(MAX_IRQS+255)/64]; 35static uint64_t beatic_irq_mask_ack[(MAX_IRQS+255)/64];
36 36
37static struct irq_host *beatic_host; 37static struct irq_domain *beatic_host;
38 38
39/* 39/*
40 * In this implementation, "virq" == "IRQ plug number", 40 * In this implementation, "virq" == "IRQ plug number",
@@ -122,7 +122,7 @@ static struct irq_chip beatic_pic = {
122 * 122 *
123 * Note that the number (virq) is already assigned at upper layer. 123 * Note that the number (virq) is already assigned at upper layer.
124 */ 124 */
125static void beatic_pic_host_unmap(struct irq_host *h, unsigned int virq) 125static void beatic_pic_host_unmap(struct irq_domain *h, unsigned int virq)
126{ 126{
127 beat_destruct_irq_plug(virq); 127 beat_destruct_irq_plug(virq);
128} 128}
@@ -133,7 +133,7 @@ static void beatic_pic_host_unmap(struct irq_host *h, unsigned int virq)
133 * 133 *
134 * Note that the number (virq) is already assigned at upper layer. 134 * Note that the number (virq) is already assigned at upper layer.
135 */ 135 */
136static int beatic_pic_host_map(struct irq_host *h, unsigned int virq, 136static int beatic_pic_host_map(struct irq_domain *h, unsigned int virq,
137 irq_hw_number_t hw) 137 irq_hw_number_t hw)
138{ 138{
139 int64_t err; 139 int64_t err;
@@ -154,7 +154,7 @@ static int beatic_pic_host_map(struct irq_host *h, unsigned int virq,
154 * Called from irq_create_of_mapping() only. 154 * Called from irq_create_of_mapping() only.
155 * Note: We have only 1 entry to translate. 155 * Note: We have only 1 entry to translate.
156 */ 156 */
157static int beatic_pic_host_xlate(struct irq_host *h, struct device_node *ct, 157static int beatic_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
158 const u32 *intspec, unsigned int intsize, 158 const u32 *intspec, unsigned int intsize,
159 irq_hw_number_t *out_hwirq, 159 irq_hw_number_t *out_hwirq,
160 unsigned int *out_flags) 160 unsigned int *out_flags)
@@ -166,13 +166,13 @@ static int beatic_pic_host_xlate(struct irq_host *h, struct device_node *ct,
166 return 0; 166 return 0;
167} 167}
168 168
169static int beatic_pic_host_match(struct irq_host *h, struct device_node *np) 169static int beatic_pic_host_match(struct irq_domain *h, struct device_node *np)
170{ 170{
171 /* Match all */ 171 /* Match all */
172 return 1; 172 return 1;
173} 173}
174 174
175static struct irq_host_ops beatic_pic_host_ops = { 175static const struct irq_domain_ops beatic_pic_host_ops = {
176 .map = beatic_pic_host_map, 176 .map = beatic_pic_host_map,
177 .unmap = beatic_pic_host_unmap, 177 .unmap = beatic_pic_host_unmap,
178 .xlate = beatic_pic_host_xlate, 178 .xlate = beatic_pic_host_xlate,
@@ -239,9 +239,7 @@ void __init beatic_init_IRQ(void)
239 ppc_md.get_irq = beatic_get_irq; 239 ppc_md.get_irq = beatic_get_irq;
240 240
241 /* Allocate an irq host */ 241 /* Allocate an irq host */
242 beatic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, 242 beatic_host = irq_domain_add_nomap(NULL, &beatic_pic_host_ops, NULL);
243 &beatic_pic_host_ops,
244 0);
245 BUG_ON(beatic_host == NULL); 243 BUG_ON(beatic_host == NULL);
246 irq_set_default_host(beatic_host); 244 irq_set_default_host(beatic_host);
247} 245}
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 96a433dd2d64..2d42f3bb66d6 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -56,7 +56,7 @@ struct iic {
56 56
57static DEFINE_PER_CPU(struct iic, cpu_iic); 57static DEFINE_PER_CPU(struct iic, cpu_iic);
58#define IIC_NODE_COUNT 2 58#define IIC_NODE_COUNT 2
59static struct irq_host *iic_host; 59static struct irq_domain *iic_host;
60 60
61/* Convert between "pending" bits and hw irq number */ 61/* Convert between "pending" bits and hw irq number */
62static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits) 62static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits)
@@ -186,7 +186,7 @@ void iic_message_pass(int cpu, int msg)
186 out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4); 186 out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4);
187} 187}
188 188
189struct irq_host *iic_get_irq_host(int node) 189struct irq_domain *iic_get_irq_host(int node)
190{ 190{
191 return iic_host; 191 return iic_host;
192} 192}
@@ -222,13 +222,13 @@ void iic_request_IPIs(void)
222#endif /* CONFIG_SMP */ 222#endif /* CONFIG_SMP */
223 223
224 224
225static int iic_host_match(struct irq_host *h, struct device_node *node) 225static int iic_host_match(struct irq_domain *h, struct device_node *node)
226{ 226{
227 return of_device_is_compatible(node, 227 return of_device_is_compatible(node,
228 "IBM,CBEA-Internal-Interrupt-Controller"); 228 "IBM,CBEA-Internal-Interrupt-Controller");
229} 229}
230 230
231static int iic_host_map(struct irq_host *h, unsigned int virq, 231static int iic_host_map(struct irq_domain *h, unsigned int virq,
232 irq_hw_number_t hw) 232 irq_hw_number_t hw)
233{ 233{
234 switch (hw & IIC_IRQ_TYPE_MASK) { 234 switch (hw & IIC_IRQ_TYPE_MASK) {
@@ -245,7 +245,7 @@ static int iic_host_map(struct irq_host *h, unsigned int virq,
245 return 0; 245 return 0;
246} 246}
247 247
248static int iic_host_xlate(struct irq_host *h, struct device_node *ct, 248static int iic_host_xlate(struct irq_domain *h, struct device_node *ct,
249 const u32 *intspec, unsigned int intsize, 249 const u32 *intspec, unsigned int intsize,
250 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 250 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
251 251
@@ -285,7 +285,7 @@ static int iic_host_xlate(struct irq_host *h, struct device_node *ct,
285 return 0; 285 return 0;
286} 286}
287 287
288static struct irq_host_ops iic_host_ops = { 288static const struct irq_domain_ops iic_host_ops = {
289 .match = iic_host_match, 289 .match = iic_host_match,
290 .map = iic_host_map, 290 .map = iic_host_map,
291 .xlate = iic_host_xlate, 291 .xlate = iic_host_xlate,
@@ -378,8 +378,8 @@ static int __init setup_iic(void)
378void __init iic_init_IRQ(void) 378void __init iic_init_IRQ(void)
379{ 379{
380 /* Setup an irq host data structure */ 380 /* Setup an irq host data structure */
381 iic_host = irq_alloc_host(NULL, IRQ_HOST_MAP_LINEAR, IIC_SOURCE_COUNT, 381 iic_host = irq_domain_add_linear(NULL, IIC_SOURCE_COUNT, &iic_host_ops,
382 &iic_host_ops, IIC_IRQ_INVALID); 382 NULL);
383 BUG_ON(iic_host == NULL); 383 BUG_ON(iic_host == NULL);
384 irq_set_default_host(iic_host); 384 irq_set_default_host(iic_host);
385 385
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 62002a7edfed..fa3e294fd343 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -197,7 +197,8 @@ static void __init mpic_init_IRQ(void)
197 /* The MPIC driver will get everything it needs from the 197 /* The MPIC driver will get everything it needs from the
198 * device-tree, just pass 0 to all arguments 198 * device-tree, just pass 0 to all arguments
199 */ 199 */
200 mpic = mpic_alloc(dn, 0, MPIC_SECONDARY, 0, 0, " MPIC "); 200 mpic = mpic_alloc(dn, 0, MPIC_SECONDARY | MPIC_NO_RESET,
201 0, 0, " MPIC ");
201 if (mpic == NULL) 202 if (mpic == NULL)
202 continue; 203 continue;
203 mpic_init(mpic); 204 mpic_init(mpic);
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c
index 442c28c00f88..d8b7cc8a66ca 100644
--- a/arch/powerpc/platforms/cell/spider-pic.c
+++ b/arch/powerpc/platforms/cell/spider-pic.c
@@ -62,7 +62,7 @@ enum {
62#define SPIDER_IRQ_INVALID 63 62#define SPIDER_IRQ_INVALID 63
63 63
64struct spider_pic { 64struct spider_pic {
65 struct irq_host *host; 65 struct irq_domain *host;
66 void __iomem *regs; 66 void __iomem *regs;
67 unsigned int node_id; 67 unsigned int node_id;
68}; 68};
@@ -168,7 +168,7 @@ static struct irq_chip spider_pic = {
168 .irq_set_type = spider_set_irq_type, 168 .irq_set_type = spider_set_irq_type,
169}; 169};
170 170
171static int spider_host_map(struct irq_host *h, unsigned int virq, 171static int spider_host_map(struct irq_domain *h, unsigned int virq,
172 irq_hw_number_t hw) 172 irq_hw_number_t hw)
173{ 173{
174 irq_set_chip_data(virq, h->host_data); 174 irq_set_chip_data(virq, h->host_data);
@@ -180,7 +180,7 @@ static int spider_host_map(struct irq_host *h, unsigned int virq,
180 return 0; 180 return 0;
181} 181}
182 182
183static int spider_host_xlate(struct irq_host *h, struct device_node *ct, 183static int spider_host_xlate(struct irq_domain *h, struct device_node *ct,
184 const u32 *intspec, unsigned int intsize, 184 const u32 *intspec, unsigned int intsize,
185 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 185 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
186 186
@@ -194,7 +194,7 @@ static int spider_host_xlate(struct irq_host *h, struct device_node *ct,
194 return 0; 194 return 0;
195} 195}
196 196
197static struct irq_host_ops spider_host_ops = { 197static const struct irq_domain_ops spider_host_ops = {
198 .map = spider_host_map, 198 .map = spider_host_map,
199 .xlate = spider_host_xlate, 199 .xlate = spider_host_xlate,
200}; 200};
@@ -299,12 +299,10 @@ static void __init spider_init_one(struct device_node *of_node, int chip,
299 panic("spider_pic: can't map registers !"); 299 panic("spider_pic: can't map registers !");
300 300
301 /* Allocate a host */ 301 /* Allocate a host */
302 pic->host = irq_alloc_host(of_node, IRQ_HOST_MAP_LINEAR, 302 pic->host = irq_domain_add_linear(of_node, SPIDER_SRC_COUNT,
303 SPIDER_SRC_COUNT, &spider_host_ops, 303 &spider_host_ops, pic);
304 SPIDER_IRQ_INVALID);
305 if (pic->host == NULL) 304 if (pic->host == NULL)
306 panic("spider_pic: can't allocate irq host !"); 305 panic("spider_pic: can't allocate irq host !");
307 pic->host->host_data = pic;
308 306
309 /* Go through all sources and disable them */ 307 /* Go through all sources and disable them */
310 for (i = 0; i < SPIDER_SRC_COUNT; i++) { 308 for (i = 0; i < SPIDER_SRC_COUNT; i++) {
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index d4a094ca96f3..1d75c92ea8fb 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -646,6 +646,7 @@ long spufs_create(struct path *path, struct dentry *dentry,
646 646
647out: 647out:
648 mutex_unlock(&path->dentry->d_inode->i_mutex); 648 mutex_unlock(&path->dentry->d_inode->i_mutex);
649 dput(dentry);
649 return ret; 650 return ret;
650} 651}
651 652
@@ -757,9 +758,9 @@ spufs_create_root(struct super_block *sb, void *data)
757 goto out_iput; 758 goto out_iput;
758 759
759 ret = -ENOMEM; 760 ret = -ENOMEM;
760 sb->s_root = d_alloc_root(inode); 761 sb->s_root = d_make_root(inode);
761 if (!sb->s_root) 762 if (!sb->s_root)
762 goto out_iput; 763 goto out;
763 764
764 return 0; 765 return 0;
765out_iput: 766out_iput:
@@ -828,19 +829,19 @@ static int __init spufs_init(void)
828 ret = spu_sched_init(); 829 ret = spu_sched_init();
829 if (ret) 830 if (ret)
830 goto out_cache; 831 goto out_cache;
831 ret = register_filesystem(&spufs_type); 832 ret = register_spu_syscalls(&spufs_calls);
832 if (ret) 833 if (ret)
833 goto out_sched; 834 goto out_sched;
834 ret = register_spu_syscalls(&spufs_calls); 835 ret = register_filesystem(&spufs_type);
835 if (ret) 836 if (ret)
836 goto out_fs; 837 goto out_syscalls;
837 838
838 spufs_init_isolated_loader(); 839 spufs_init_isolated_loader();
839 840
840 return 0; 841 return 0;
841 842
842out_fs: 843out_syscalls:
843 unregister_filesystem(&spufs_type); 844 unregister_spu_syscalls(&spufs_calls);
844out_sched: 845out_sched:
845 spu_sched_exit(); 846 spu_sched_exit();
846out_cache: 847out_cache:
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index 8591bb62d7fc..5665dcc382c7 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -70,8 +70,6 @@ static long do_spu_create(const char __user *pathname, unsigned int flags,
70 ret = PTR_ERR(dentry); 70 ret = PTR_ERR(dentry);
71 if (!IS_ERR(dentry)) { 71 if (!IS_ERR(dentry)) {
72 ret = spufs_create(&path, dentry, flags, mode, neighbor); 72 ret = spufs_create(&path, dentry, flags, mode, neighbor);
73 mutex_unlock(&path.dentry->d_inode->i_mutex);
74 dput(dentry);
75 path_put(&path); 73 path_put(&path);
76 } 74 }
77 75
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index f1f17bb2c33c..c665d7de6c99 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -435,7 +435,8 @@ static void __init chrp_find_openpic(void)
435 if (len > 1) 435 if (len > 1)
436 isu_size = iranges[3]; 436 isu_size = iranges[3];
437 437
438 chrp_mpic = mpic_alloc(np, opaddr, 0, isu_size, 0, " MPIC "); 438 chrp_mpic = mpic_alloc(np, opaddr, MPIC_NO_RESET,
439 isu_size, 0, " MPIC ");
439 if (chrp_mpic == NULL) { 440 if (chrp_mpic == NULL) {
440 printk(KERN_ERR "Failed to allocate MPIC structure\n"); 441 printk(KERN_ERR "Failed to allocate MPIC structure\n");
441 goto bail; 442 goto bail;
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
index f61a2dd96b99..53d6eee01963 100644
--- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c
@@ -96,9 +96,9 @@ static struct irq_chip flipper_pic = {
96 * 96 *
97 */ 97 */
98 98
99static struct irq_host *flipper_irq_host; 99static struct irq_domain *flipper_irq_host;
100 100
101static int flipper_pic_map(struct irq_host *h, unsigned int virq, 101static int flipper_pic_map(struct irq_domain *h, unsigned int virq,
102 irq_hw_number_t hwirq) 102 irq_hw_number_t hwirq)
103{ 103{
104 irq_set_chip_data(virq, h->host_data); 104 irq_set_chip_data(virq, h->host_data);
@@ -107,13 +107,13 @@ static int flipper_pic_map(struct irq_host *h, unsigned int virq,
107 return 0; 107 return 0;
108} 108}
109 109
110static int flipper_pic_match(struct irq_host *h, struct device_node *np) 110static int flipper_pic_match(struct irq_domain *h, struct device_node *np)
111{ 111{
112 return 1; 112 return 1;
113} 113}
114 114
115 115
116static struct irq_host_ops flipper_irq_host_ops = { 116static const struct irq_domain_ops flipper_irq_domain_ops = {
117 .map = flipper_pic_map, 117 .map = flipper_pic_map,
118 .match = flipper_pic_match, 118 .match = flipper_pic_match,
119}; 119};
@@ -130,10 +130,10 @@ static void __flipper_quiesce(void __iomem *io_base)
130 out_be32(io_base + FLIPPER_ICR, 0xffffffff); 130 out_be32(io_base + FLIPPER_ICR, 0xffffffff);
131} 131}
132 132
133struct irq_host * __init flipper_pic_init(struct device_node *np) 133struct irq_domain * __init flipper_pic_init(struct device_node *np)
134{ 134{
135 struct device_node *pi; 135 struct device_node *pi;
136 struct irq_host *irq_host = NULL; 136 struct irq_domain *irq_domain = NULL;
137 struct resource res; 137 struct resource res;
138 void __iomem *io_base; 138 void __iomem *io_base;
139 int retval; 139 int retval;
@@ -159,17 +159,15 @@ struct irq_host * __init flipper_pic_init(struct device_node *np)
159 159
160 __flipper_quiesce(io_base); 160 __flipper_quiesce(io_base);
161 161
162 irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, FLIPPER_NR_IRQS, 162 irq_domain = irq_domain_add_linear(np, FLIPPER_NR_IRQS,
163 &flipper_irq_host_ops, -1); 163 &flipper_irq_domain_ops, io_base);
164 if (!irq_host) { 164 if (!irq_domain) {
165 pr_err("failed to allocate irq_host\n"); 165 pr_err("failed to allocate irq_domain\n");
166 return NULL; 166 return NULL;
167 } 167 }
168 168
169 irq_host->host_data = io_base;
170
171out: 169out:
172 return irq_host; 170 return irq_domain;
173} 171}
174 172
175unsigned int flipper_pic_get_irq(void) 173unsigned int flipper_pic_get_irq(void)
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
index e4919170c6bc..3006b5117ec6 100644
--- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
+++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c
@@ -89,9 +89,9 @@ static struct irq_chip hlwd_pic = {
89 * 89 *
90 */ 90 */
91 91
92static struct irq_host *hlwd_irq_host; 92static struct irq_domain *hlwd_irq_host;
93 93
94static int hlwd_pic_map(struct irq_host *h, unsigned int virq, 94static int hlwd_pic_map(struct irq_domain *h, unsigned int virq,
95 irq_hw_number_t hwirq) 95 irq_hw_number_t hwirq)
96{ 96{
97 irq_set_chip_data(virq, h->host_data); 97 irq_set_chip_data(virq, h->host_data);
@@ -100,11 +100,11 @@ static int hlwd_pic_map(struct irq_host *h, unsigned int virq,
100 return 0; 100 return 0;
101} 101}
102 102
103static struct irq_host_ops hlwd_irq_host_ops = { 103static const struct irq_domain_ops hlwd_irq_domain_ops = {
104 .map = hlwd_pic_map, 104 .map = hlwd_pic_map,
105}; 105};
106 106
107static unsigned int __hlwd_pic_get_irq(struct irq_host *h) 107static unsigned int __hlwd_pic_get_irq(struct irq_domain *h)
108{ 108{
109 void __iomem *io_base = h->host_data; 109 void __iomem *io_base = h->host_data;
110 int irq; 110 int irq;
@@ -123,14 +123,14 @@ static void hlwd_pic_irq_cascade(unsigned int cascade_virq,
123 struct irq_desc *desc) 123 struct irq_desc *desc)
124{ 124{
125 struct irq_chip *chip = irq_desc_get_chip(desc); 125 struct irq_chip *chip = irq_desc_get_chip(desc);
126 struct irq_host *irq_host = irq_get_handler_data(cascade_virq); 126 struct irq_domain *irq_domain = irq_get_handler_data(cascade_virq);
127 unsigned int virq; 127 unsigned int virq;
128 128
129 raw_spin_lock(&desc->lock); 129 raw_spin_lock(&desc->lock);
130 chip->irq_mask(&desc->irq_data); /* IRQ_LEVEL */ 130 chip->irq_mask(&desc->irq_data); /* IRQ_LEVEL */
131 raw_spin_unlock(&desc->lock); 131 raw_spin_unlock(&desc->lock);
132 132
133 virq = __hlwd_pic_get_irq(irq_host); 133 virq = __hlwd_pic_get_irq(irq_domain);
134 if (virq != NO_IRQ) 134 if (virq != NO_IRQ)
135 generic_handle_irq(virq); 135 generic_handle_irq(virq);
136 else 136 else
@@ -155,9 +155,9 @@ static void __hlwd_quiesce(void __iomem *io_base)
155 out_be32(io_base + HW_BROADWAY_ICR, 0xffffffff); 155 out_be32(io_base + HW_BROADWAY_ICR, 0xffffffff);
156} 156}
157 157
158struct irq_host *hlwd_pic_init(struct device_node *np) 158struct irq_domain *hlwd_pic_init(struct device_node *np)
159{ 159{
160 struct irq_host *irq_host; 160 struct irq_domain *irq_domain;
161 struct resource res; 161 struct resource res;
162 void __iomem *io_base; 162 void __iomem *io_base;
163 int retval; 163 int retval;
@@ -177,15 +177,14 @@ struct irq_host *hlwd_pic_init(struct device_node *np)
177 177
178 __hlwd_quiesce(io_base); 178 __hlwd_quiesce(io_base);
179 179
180 irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, HLWD_NR_IRQS, 180 irq_domain = irq_domain_add_linear(np, HLWD_NR_IRQS,
181 &hlwd_irq_host_ops, -1); 181 &hlwd_irq_domain_ops, io_base);
182 if (!irq_host) { 182 if (!irq_domain) {
183 pr_err("failed to allocate irq_host\n"); 183 pr_err("failed to allocate irq_domain\n");
184 return NULL; 184 return NULL;
185 } 185 }
186 irq_host->host_data = io_base;
187 186
188 return irq_host; 187 return irq_domain;
189} 188}
190 189
191unsigned int hlwd_pic_get_irq(void) 190unsigned int hlwd_pic_get_irq(void)
@@ -200,7 +199,7 @@ unsigned int hlwd_pic_get_irq(void)
200 199
201void hlwd_pic_probe(void) 200void hlwd_pic_probe(void)
202{ 201{
203 struct irq_host *host; 202 struct irq_domain *host;
204 struct device_node *np; 203 struct device_node *np;
205 const u32 *interrupts; 204 const u32 *interrupts;
206 int cascade_virq; 205 int cascade_virq;
diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c
index 9cfcf20c0560..ab51b21b4bd7 100644
--- a/arch/powerpc/platforms/embedded6xx/holly.c
+++ b/arch/powerpc/platforms/embedded6xx/holly.c
@@ -154,11 +154,9 @@ static void __init holly_init_IRQ(void)
154 struct device_node *cascade_node = NULL; 154 struct device_node *cascade_node = NULL;
155#endif 155#endif
156 156
157 mpic = mpic_alloc(NULL, 0, 157 mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
158 MPIC_BIG_ENDIAN | MPIC_WANTS_RESET |
159 MPIC_SPV_EOI | MPIC_NO_PTHROU_DIS | MPIC_REGSET_TSI108, 158 MPIC_SPV_EOI | MPIC_NO_PTHROU_DIS | MPIC_REGSET_TSI108,
160 24, 159 24, 0,
161 NR_IRQS-4, /* num_sources used */
162 "Tsi108_PIC"); 160 "Tsi108_PIC");
163 161
164 BUG_ON(mpic == NULL); 162 BUG_ON(mpic == NULL);
diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c
index bcfad92c9cec..455e7c087422 100644
--- a/arch/powerpc/platforms/embedded6xx/linkstation.c
+++ b/arch/powerpc/platforms/embedded6xx/linkstation.c
@@ -82,8 +82,7 @@ static void __init linkstation_init_IRQ(void)
82{ 82{
83 struct mpic *mpic; 83 struct mpic *mpic;
84 84
85 mpic = mpic_alloc(NULL, 0, MPIC_WANTS_RESET, 85 mpic = mpic_alloc(NULL, 0, 0, 4, 0, " EPIC ");
86 4, 32, " EPIC ");
87 BUG_ON(mpic == NULL); 86 BUG_ON(mpic == NULL);
88 87
89 /* PCI IRQs */ 88 /* PCI IRQs */
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
index f3350d786f5b..74ccce36baed 100644
--- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
+++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c
@@ -108,11 +108,9 @@ static void __init mpc7448_hpc2_init_IRQ(void)
108 struct device_node *cascade_node = NULL; 108 struct device_node *cascade_node = NULL;
109#endif 109#endif
110 110
111 mpic = mpic_alloc(NULL, 0, 111 mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN |
112 MPIC_BIG_ENDIAN | MPIC_WANTS_RESET |
113 MPIC_SPV_EOI | MPIC_NO_PTHROU_DIS | MPIC_REGSET_TSI108, 112 MPIC_SPV_EOI | MPIC_NO_PTHROU_DIS | MPIC_REGSET_TSI108,
114 24, 113 24, 0,
115 NR_IRQS-4, /* num_sources used */
116 "Tsi108_PIC"); 114 "Tsi108_PIC");
117 115
118 BUG_ON(mpic == NULL); 116 BUG_ON(mpic == NULL);
diff --git a/arch/powerpc/platforms/embedded6xx/storcenter.c b/arch/powerpc/platforms/embedded6xx/storcenter.c
index afa638834965..e0ed3c71d69b 100644
--- a/arch/powerpc/platforms/embedded6xx/storcenter.c
+++ b/arch/powerpc/platforms/embedded6xx/storcenter.c
@@ -84,8 +84,7 @@ static void __init storcenter_init_IRQ(void)
84{ 84{
85 struct mpic *mpic; 85 struct mpic *mpic;
86 86
87 mpic = mpic_alloc(NULL, 0, MPIC_WANTS_RESET, 87 mpic = mpic_alloc(NULL, 0, 0, 16, 0, " OpenPIC ");
88 16, 32, " OpenPIC ");
89 BUG_ON(mpic == NULL); 88 BUG_ON(mpic == NULL);
90 89
91 /* 90 /*
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig
deleted file mode 100644
index b57cda3a0817..000000000000
--- a/arch/powerpc/platforms/iseries/Kconfig
+++ /dev/null
@@ -1,38 +0,0 @@
1config PPC_ISERIES
2 bool "IBM Legacy iSeries"
3 depends on PPC64 && PPC_BOOK3S
4 select PPC_SMP_MUXED_IPI
5 select PPC_INDIRECT_PIO
6 select PPC_INDIRECT_MMIO
7 select PPC_PCI_CHOICE if EXPERT
8
9menu "iSeries device drivers"
10 depends on PPC_ISERIES
11
12config VIODASD
13 tristate "iSeries Virtual I/O disk support"
14 depends on BLOCK
15 select VIOPATH
16 help
17 If you are running on an iSeries system and you want to use
18 virtual disks created and managed by OS/400, say Y.
19
20config VIOCD
21 tristate "iSeries Virtual I/O CD support"
22 depends on BLOCK
23 select VIOPATH
24 help
25 If you are running Linux on an IBM iSeries system and you want to
26 read a CD drive owned by OS/400, say Y here.
27
28config VIOTAPE
29 tristate "iSeries Virtual Tape Support"
30 select VIOPATH
31 help
32 If you are running Linux on an iSeries system and you want Linux
33 to read and/or write a tape drive owned by OS/400, say Y here.
34
35endmenu
36
37config VIOPATH
38 bool
diff --git a/arch/powerpc/platforms/iseries/Makefile b/arch/powerpc/platforms/iseries/Makefile
deleted file mode 100644
index a7602b11ed9d..000000000000
--- a/arch/powerpc/platforms/iseries/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
1ccflags-y := -mno-minimal-toc
2
3obj-y += exception.o
4obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o dt.o mf.o lpevents.o \
5 hvcall.o proc.o htab.o iommu.o misc.o irq.o
6obj-$(CONFIG_PCI) += pci.o
7obj-$(CONFIG_SMP) += smp.o
8obj-$(CONFIG_VIOPATH) += viopath.o vio.o
9obj-$(CONFIG_MODULES) += ksyms.o
diff --git a/arch/powerpc/platforms/iseries/call_hpt.h b/arch/powerpc/platforms/iseries/call_hpt.h
deleted file mode 100644
index 8d95fe4b554e..000000000000
--- a/arch/powerpc/platforms/iseries/call_hpt.h
+++ /dev/null
@@ -1,102 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _PLATFORMS_ISERIES_CALL_HPT_H
19#define _PLATFORMS_ISERIES_CALL_HPT_H
20
21/*
22 * This file contains the "hypervisor call" interface which is used to
23 * drive the hypervisor from the OS.
24 */
25
26#include <asm/iseries/hv_call_sc.h>
27#include <asm/iseries/hv_types.h>
28#include <asm/mmu.h>
29
30#define HvCallHptGetHptAddress HvCallHpt + 0
31#define HvCallHptGetHptPages HvCallHpt + 1
32#define HvCallHptSetPp HvCallHpt + 5
33#define HvCallHptSetSwBits HvCallHpt + 6
34#define HvCallHptUpdate HvCallHpt + 7
35#define HvCallHptInvalidateNoSyncICache HvCallHpt + 8
36#define HvCallHptGet HvCallHpt + 11
37#define HvCallHptFindNextValid HvCallHpt + 12
38#define HvCallHptFindValid HvCallHpt + 13
39#define HvCallHptAddValidate HvCallHpt + 16
40#define HvCallHptInvalidateSetSwBitsGet HvCallHpt + 18
41
42
43static inline u64 HvCallHpt_getHptAddress(void)
44{
45 return HvCall0(HvCallHptGetHptAddress);
46}
47
48static inline u64 HvCallHpt_getHptPages(void)
49{
50 return HvCall0(HvCallHptGetHptPages);
51}
52
53static inline void HvCallHpt_setPp(u32 hpteIndex, u8 value)
54{
55 HvCall2(HvCallHptSetPp, hpteIndex, value);
56}
57
58static inline void HvCallHpt_setSwBits(u32 hpteIndex, u8 bitson, u8 bitsoff)
59{
60 HvCall3(HvCallHptSetSwBits, hpteIndex, bitson, bitsoff);
61}
62
63static inline void HvCallHpt_invalidateNoSyncICache(u32 hpteIndex)
64{
65 HvCall1(HvCallHptInvalidateNoSyncICache, hpteIndex);
66}
67
68static inline u64 HvCallHpt_invalidateSetSwBitsGet(u32 hpteIndex, u8 bitson,
69 u8 bitsoff)
70{
71 u64 compressedStatus;
72
73 compressedStatus = HvCall4(HvCallHptInvalidateSetSwBitsGet,
74 hpteIndex, bitson, bitsoff, 1);
75 HvCall1(HvCallHptInvalidateNoSyncICache, hpteIndex);
76 return compressedStatus;
77}
78
79static inline u64 HvCallHpt_findValid(struct hash_pte *hpte, u64 vpn)
80{
81 return HvCall3Ret16(HvCallHptFindValid, hpte, vpn, 0, 0);
82}
83
84static inline u64 HvCallHpt_findNextValid(struct hash_pte *hpte, u32 hpteIndex,
85 u8 bitson, u8 bitsoff)
86{
87 return HvCall3Ret16(HvCallHptFindNextValid, hpte, hpteIndex,
88 bitson, bitsoff);
89}
90
91static inline void HvCallHpt_get(struct hash_pte *hpte, u32 hpteIndex)
92{
93 HvCall2Ret16(HvCallHptGet, hpte, hpteIndex, 0);
94}
95
96static inline void HvCallHpt_addValidate(u32 hpteIndex, u32 hBit,
97 struct hash_pte *hpte)
98{
99 HvCall4(HvCallHptAddValidate, hpteIndex, hBit, hpte->v, hpte->r);
100}
101
102#endif /* _PLATFORMS_ISERIES_CALL_HPT_H */
diff --git a/arch/powerpc/platforms/iseries/call_pci.h b/arch/powerpc/platforms/iseries/call_pci.h
deleted file mode 100644
index dbdf69850ed9..000000000000
--- a/arch/powerpc/platforms/iseries/call_pci.h
+++ /dev/null
@@ -1,309 +0,0 @@
1/*
2 * Provides the Hypervisor PCI calls for iSeries Linux Parition.
3 * Copyright (C) 2001 <Wayne G Holm> <IBM Corporation>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the:
17 * Free Software Foundation, Inc.,
18 * 59 Temple Place, Suite 330,
19 * Boston, MA 02111-1307 USA
20 *
21 * Change Activity:
22 * Created, Jan 9, 2001
23 */
24
25#ifndef _PLATFORMS_ISERIES_CALL_PCI_H
26#define _PLATFORMS_ISERIES_CALL_PCI_H
27
28#include <asm/iseries/hv_call_sc.h>
29#include <asm/iseries/hv_types.h>
30
31/*
32 * DSA == Direct Select Address
33 * this struct must be 64 bits in total
34 */
35struct HvCallPci_DsaAddr {
36 u16 busNumber; /* PHB index? */
37 u8 subBusNumber; /* PCI bus number? */
38 u8 deviceId; /* device and function? */
39 u8 barNumber;
40 u8 reserved[3];
41};
42
43union HvDsaMap {
44 u64 DsaAddr;
45 struct HvCallPci_DsaAddr Dsa;
46};
47
48struct HvCallPci_LoadReturn {
49 u64 rc;
50 u64 value;
51};
52
53enum HvCallPci_DeviceType {
54 HvCallPci_NodeDevice = 1,
55 HvCallPci_SpDevice = 2,
56 HvCallPci_IopDevice = 3,
57 HvCallPci_BridgeDevice = 4,
58 HvCallPci_MultiFunctionDevice = 5,
59 HvCallPci_IoaDevice = 6
60};
61
62
63struct HvCallPci_DeviceInfo {
64 u32 deviceType; /* See DeviceType enum for values */
65};
66
67struct HvCallPci_BusUnitInfo {
68 u32 sizeReturned; /* length of data returned */
69 u32 deviceType; /* see DeviceType enum for values */
70};
71
72struct HvCallPci_BridgeInfo {
73 struct HvCallPci_BusUnitInfo busUnitInfo; /* Generic bus unit info */
74 u8 subBusNumber; /* Bus number of secondary bus */
75 u8 maxAgents; /* Max idsels on secondary bus */
76 u8 maxSubBusNumber; /* Max Sub Bus */
77 u8 logicalSlotNumber; /* Logical Slot Number for IOA */
78};
79
80
81/*
82 * Maximum BusUnitInfo buffer size. Provided for clients so
83 * they can allocate a buffer big enough for any type of bus
84 * unit. Increase as needed.
85 */
86enum {HvCallPci_MaxBusUnitInfoSize = 128};
87
88struct HvCallPci_BarParms {
89 u64 vaddr;
90 u64 raddr;
91 u64 size;
92 u64 protectStart;
93 u64 protectEnd;
94 u64 relocationOffset;
95 u64 pciAddress;
96 u64 reserved[3];
97};
98
99enum HvCallPci_VpdType {
100 HvCallPci_BusVpd = 1,
101 HvCallPci_BusAdapterVpd = 2
102};
103
104#define HvCallPciConfigLoad8 HvCallPci + 0
105#define HvCallPciConfigLoad16 HvCallPci + 1
106#define HvCallPciConfigLoad32 HvCallPci + 2
107#define HvCallPciConfigStore8 HvCallPci + 3
108#define HvCallPciConfigStore16 HvCallPci + 4
109#define HvCallPciConfigStore32 HvCallPci + 5
110#define HvCallPciEoi HvCallPci + 16
111#define HvCallPciGetBarParms HvCallPci + 18
112#define HvCallPciMaskFisr HvCallPci + 20
113#define HvCallPciUnmaskFisr HvCallPci + 21
114#define HvCallPciSetSlotReset HvCallPci + 25
115#define HvCallPciGetDeviceInfo HvCallPci + 27
116#define HvCallPciGetCardVpd HvCallPci + 28
117#define HvCallPciBarLoad8 HvCallPci + 40
118#define HvCallPciBarLoad16 HvCallPci + 41
119#define HvCallPciBarLoad32 HvCallPci + 42
120#define HvCallPciBarLoad64 HvCallPci + 43
121#define HvCallPciBarStore8 HvCallPci + 44
122#define HvCallPciBarStore16 HvCallPci + 45
123#define HvCallPciBarStore32 HvCallPci + 46
124#define HvCallPciBarStore64 HvCallPci + 47
125#define HvCallPciMaskInterrupts HvCallPci + 48
126#define HvCallPciUnmaskInterrupts HvCallPci + 49
127#define HvCallPciGetBusUnitInfo HvCallPci + 50
128
129static inline u64 HvCallPci_configLoad16(u16 busNumber, u8 subBusNumber,
130 u8 deviceId, u32 offset, u16 *value)
131{
132 struct HvCallPci_DsaAddr dsa;
133 struct HvCallPci_LoadReturn retVal;
134
135 *((u64*)&dsa) = 0;
136
137 dsa.busNumber = busNumber;
138 dsa.subBusNumber = subBusNumber;
139 dsa.deviceId = deviceId;
140
141 HvCall3Ret16(HvCallPciConfigLoad16, &retVal, *(u64 *)&dsa, offset, 0);
142
143 *value = retVal.value;
144
145 return retVal.rc;
146}
147
148static inline u64 HvCallPci_configLoad32(u16 busNumber, u8 subBusNumber,
149 u8 deviceId, u32 offset, u32 *value)
150{
151 struct HvCallPci_DsaAddr dsa;
152 struct HvCallPci_LoadReturn retVal;
153
154 *((u64*)&dsa) = 0;
155
156 dsa.busNumber = busNumber;
157 dsa.subBusNumber = subBusNumber;
158 dsa.deviceId = deviceId;
159
160 HvCall3Ret16(HvCallPciConfigLoad32, &retVal, *(u64 *)&dsa, offset, 0);
161
162 *value = retVal.value;
163
164 return retVal.rc;
165}
166
167static inline u64 HvCallPci_configStore8(u16 busNumber, u8 subBusNumber,
168 u8 deviceId, u32 offset, u8 value)
169{
170 struct HvCallPci_DsaAddr dsa;
171
172 *((u64*)&dsa) = 0;
173
174 dsa.busNumber = busNumber;
175 dsa.subBusNumber = subBusNumber;
176 dsa.deviceId = deviceId;
177
178 return HvCall4(HvCallPciConfigStore8, *(u64 *)&dsa, offset, value, 0);
179}
180
181static inline u64 HvCallPci_eoi(u16 busNumberParm, u8 subBusParm,
182 u8 deviceIdParm)
183{
184 struct HvCallPci_DsaAddr dsa;
185 struct HvCallPci_LoadReturn retVal;
186
187 *((u64*)&dsa) = 0;
188
189 dsa.busNumber = busNumberParm;
190 dsa.subBusNumber = subBusParm;
191 dsa.deviceId = deviceIdParm;
192
193 HvCall1Ret16(HvCallPciEoi, &retVal, *(u64*)&dsa);
194
195 return retVal.rc;
196}
197
198static inline u64 HvCallPci_getBarParms(u16 busNumberParm, u8 subBusParm,
199 u8 deviceIdParm, u8 barNumberParm, u64 parms, u32 sizeofParms)
200{
201 struct HvCallPci_DsaAddr dsa;
202
203 *((u64*)&dsa) = 0;
204
205 dsa.busNumber = busNumberParm;
206 dsa.subBusNumber = subBusParm;
207 dsa.deviceId = deviceIdParm;
208 dsa.barNumber = barNumberParm;
209
210 return HvCall3(HvCallPciGetBarParms, *(u64*)&dsa, parms, sizeofParms);
211}
212
213static inline u64 HvCallPci_maskFisr(u16 busNumberParm, u8 subBusParm,
214 u8 deviceIdParm, u64 fisrMask)
215{
216 struct HvCallPci_DsaAddr dsa;
217
218 *((u64*)&dsa) = 0;
219
220 dsa.busNumber = busNumberParm;
221 dsa.subBusNumber = subBusParm;
222 dsa.deviceId = deviceIdParm;
223
224 return HvCall2(HvCallPciMaskFisr, *(u64*)&dsa, fisrMask);
225}
226
227static inline u64 HvCallPci_unmaskFisr(u16 busNumberParm, u8 subBusParm,
228 u8 deviceIdParm, u64 fisrMask)
229{
230 struct HvCallPci_DsaAddr dsa;
231
232 *((u64*)&dsa) = 0;
233
234 dsa.busNumber = busNumberParm;
235 dsa.subBusNumber = subBusParm;
236 dsa.deviceId = deviceIdParm;
237
238 return HvCall2(HvCallPciUnmaskFisr, *(u64*)&dsa, fisrMask);
239}
240
241static inline u64 HvCallPci_getDeviceInfo(u16 busNumberParm, u8 subBusParm,
242 u8 deviceNumberParm, u64 parms, u32 sizeofParms)
243{
244 struct HvCallPci_DsaAddr dsa;
245
246 *((u64*)&dsa) = 0;
247
248 dsa.busNumber = busNumberParm;
249 dsa.subBusNumber = subBusParm;
250 dsa.deviceId = deviceNumberParm << 4;
251
252 return HvCall3(HvCallPciGetDeviceInfo, *(u64*)&dsa, parms, sizeofParms);
253}
254
255static inline u64 HvCallPci_maskInterrupts(u16 busNumberParm, u8 subBusParm,
256 u8 deviceIdParm, u64 interruptMask)
257{
258 struct HvCallPci_DsaAddr dsa;
259
260 *((u64*)&dsa) = 0;
261
262 dsa.busNumber = busNumberParm;
263 dsa.subBusNumber = subBusParm;
264 dsa.deviceId = deviceIdParm;
265
266 return HvCall2(HvCallPciMaskInterrupts, *(u64*)&dsa, interruptMask);
267}
268
269static inline u64 HvCallPci_unmaskInterrupts(u16 busNumberParm, u8 subBusParm,
270 u8 deviceIdParm, u64 interruptMask)
271{
272 struct HvCallPci_DsaAddr dsa;
273
274 *((u64*)&dsa) = 0;
275
276 dsa.busNumber = busNumberParm;
277 dsa.subBusNumber = subBusParm;
278 dsa.deviceId = deviceIdParm;
279
280 return HvCall2(HvCallPciUnmaskInterrupts, *(u64*)&dsa, interruptMask);
281}
282
283static inline u64 HvCallPci_getBusUnitInfo(u16 busNumberParm, u8 subBusParm,
284 u8 deviceIdParm, u64 parms, u32 sizeofParms)
285{
286 struct HvCallPci_DsaAddr dsa;
287
288 *((u64*)&dsa) = 0;
289
290 dsa.busNumber = busNumberParm;
291 dsa.subBusNumber = subBusParm;
292 dsa.deviceId = deviceIdParm;
293
294 return HvCall3(HvCallPciGetBusUnitInfo, *(u64*)&dsa, parms,
295 sizeofParms);
296}
297
298static inline int HvCallPci_getBusVpd(u16 busNumParm, u64 destParm,
299 u16 sizeParm)
300{
301 u64 xRc = HvCall4(HvCallPciGetCardVpd, busNumParm, destParm,
302 sizeParm, HvCallPci_BusVpd);
303 if (xRc == -1)
304 return -1;
305 else
306 return xRc & 0xFFFF;
307}
308
309#endif /* _PLATFORMS_ISERIES_CALL_PCI_H */
diff --git a/arch/powerpc/platforms/iseries/call_sm.h b/arch/powerpc/platforms/iseries/call_sm.h
deleted file mode 100644
index c7e251619f48..000000000000
--- a/arch/powerpc/platforms/iseries/call_sm.h
+++ /dev/null
@@ -1,37 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ISERIES_CALL_SM_H
19#define _ISERIES_CALL_SM_H
20
21/*
22 * This file contains the "hypervisor call" interface which is used to
23 * drive the hypervisor from the OS.
24 */
25
26#include <asm/iseries/hv_call_sc.h>
27#include <asm/iseries/hv_types.h>
28
29#define HvCallSmGet64BitsOfAccessMap HvCallSm + 11
30
31static inline u64 HvCallSm_get64BitsOfAccessMap(HvLpIndex lpIndex,
32 u64 indexIntoBitMap)
33{
34 return HvCall2(HvCallSmGet64BitsOfAccessMap, lpIndex, indexIntoBitMap);
35}
36
37#endif /* _ISERIES_CALL_SM_H */
diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c
deleted file mode 100644
index f0491cc28900..000000000000
--- a/arch/powerpc/platforms/iseries/dt.c
+++ /dev/null
@@ -1,643 +0,0 @@
1/*
2 * Copyright (C) 2005-2006 Michael Ellerman, IBM Corporation
3 * Copyright (C) 2000-2004, IBM Corporation
4 *
5 * Description:
6 * This file contains all the routines to build a flattened device
7 * tree for a legacy iSeries machine.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#undef DEBUG
16
17#include <linux/types.h>
18#include <linux/init.h>
19#include <linux/pci.h>
20#include <linux/pci_regs.h>
21#include <linux/pci_ids.h>
22#include <linux/threads.h>
23#include <linux/bitops.h>
24#include <linux/string.h>
25#include <linux/kernel.h>
26#include <linux/if_ether.h> /* ETH_ALEN */
27
28#include <asm/machdep.h>
29#include <asm/prom.h>
30#include <asm/lppaca.h>
31#include <asm/cputable.h>
32#include <asm/abs_addr.h>
33#include <asm/system.h>
34#include <asm/iseries/hv_types.h>
35#include <asm/iseries/hv_lp_config.h>
36#include <asm/iseries/hv_call_xm.h>
37#include <asm/udbg.h>
38
39#include "processor_vpd.h"
40#include "call_hpt.h"
41#include "call_pci.h"
42#include "pci.h"
43#include "it_exp_vpd_panel.h"
44#include "naca.h"
45
46#ifdef DEBUG
47#define DBG(fmt...) udbg_printf(fmt)
48#else
49#define DBG(fmt...)
50#endif
51
52/*
53 * These are created by the linker script at the start and end
54 * of the section containing all the strings marked with the DS macro.
55 */
56extern char __dt_strings_start[];
57extern char __dt_strings_end[];
58
59#define DS(s) ({ \
60 static const char __s[] __attribute__((section(".dt_strings"))) = s; \
61 __s; \
62})
63
64struct iseries_flat_dt {
65 struct boot_param_header header;
66 u64 reserve_map[2];
67};
68
69static void * __initdata dt_data;
70
71/*
72 * Putting these strings here keeps them out of the .dt_strings section
73 * that we capture for the strings blob of the flattened device tree.
74 */
75static char __initdata device_type_cpu[] = "cpu";
76static char __initdata device_type_memory[] = "memory";
77static char __initdata device_type_serial[] = "serial";
78static char __initdata device_type_network[] = "network";
79static char __initdata device_type_pci[] = "pci";
80static char __initdata device_type_vdevice[] = "vdevice";
81static char __initdata device_type_vscsi[] = "vscsi";
82
83
84/* EBCDIC to ASCII conversion routines */
85
86static unsigned char __init e2a(unsigned char x)
87{
88 switch (x) {
89 case 0x81 ... 0x89:
90 return x - 0x81 + 'a';
91 case 0x91 ... 0x99:
92 return x - 0x91 + 'j';
93 case 0xA2 ... 0xA9:
94 return x - 0xA2 + 's';
95 case 0xC1 ... 0xC9:
96 return x - 0xC1 + 'A';
97 case 0xD1 ... 0xD9:
98 return x - 0xD1 + 'J';
99 case 0xE2 ... 0xE9:
100 return x - 0xE2 + 'S';
101 case 0xF0 ... 0xF9:
102 return x - 0xF0 + '0';
103 }
104 return ' ';
105}
106
107static unsigned char * __init strne2a(unsigned char *dest,
108 const unsigned char *src, size_t n)
109{
110 int i;
111
112 n = strnlen(src, n);
113
114 for (i = 0; i < n; i++)
115 dest[i] = e2a(src[i]);
116
117 return dest;
118}
119
120static struct iseries_flat_dt * __init dt_init(void)
121{
122 struct iseries_flat_dt *dt;
123 unsigned long str_len;
124
125 str_len = __dt_strings_end - __dt_strings_start;
126 dt = (struct iseries_flat_dt *)ALIGN(klimit, 8);
127 dt->header.off_mem_rsvmap =
128 offsetof(struct iseries_flat_dt, reserve_map);
129 dt->header.off_dt_strings = ALIGN(sizeof(*dt), 8);
130 dt->header.off_dt_struct = dt->header.off_dt_strings
131 + ALIGN(str_len, 8);
132 dt_data = (void *)((unsigned long)dt + dt->header.off_dt_struct);
133 dt->header.dt_strings_size = str_len;
134
135 /* There is no notion of hardware cpu id on iSeries */
136 dt->header.boot_cpuid_phys = smp_processor_id();
137
138 memcpy((char *)dt + dt->header.off_dt_strings, __dt_strings_start,
139 str_len);
140
141 dt->header.magic = OF_DT_HEADER;
142 dt->header.version = 0x10;
143 dt->header.last_comp_version = 0x10;
144
145 dt->reserve_map[0] = 0;
146 dt->reserve_map[1] = 0;
147
148 return dt;
149}
150
151static void __init dt_push_u32(struct iseries_flat_dt *dt, u32 value)
152{
153 *((u32 *)dt_data) = value;
154 dt_data += sizeof(u32);
155}
156
157#ifdef notyet
158static void __init dt_push_u64(struct iseries_flat_dt *dt, u64 value)
159{
160 *((u64 *)dt_data) = value;
161 dt_data += sizeof(u64);
162}
163#endif
164
165static void __init dt_push_bytes(struct iseries_flat_dt *dt, const char *data,
166 int len)
167{
168 memcpy(dt_data, data, len);
169 dt_data += ALIGN(len, 4);
170}
171
172static void __init dt_start_node(struct iseries_flat_dt *dt, const char *name)
173{
174 dt_push_u32(dt, OF_DT_BEGIN_NODE);
175 dt_push_bytes(dt, name, strlen(name) + 1);
176}
177
178#define dt_end_node(dt) dt_push_u32(dt, OF_DT_END_NODE)
179
180static void __init __dt_prop(struct iseries_flat_dt *dt, const char *name,
181 const void *data, int len)
182{
183 unsigned long offset;
184
185 dt_push_u32(dt, OF_DT_PROP);
186
187 /* Length of the data */
188 dt_push_u32(dt, len);
189
190 offset = name - __dt_strings_start;
191
192 /* The offset of the properties name in the string blob. */
193 dt_push_u32(dt, (u32)offset);
194
195 /* The actual data. */
196 dt_push_bytes(dt, data, len);
197}
198#define dt_prop(dt, name, data, len) __dt_prop((dt), DS(name), (data), (len))
199
200#define dt_prop_str(dt, name, data) \
201 dt_prop((dt), name, (data), strlen((data)) + 1); /* + 1 for NULL */
202
203static void __init __dt_prop_u32(struct iseries_flat_dt *dt, const char *name,
204 u32 data)
205{
206 __dt_prop(dt, name, &data, sizeof(u32));
207}
208#define dt_prop_u32(dt, name, data) __dt_prop_u32((dt), DS(name), (data))
209
210static void __init __maybe_unused __dt_prop_u64(struct iseries_flat_dt *dt,
211 const char *name, u64 data)
212{
213 __dt_prop(dt, name, &data, sizeof(u64));
214}
215#define dt_prop_u64(dt, name, data) __dt_prop_u64((dt), DS(name), (data))
216
217#define dt_prop_u64_list(dt, name, data, n) \
218 dt_prop((dt), name, (data), sizeof(u64) * (n))
219
220#define dt_prop_u32_list(dt, name, data, n) \
221 dt_prop((dt), name, (data), sizeof(u32) * (n))
222
223#define dt_prop_empty(dt, name) dt_prop((dt), name, NULL, 0)
224
225static void __init dt_cpus(struct iseries_flat_dt *dt)
226{
227 unsigned char buf[32];
228 unsigned char *p;
229 unsigned int i, index;
230 struct IoHriProcessorVpd *d;
231 u32 pft_size[2];
232
233 /* yuck */
234 snprintf(buf, 32, "PowerPC,%s", cur_cpu_spec->cpu_name);
235 p = strchr(buf, ' ');
236 if (!p) p = buf + strlen(buf);
237
238 dt_start_node(dt, "cpus");
239 dt_prop_u32(dt, "#address-cells", 1);
240 dt_prop_u32(dt, "#size-cells", 0);
241
242 pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */
243 pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE);
244
245 for (i = 0; i < NR_LPPACAS; i++) {
246 if (lppaca[i].dyn_proc_status >= 2)
247 continue;
248
249 snprintf(p, 32 - (p - buf), "@%d", i);
250 dt_start_node(dt, buf);
251
252 dt_prop_str(dt, "device_type", device_type_cpu);
253
254 index = lppaca[i].dyn_hv_phys_proc_index;
255 d = &xIoHriProcessorVpd[index];
256
257 dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
258 dt_prop_u32(dt, "i-cache-line-size", d->xInstCacheOperandSize);
259
260 dt_prop_u32(dt, "d-cache-size", d->xDataL1CacheSizeKB * 1024);
261 dt_prop_u32(dt, "d-cache-line-size", d->xDataCacheOperandSize);
262
263 /* magic conversions to Hz copied from old code */
264 dt_prop_u32(dt, "clock-frequency",
265 ((1UL << 34) * 1000000) / d->xProcFreq);
266 dt_prop_u32(dt, "timebase-frequency",
267 ((1UL << 32) * 1000000) / d->xTimeBaseFreq);
268
269 dt_prop_u32(dt, "reg", i);
270
271 dt_prop_u32_list(dt, "ibm,pft-size", pft_size, 2);
272
273 dt_end_node(dt);
274 }
275
276 dt_end_node(dt);
277}
278
279static void __init dt_model(struct iseries_flat_dt *dt)
280{
281 char buf[16] = "IBM,";
282
283 /* N.B. lparcfg.c knows about the "IBM," prefixes ... */
284 /* "IBM," + mfgId[2:3] + systemSerial[1:5] */
285 strne2a(buf + 4, xItExtVpdPanel.mfgID + 2, 2);
286 strne2a(buf + 6, xItExtVpdPanel.systemSerial + 1, 5);
287 buf[11] = '\0';
288 dt_prop_str(dt, "system-id", buf);
289
290 /* "IBM," + machineType[0:4] */
291 strne2a(buf + 4, xItExtVpdPanel.machineType, 4);
292 buf[8] = '\0';
293 dt_prop_str(dt, "model", buf);
294
295 dt_prop_str(dt, "compatible", "IBM,iSeries");
296 dt_prop_u32(dt, "ibm,partition-no", HvLpConfig_getLpIndex());
297}
298
299static void __init dt_initrd(struct iseries_flat_dt *dt)
300{
301#ifdef CONFIG_BLK_DEV_INITRD
302 if (naca.xRamDisk) {
303 dt_prop_u64(dt, "linux,initrd-start", (u64)naca.xRamDisk);
304 dt_prop_u64(dt, "linux,initrd-end",
305 (u64)naca.xRamDisk + naca.xRamDiskSize * HW_PAGE_SIZE);
306 }
307#endif
308}
309
310static void __init dt_do_vdevice(struct iseries_flat_dt *dt,
311 const char *name, u32 reg, int unit,
312 const char *type, const char *compat, int end)
313{
314 char buf[32];
315
316 snprintf(buf, 32, "%s@%08x", name, reg + ((unit >= 0) ? unit : 0));
317 dt_start_node(dt, buf);
318 dt_prop_str(dt, "device_type", type);
319 if (compat)
320 dt_prop_str(dt, "compatible", compat);
321 dt_prop_u32(dt, "reg", reg + ((unit >= 0) ? unit : 0));
322 if (unit >= 0)
323 dt_prop_u32(dt, "linux,unit_address", unit);
324 if (end)
325 dt_end_node(dt);
326}
327
328static void __init dt_vdevices(struct iseries_flat_dt *dt)
329{
330 u32 reg = 0;
331 HvLpIndexMap vlan_map;
332 int i;
333
334 dt_start_node(dt, "vdevice");
335 dt_prop_str(dt, "device_type", device_type_vdevice);
336 dt_prop_str(dt, "compatible", "IBM,iSeries-vdevice");
337 dt_prop_u32(dt, "#address-cells", 1);
338 dt_prop_u32(dt, "#size-cells", 0);
339
340 dt_do_vdevice(dt, "vty", reg, -1, device_type_serial,
341 "IBM,iSeries-vty", 1);
342 reg++;
343
344 dt_do_vdevice(dt, "v-scsi", reg, -1, device_type_vscsi,
345 "IBM,v-scsi", 1);
346 reg++;
347
348 vlan_map = HvLpConfig_getVirtualLanIndexMap();
349 for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) {
350 unsigned char mac_addr[ETH_ALEN];
351
352 if ((vlan_map & (0x8000 >> i)) == 0)
353 continue;
354 dt_do_vdevice(dt, "l-lan", reg, i, device_type_network,
355 "IBM,iSeries-l-lan", 0);
356 mac_addr[0] = 0x02;
357 mac_addr[1] = 0x01;
358 mac_addr[2] = 0xff;
359 mac_addr[3] = i;
360 mac_addr[4] = 0xff;
361 mac_addr[5] = HvLpConfig_getLpIndex_outline();
362 dt_prop(dt, "local-mac-address", (char *)mac_addr, ETH_ALEN);
363 dt_prop(dt, "mac-address", (char *)mac_addr, ETH_ALEN);
364 dt_prop_u32(dt, "max-frame-size", 9000);
365 dt_prop_u32(dt, "address-bits", 48);
366
367 dt_end_node(dt);
368 }
369
370 dt_end_node(dt);
371}
372
373struct pci_class_name {
374 u16 code;
375 const char *name;
376 const char *type;
377};
378
379static struct pci_class_name __initdata pci_class_name[] = {
380 { PCI_CLASS_NETWORK_ETHERNET, "ethernet", device_type_network },
381};
382
383static struct pci_class_name * __init dt_find_pci_class_name(u16 class_code)
384{
385 struct pci_class_name *cp;
386
387 for (cp = pci_class_name;
388 cp < &pci_class_name[ARRAY_SIZE(pci_class_name)]; cp++)
389 if (cp->code == class_code)
390 return cp;
391 return NULL;
392}
393
394/*
395 * This assumes that the node slot is always on the primary bus!
396 */
397static void __init scan_bridge_slot(struct iseries_flat_dt *dt,
398 HvBusNumber bus, struct HvCallPci_BridgeInfo *bridge_info)
399{
400 HvSubBusNumber sub_bus = bridge_info->subBusNumber;
401 u16 vendor_id;
402 u16 device_id;
403 u32 class_id;
404 int err;
405 char buf[32];
406 u32 reg[5];
407 int id_sel = ISERIES_GET_DEVICE_FROM_SUBBUS(sub_bus);
408 int function = ISERIES_GET_FUNCTION_FROM_SUBBUS(sub_bus);
409 HvAgentId eads_id_sel = ISERIES_PCI_AGENTID(id_sel, function);
410 u8 devfn;
411 struct pci_class_name *cp;
412
413 /*
414 * Connect all functions of any device found.
415 */
416 for (id_sel = 1; id_sel <= bridge_info->maxAgents; id_sel++) {
417 for (function = 0; function < 8; function++) {
418 HvAgentId agent_id = ISERIES_PCI_AGENTID(id_sel,
419 function);
420 err = HvCallXm_connectBusUnit(bus, sub_bus,
421 agent_id, 0);
422 if (err) {
423 if (err != 0x302)
424 DBG("connectBusUnit(%x, %x, %x) %x\n",
425 bus, sub_bus, agent_id, err);
426 continue;
427 }
428
429 err = HvCallPci_configLoad16(bus, sub_bus, agent_id,
430 PCI_VENDOR_ID, &vendor_id);
431 if (err) {
432 DBG("ReadVendor(%x, %x, %x) %x\n",
433 bus, sub_bus, agent_id, err);
434 continue;
435 }
436 err = HvCallPci_configLoad16(bus, sub_bus, agent_id,
437 PCI_DEVICE_ID, &device_id);
438 if (err) {
439 DBG("ReadDevice(%x, %x, %x) %x\n",
440 bus, sub_bus, agent_id, err);
441 continue;
442 }
443 err = HvCallPci_configLoad32(bus, sub_bus, agent_id,
444 PCI_CLASS_REVISION , &class_id);
445 if (err) {
446 DBG("ReadClass(%x, %x, %x) %x\n",
447 bus, sub_bus, agent_id, err);
448 continue;
449 }
450
451 devfn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(eads_id_sel),
452 function);
453 cp = dt_find_pci_class_name(class_id >> 16);
454 if (cp && cp->name)
455 strncpy(buf, cp->name, sizeof(buf) - 1);
456 else
457 snprintf(buf, sizeof(buf), "pci%x,%x",
458 vendor_id, device_id);
459 buf[sizeof(buf) - 1] = '\0';
460 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
461 "@%x", PCI_SLOT(devfn));
462 buf[sizeof(buf) - 1] = '\0';
463 if (function != 0)
464 snprintf(buf + strlen(buf),
465 sizeof(buf) - strlen(buf),
466 ",%x", function);
467 dt_start_node(dt, buf);
468 reg[0] = (bus << 16) | (devfn << 8);
469 reg[1] = 0;
470 reg[2] = 0;
471 reg[3] = 0;
472 reg[4] = 0;
473 dt_prop_u32_list(dt, "reg", reg, 5);
474 if (cp && (cp->type || cp->name))
475 dt_prop_str(dt, "device_type",
476 cp->type ? cp->type : cp->name);
477 dt_prop_u32(dt, "vendor-id", vendor_id);
478 dt_prop_u32(dt, "device-id", device_id);
479 dt_prop_u32(dt, "class-code", class_id >> 8);
480 dt_prop_u32(dt, "revision-id", class_id & 0xff);
481 dt_prop_u32(dt, "linux,subbus", sub_bus);
482 dt_prop_u32(dt, "linux,agent-id", agent_id);
483 dt_prop_u32(dt, "linux,logical-slot-number",
484 bridge_info->logicalSlotNumber);
485 dt_end_node(dt);
486
487 }
488 }
489}
490
491static void __init scan_bridge(struct iseries_flat_dt *dt, HvBusNumber bus,
492 HvSubBusNumber sub_bus, int id_sel)
493{
494 struct HvCallPci_BridgeInfo bridge_info;
495 HvAgentId agent_id;
496 int function;
497 int ret;
498
499 /* Note: hvSubBus and irq is always be 0 at this level! */
500 for (function = 0; function < 8; ++function) {
501 agent_id = ISERIES_PCI_AGENTID(id_sel, function);
502 ret = HvCallXm_connectBusUnit(bus, sub_bus, agent_id, 0);
503 if (ret != 0) {
504 if (ret != 0xb)
505 DBG("connectBusUnit(%x, %x, %x) %x\n",
506 bus, sub_bus, agent_id, ret);
507 continue;
508 }
509 DBG("found device at bus %d idsel %d func %d (AgentId %x)\n",
510 bus, id_sel, function, agent_id);
511 ret = HvCallPci_getBusUnitInfo(bus, sub_bus, agent_id,
512 iseries_hv_addr(&bridge_info),
513 sizeof(struct HvCallPci_BridgeInfo));
514 if (ret != 0)
515 continue;
516 DBG("bridge info: type %x subbus %x "
517 "maxAgents %x maxsubbus %x logslot %x\n",
518 bridge_info.busUnitInfo.deviceType,
519 bridge_info.subBusNumber,
520 bridge_info.maxAgents,
521 bridge_info.maxSubBusNumber,
522 bridge_info.logicalSlotNumber);
523 if (bridge_info.busUnitInfo.deviceType ==
524 HvCallPci_BridgeDevice)
525 scan_bridge_slot(dt, bus, &bridge_info);
526 else
527 DBG("PCI: Invalid Bridge Configuration(0x%02X)",
528 bridge_info.busUnitInfo.deviceType);
529 }
530}
531
532static void __init scan_phb(struct iseries_flat_dt *dt, HvBusNumber bus)
533{
534 struct HvCallPci_DeviceInfo dev_info;
535 const HvSubBusNumber sub_bus = 0; /* EADs is always 0. */
536 int err;
537 int id_sel;
538 const int max_agents = 8;
539
540 /*
541 * Probe for EADs Bridges
542 */
543 for (id_sel = 1; id_sel < max_agents; ++id_sel) {
544 err = HvCallPci_getDeviceInfo(bus, sub_bus, id_sel,
545 iseries_hv_addr(&dev_info),
546 sizeof(struct HvCallPci_DeviceInfo));
547 if (err) {
548 if (err != 0x302)
549 DBG("getDeviceInfo(%x, %x, %x) %x\n",
550 bus, sub_bus, id_sel, err);
551 continue;
552 }
553 if (dev_info.deviceType != HvCallPci_NodeDevice) {
554 DBG("PCI: Invalid System Configuration"
555 "(0x%02X) for bus 0x%02x id 0x%02x.\n",
556 dev_info.deviceType, bus, id_sel);
557 continue;
558 }
559 scan_bridge(dt, bus, sub_bus, id_sel);
560 }
561}
562
563static void __init dt_pci_devices(struct iseries_flat_dt *dt)
564{
565 HvBusNumber bus;
566 char buf[32];
567 u32 buses[2];
568 int phb_num = 0;
569
570 /* Check all possible buses. */
571 for (bus = 0; bus < 256; bus++) {
572 int err = HvCallXm_testBus(bus);
573
574 if (err) {
575 /*
576 * Check for Unexpected Return code, a clue that
577 * something has gone wrong.
578 */
579 if (err != 0x0301)
580 DBG("Unexpected Return on Probe(0x%02X) "
581 "0x%04X\n", bus, err);
582 continue;
583 }
584 DBG("bus %d appears to exist\n", bus);
585 snprintf(buf, 32, "pci@%d", phb_num);
586 dt_start_node(dt, buf);
587 dt_prop_str(dt, "device_type", device_type_pci);
588 dt_prop_str(dt, "compatible", "IBM,iSeries-Logical-PHB");
589 dt_prop_u32(dt, "#address-cells", 3);
590 dt_prop_u32(dt, "#size-cells", 2);
591 buses[0] = buses[1] = bus;
592 dt_prop_u32_list(dt, "bus-range", buses, 2);
593 scan_phb(dt, bus);
594 dt_end_node(dt);
595 phb_num++;
596 }
597}
598
599static void dt_finish(struct iseries_flat_dt *dt)
600{
601 dt_push_u32(dt, OF_DT_END);
602 dt->header.totalsize = (unsigned long)dt_data - (unsigned long)dt;
603 klimit = ALIGN((unsigned long)dt_data, 8);
604}
605
606void * __init build_flat_dt(unsigned long phys_mem_size)
607{
608 struct iseries_flat_dt *iseries_dt;
609 u64 tmp[2];
610
611 iseries_dt = dt_init();
612
613 dt_start_node(iseries_dt, "");
614
615 dt_prop_u32(iseries_dt, "#address-cells", 2);
616 dt_prop_u32(iseries_dt, "#size-cells", 2);
617 dt_model(iseries_dt);
618
619 /* /memory */
620 dt_start_node(iseries_dt, "memory@0");
621 dt_prop_str(iseries_dt, "device_type", device_type_memory);
622 tmp[0] = 0;
623 tmp[1] = phys_mem_size;
624 dt_prop_u64_list(iseries_dt, "reg", tmp, 2);
625 dt_end_node(iseries_dt);
626
627 /* /chosen */
628 dt_start_node(iseries_dt, "chosen");
629 dt_prop_str(iseries_dt, "bootargs", cmd_line);
630 dt_initrd(iseries_dt);
631 dt_end_node(iseries_dt);
632
633 dt_cpus(iseries_dt);
634
635 dt_vdevices(iseries_dt);
636 dt_pci_devices(iseries_dt);
637
638 dt_end_node(iseries_dt);
639
640 dt_finish(iseries_dt);
641
642 return iseries_dt;
643}
diff --git a/arch/powerpc/platforms/iseries/exception.S b/arch/powerpc/platforms/iseries/exception.S
deleted file mode 100644
index f519ee17ff7d..000000000000
--- a/arch/powerpc/platforms/iseries/exception.S
+++ /dev/null
@@ -1,311 +0,0 @@
1/*
2 * Low level routines for legacy iSeries support.
3 *
4 * Extracted from head_64.S
5 *
6 * PowerPC version
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8 *
9 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
10 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
11 * Adapted for Power Macintosh by Paul Mackerras.
12 * Low-level exception handlers and MMU support
13 * rewritten by Paul Mackerras.
14 * Copyright (C) 1996 Paul Mackerras.
15 *
16 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
17 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
18 *
19 * This file contains the low-level support and setup for the
20 * PowerPC-64 platform, including trap and interrupt dispatch.
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
26 */
27
28#include <asm/reg.h>
29#include <asm/ppc_asm.h>
30#include <asm/asm-offsets.h>
31#include <asm/thread_info.h>
32#include <asm/ptrace.h>
33#include <asm/cputable.h>
34#include <asm/mmu.h>
35
36#include "exception.h"
37
38 .text
39
40 .globl system_reset_iSeries
41system_reset_iSeries:
42 bl .relative_toc
43 mfspr r13,SPRN_SPRG3 /* Get alpaca address */
44 LOAD_REG_ADDR(r23, alpaca)
45 li r0,ALPACA_SIZE
46 sub r23,r13,r23
47 divdu r24,r23,r0 /* r24 has cpu number */
48 cmpwi 0,r24,0 /* Are we processor 0? */
49 bne 1f
50 LOAD_REG_ADDR(r13, boot_paca)
51 mtspr SPRN_SPRG_PACA,r13 /* Save it away for the future */
52 mfmsr r23
53 ori r23,r23,MSR_RI
54 mtmsrd r23 /* RI on */
55 b .__start_initialization_iSeries /* Start up the first processor */
561: mfspr r4,SPRN_CTRLF
57 li r5,CTRL_RUNLATCH /* Turn off the run light */
58 andc r4,r4,r5
59 mtspr SPRN_CTRLT,r4
60
61/* Spin on __secondary_hold_spinloop until it is updated by the boot cpu. */
62/* In the UP case we'll yield() later, and we will not access the paca anyway */
63#ifdef CONFIG_SMP
64iSeries_secondary_wait_paca:
65 HMT_LOW
66 LOAD_REG_ADDR(r23, __secondary_hold_spinloop)
67 ld r23,0(r23)
68
69 cmpdi 0,r23,0
70 bne 2f /* go on when the master is ready */
71
72 /* Keep poking the Hypervisor until we're released */
73 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
74 lis r3,0x8002
75 rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */
76 li r0,-1 /* r0=-1 indicates a Hypervisor call */
77 sc /* Invoke the hypervisor via a system call */
78 b iSeries_secondary_wait_paca
79
802:
81 HMT_MEDIUM
82 sync
83
84 LOAD_REG_ADDR(r3, nr_cpu_ids) /* get number of pacas allocated */
85 lwz r3,0(r3) /* nr_cpus= or NR_CPUS can limit */
86 cmpld 0,r24,r3 /* is our cpu number allocated? */
87 bge iSeries_secondary_yield /* no, yield forever */
88
89 /* Load our paca now that it's been allocated */
90 LOAD_REG_ADDR(r13, paca)
91 ld r13,0(r13)
92 mulli r0,r24,PACA_SIZE
93 add r13,r13,r0
94 mtspr SPRN_SPRG_PACA,r13 /* Save it away for the future */
95 mfmsr r23
96 ori r23,r23,MSR_RI
97 mtmsrd r23 /* RI on */
98
99iSeries_secondary_smp_loop:
100 lbz r23,PACAPROCSTART(r13) /* Test if this processor
101 * should start */
102 cmpwi 0,r23,0
103 bne 3f /* go on when we are told */
104
105 HMT_LOW
106 /* Let the Hypervisor know we are alive */
107 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
108 lis r3,0x8002
109 rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */
110 li r0,-1 /* r0=-1 indicates a Hypervisor call */
111 sc /* Invoke the hypervisor via a system call */
112 mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? */
113 b iSeries_secondary_smp_loop /* wait for signal to start */
114
1153:
116 HMT_MEDIUM
117 sync
118 LOAD_REG_ADDR(r3,current_set)
119 sldi r28,r24,3 /* get current_set[cpu#] */
120 ldx r3,r3,r28
121 addi r1,r3,THREAD_SIZE
122 subi r1,r1,STACK_FRAME_OVERHEAD
123
124 b __secondary_start /* Loop until told to go */
125#endif /* CONFIG_SMP */
126
127iSeries_secondary_yield:
128 /* Yield the processor. This is required for non-SMP kernels
129 which are running on multi-threaded machines. */
130 HMT_LOW
131 lis r3,0x8000
132 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
133 addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */
134 li r4,0 /* "yield timed" */
135 li r5,-1 /* "yield forever" */
136 li r0,-1 /* r0=-1 indicates a Hypervisor call */
137 sc /* Invoke the hypervisor via a system call */
138 mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? */
139 b iSeries_secondary_yield /* If SMP not configured, secondaries
140 * loop forever */
141
142/*** ISeries-LPAR interrupt handlers ***/
143
144 STD_EXCEPTION_ISERIES(machine_check, PACA_EXMC)
145
146 .globl data_access_iSeries
147data_access_iSeries:
148 mtspr SPRN_SPRG_SCRATCH0,r13
149BEGIN_FTR_SECTION
150 mfspr r13,SPRN_SPRG_PACA
151 std r9,PACA_EXSLB+EX_R9(r13)
152 std r10,PACA_EXSLB+EX_R10(r13)
153 mfspr r10,SPRN_DAR
154 mfspr r9,SPRN_DSISR
155 srdi r10,r10,60
156 rlwimi r10,r9,16,0x20
157 mfcr r9
158 cmpwi r10,0x2c
159 beq .do_stab_bolted_iSeries
160 ld r10,PACA_EXSLB+EX_R10(r13)
161 std r11,PACA_EXGEN+EX_R11(r13)
162 ld r11,PACA_EXSLB+EX_R9(r13)
163 std r12,PACA_EXGEN+EX_R12(r13)
164 mfspr r12,SPRN_SPRG_SCRATCH0
165 std r10,PACA_EXGEN+EX_R10(r13)
166 std r11,PACA_EXGEN+EX_R9(r13)
167 std r12,PACA_EXGEN+EX_R13(r13)
168 EXCEPTION_PROLOG_ISERIES_1
169FTR_SECTION_ELSE
170 EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0)
171 EXCEPTION_PROLOG_ISERIES_1
172ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
173 b data_access_common
174
175.do_stab_bolted_iSeries:
176 std r11,PACA_EXSLB+EX_R11(r13)
177 std r12,PACA_EXSLB+EX_R12(r13)
178 mfspr r10,SPRN_SPRG_SCRATCH0
179 std r10,PACA_EXSLB+EX_R13(r13)
180 EXCEPTION_PROLOG_ISERIES_1
181 b .do_stab_bolted
182
183 .globl data_access_slb_iSeries
184data_access_slb_iSeries:
185 mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */
186 mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */
187 std r3,PACA_EXSLB+EX_R3(r13)
188 mfspr r3,SPRN_DAR
189 std r9,PACA_EXSLB+EX_R9(r13)
190 mfcr r9
191#ifdef __DISABLED__
192 cmpdi r3,0
193 bge slb_miss_user_iseries
194#endif
195 std r10,PACA_EXSLB+EX_R10(r13)
196 std r11,PACA_EXSLB+EX_R11(r13)
197 std r12,PACA_EXSLB+EX_R12(r13)
198 mfspr r10,SPRN_SPRG_SCRATCH0
199 std r10,PACA_EXSLB+EX_R13(r13)
200 ld r12,PACALPPACAPTR(r13)
201 ld r12,LPPACASRR1(r12)
202 b .slb_miss_realmode
203
204 STD_EXCEPTION_ISERIES(instruction_access, PACA_EXGEN)
205
206 .globl instruction_access_slb_iSeries
207instruction_access_slb_iSeries:
208 mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */
209 mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */
210 std r3,PACA_EXSLB+EX_R3(r13)
211 ld r3,PACALPPACAPTR(r13)
212 ld r3,LPPACASRR0(r3) /* get SRR0 value */
213 std r9,PACA_EXSLB+EX_R9(r13)
214 mfcr r9
215#ifdef __DISABLED__
216 cmpdi r3,0
217 bge slb_miss_user_iseries
218#endif
219 std r10,PACA_EXSLB+EX_R10(r13)
220 std r11,PACA_EXSLB+EX_R11(r13)
221 std r12,PACA_EXSLB+EX_R12(r13)
222 mfspr r10,SPRN_SPRG_SCRATCH0
223 std r10,PACA_EXSLB+EX_R13(r13)
224 ld r12,PACALPPACAPTR(r13)
225 ld r12,LPPACASRR1(r12)
226 b .slb_miss_realmode
227
228#ifdef __DISABLED__
229slb_miss_user_iseries:
230 std r10,PACA_EXGEN+EX_R10(r13)
231 std r11,PACA_EXGEN+EX_R11(r13)
232 std r12,PACA_EXGEN+EX_R12(r13)
233 mfspr r10,SPRG_SCRATCH0
234 ld r11,PACA_EXSLB+EX_R9(r13)
235 ld r12,PACA_EXSLB+EX_R3(r13)
236 std r10,PACA_EXGEN+EX_R13(r13)
237 std r11,PACA_EXGEN+EX_R9(r13)
238 std r12,PACA_EXGEN+EX_R3(r13)
239 EXCEPTION_PROLOG_ISERIES_1
240 b slb_miss_user_common
241#endif
242
243 MASKABLE_EXCEPTION_ISERIES(hardware_interrupt)
244 STD_EXCEPTION_ISERIES(alignment, PACA_EXGEN)
245 STD_EXCEPTION_ISERIES(program_check, PACA_EXGEN)
246 STD_EXCEPTION_ISERIES(fp_unavailable, PACA_EXGEN)
247 MASKABLE_EXCEPTION_ISERIES(decrementer)
248 STD_EXCEPTION_ISERIES(trap_0a, PACA_EXGEN)
249 STD_EXCEPTION_ISERIES(trap_0b, PACA_EXGEN)
250
251 .globl system_call_iSeries
252system_call_iSeries:
253 mr r9,r13
254 mfspr r13,SPRN_SPRG_PACA
255 EXCEPTION_PROLOG_ISERIES_1
256 b system_call_common
257
258 STD_EXCEPTION_ISERIES(single_step, PACA_EXGEN)
259 STD_EXCEPTION_ISERIES(trap_0e, PACA_EXGEN)
260 STD_EXCEPTION_ISERIES(performance_monitor, PACA_EXGEN)
261
262decrementer_iSeries_masked:
263 /* We may not have a valid TOC pointer in here. */
264 li r11,1
265 ld r12,PACALPPACAPTR(r13)
266 stb r11,LPPACADECRINT(r12)
267 li r12,-1
268 clrldi r12,r12,33 /* set DEC to 0x7fffffff */
269 mtspr SPRN_DEC,r12
270 /* fall through */
271
272hardware_interrupt_iSeries_masked:
273 mtcrf 0x80,r9 /* Restore regs */
274 ld r12,PACALPPACAPTR(r13)
275 ld r11,LPPACASRR0(r12)
276 ld r12,LPPACASRR1(r12)
277 mtspr SPRN_SRR0,r11
278 mtspr SPRN_SRR1,r12
279 ld r9,PACA_EXGEN+EX_R9(r13)
280 ld r10,PACA_EXGEN+EX_R10(r13)
281 ld r11,PACA_EXGEN+EX_R11(r13)
282 ld r12,PACA_EXGEN+EX_R12(r13)
283 ld r13,PACA_EXGEN+EX_R13(r13)
284 rfid
285 b . /* prevent speculative execution */
286
287_INIT_STATIC(__start_initialization_iSeries)
288 /* Clear out the BSS */
289 LOAD_REG_ADDR(r11,__bss_stop)
290 LOAD_REG_ADDR(r8,__bss_start)
291 sub r11,r11,r8 /* bss size */
292 addi r11,r11,7 /* round up to an even double word */
293 rldicl. r11,r11,61,3 /* shift right by 3 */
294 beq 4f
295 addi r8,r8,-8
296 li r0,0
297 mtctr r11 /* zero this many doublewords */
2983: stdu r0,8(r8)
299 bdnz 3b
3004:
301 LOAD_REG_ADDR(r1,init_thread_union)
302 addi r1,r1,THREAD_SIZE
303 li r0,0
304 stdu r0,-STACK_FRAME_OVERHEAD(r1)
305
306 bl .iSeries_early_setup
307 bl .early_setup
308
309 /* relocation is on at this point */
310
311 b .start_here_common
diff --git a/arch/powerpc/platforms/iseries/exception.h b/arch/powerpc/platforms/iseries/exception.h
deleted file mode 100644
index 50271b550a99..000000000000
--- a/arch/powerpc/platforms/iseries/exception.h
+++ /dev/null
@@ -1,58 +0,0 @@
1#ifndef _ASM_POWERPC_ISERIES_EXCEPTION_H
2#define _ASM_POWERPC_ISERIES_EXCEPTION_H
3/*
4 * Extracted from head_64.S
5 *
6 * PowerPC version
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8 *
9 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
10 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
11 * Adapted for Power Macintosh by Paul Mackerras.
12 * Low-level exception handlers and MMU support
13 * rewritten by Paul Mackerras.
14 * Copyright (C) 1996 Paul Mackerras.
15 *
16 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
17 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
18 *
19 * This file contains the low-level support and setup for the
20 * PowerPC-64 platform, including trap and interrupt dispatch.
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
26 */
27#include <asm/exception-64s.h>
28
29#define EXCEPTION_PROLOG_ISERIES_1 \
30 mfmsr r10; \
31 ld r12,PACALPPACAPTR(r13); \
32 ld r11,LPPACASRR0(r12); \
33 ld r12,LPPACASRR1(r12); \
34 ori r10,r10,MSR_RI; \
35 mtmsrd r10,1
36
37#define STD_EXCEPTION_ISERIES(label, area) \
38 .globl label##_iSeries; \
39label##_iSeries: \
40 HMT_MEDIUM; \
41 mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
42 EXCEPTION_PROLOG_1(area, NOTEST, 0); \
43 EXCEPTION_PROLOG_ISERIES_1; \
44 b label##_common
45
46#define MASKABLE_EXCEPTION_ISERIES(label) \
47 .globl label##_iSeries; \
48label##_iSeries: \
49 HMT_MEDIUM; \
50 mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
51 EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0); \
52 lbz r10,PACASOFTIRQEN(r13); \
53 cmpwi 0,r10,0; \
54 beq- label##_iSeries_masked; \
55 EXCEPTION_PROLOG_ISERIES_1; \
56 b label##_common; \
57
58#endif /* _ASM_POWERPC_ISERIES_EXCEPTION_H */
diff --git a/arch/powerpc/platforms/iseries/htab.c b/arch/powerpc/platforms/iseries/htab.c
deleted file mode 100644
index 3ae66ab9d5e7..000000000000
--- a/arch/powerpc/platforms/iseries/htab.c
+++ /dev/null
@@ -1,257 +0,0 @@
1/*
2 * iSeries hashtable management.
3 * Derived from pSeries_htab.c
4 *
5 * SMP scalability work:
6 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13#include <asm/machdep.h>
14#include <asm/pgtable.h>
15#include <asm/mmu.h>
16#include <asm/mmu_context.h>
17#include <asm/abs_addr.h>
18#include <linux/spinlock.h>
19
20#include "call_hpt.h"
21
22static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp;
23
24/*
25 * Very primitive algorithm for picking up a lock
26 */
27static inline void iSeries_hlock(unsigned long slot)
28{
29 if (slot & 0x8)
30 slot = ~slot;
31 spin_lock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
32}
33
34static inline void iSeries_hunlock(unsigned long slot)
35{
36 if (slot & 0x8)
37 slot = ~slot;
38 spin_unlock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
39}
40
41static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
42 unsigned long pa, unsigned long rflags,
43 unsigned long vflags, int psize, int ssize)
44{
45 long slot;
46 struct hash_pte lhpte;
47 int secondary = 0;
48
49 BUG_ON(psize != MMU_PAGE_4K);
50
51 /*
52 * The hypervisor tries both primary and secondary.
53 * If we are being called to insert in the secondary,
54 * it means we have already tried both primary and secondary,
55 * so we return failure immediately.
56 */
57 if (vflags & HPTE_V_SECONDARY)
58 return -1;
59
60 iSeries_hlock(hpte_group);
61
62 slot = HvCallHpt_findValid(&lhpte, va >> HW_PAGE_SHIFT);
63 if (unlikely(lhpte.v & HPTE_V_VALID)) {
64 if (vflags & HPTE_V_BOLTED) {
65 HvCallHpt_setSwBits(slot, 0x10, 0);
66 HvCallHpt_setPp(slot, PP_RWXX);
67 iSeries_hunlock(hpte_group);
68 if (slot < 0)
69 return 0x8 | (slot & 7);
70 else
71 return slot & 7;
72 }
73 BUG();
74 }
75
76 if (slot == -1) { /* No available entry found in either group */
77 iSeries_hunlock(hpte_group);
78 return -1;
79 }
80
81 if (slot < 0) { /* MSB set means secondary group */
82 vflags |= HPTE_V_SECONDARY;
83 secondary = 1;
84 slot &= 0x7fffffffffffffff;
85 }
86
87
88 lhpte.v = hpte_encode_v(va, MMU_PAGE_4K, MMU_SEGSIZE_256M) |
89 vflags | HPTE_V_VALID;
90 lhpte.r = hpte_encode_r(phys_to_abs(pa), MMU_PAGE_4K) | rflags;
91
92 /* Now fill in the actual HPTE */
93 HvCallHpt_addValidate(slot, secondary, &lhpte);
94
95 iSeries_hunlock(hpte_group);
96
97 return (secondary << 3) | (slot & 7);
98}
99
100static unsigned long iSeries_hpte_getword0(unsigned long slot)
101{
102 struct hash_pte hpte;
103
104 HvCallHpt_get(&hpte, slot);
105 return hpte.v;
106}
107
108static long iSeries_hpte_remove(unsigned long hpte_group)
109{
110 unsigned long slot_offset;
111 int i;
112 unsigned long hpte_v;
113
114 /* Pick a random slot to start at */
115 slot_offset = mftb() & 0x7;
116
117 iSeries_hlock(hpte_group);
118
119 for (i = 0; i < HPTES_PER_GROUP; i++) {
120 hpte_v = iSeries_hpte_getword0(hpte_group + slot_offset);
121
122 if (! (hpte_v & HPTE_V_BOLTED)) {
123 HvCallHpt_invalidateSetSwBitsGet(hpte_group +
124 slot_offset, 0, 0);
125 iSeries_hunlock(hpte_group);
126 return i;
127 }
128
129 slot_offset++;
130 slot_offset &= 0x7;
131 }
132
133 iSeries_hunlock(hpte_group);
134
135 return -1;
136}
137
138/*
139 * The HyperVisor expects the "flags" argument in this form:
140 * bits 0..59 : reserved
141 * bit 60 : N
142 * bits 61..63 : PP2,PP1,PP0
143 */
144static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
145 unsigned long va, int psize, int ssize, int local)
146{
147 struct hash_pte hpte;
148 unsigned long want_v;
149
150 iSeries_hlock(slot);
151
152 HvCallHpt_get(&hpte, slot);
153 want_v = hpte_encode_v(va, MMU_PAGE_4K, MMU_SEGSIZE_256M);
154
155 if (HPTE_V_COMPARE(hpte.v, want_v) && (hpte.v & HPTE_V_VALID)) {
156 /*
157 * Hypervisor expects bits as NPPP, which is
158 * different from how they are mapped in our PP.
159 */
160 HvCallHpt_setPp(slot, (newpp & 0x3) | ((newpp & 0x4) << 1));
161 iSeries_hunlock(slot);
162 return 0;
163 }
164 iSeries_hunlock(slot);
165
166 return -1;
167}
168
169/*
170 * Functions used to find the PTE for a particular virtual address.
171 * Only used during boot when bolting pages.
172 *
173 * Input : vpn : virtual page number
174 * Output: PTE index within the page table of the entry
175 * -1 on failure
176 */
177static long iSeries_hpte_find(unsigned long vpn)
178{
179 struct hash_pte hpte;
180 long slot;
181
182 /*
183 * The HvCallHpt_findValid interface is as follows:
184 * 0xffffffffffffffff : No entry found.
185 * 0x00000000xxxxxxxx : Entry found in primary group, slot x
186 * 0x80000000xxxxxxxx : Entry found in secondary group, slot x
187 */
188 slot = HvCallHpt_findValid(&hpte, vpn);
189 if (hpte.v & HPTE_V_VALID) {
190 if (slot < 0) {
191 slot &= 0x7fffffffffffffff;
192 slot = -slot;
193 }
194 } else
195 slot = -1;
196 return slot;
197}
198
199/*
200 * Update the page protection bits. Intended to be used to create
201 * guard pages for kernel data structures on pages which are bolted
202 * in the HPT. Assumes pages being operated on will not be stolen.
203 * Does not work on large pages.
204 *
205 * No need to lock here because we should be the only user.
206 */
207static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
208 int psize, int ssize)
209{
210 unsigned long vsid,va,vpn;
211 long slot;
212
213 BUG_ON(psize != MMU_PAGE_4K);
214
215 vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
216 va = (vsid << 28) | (ea & 0x0fffffff);
217 vpn = va >> HW_PAGE_SHIFT;
218 slot = iSeries_hpte_find(vpn);
219 if (slot == -1)
220 panic("updateboltedpp: Could not find page to bolt\n");
221 HvCallHpt_setPp(slot, newpp);
222}
223
224static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
225 int psize, int ssize, int local)
226{
227 unsigned long hpte_v;
228 unsigned long avpn = va >> 23;
229 unsigned long flags;
230
231 local_irq_save(flags);
232
233 iSeries_hlock(slot);
234
235 hpte_v = iSeries_hpte_getword0(slot);
236
237 if ((HPTE_V_AVPN_VAL(hpte_v) == avpn) && (hpte_v & HPTE_V_VALID))
238 HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0);
239
240 iSeries_hunlock(slot);
241
242 local_irq_restore(flags);
243}
244
245void __init hpte_init_iSeries(void)
246{
247 int i;
248
249 for (i = 0; i < ARRAY_SIZE(iSeries_hlocks); i++)
250 spin_lock_init(&iSeries_hlocks[i]);
251
252 ppc_md.hpte_invalidate = iSeries_hpte_invalidate;
253 ppc_md.hpte_updatepp = iSeries_hpte_updatepp;
254 ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp;
255 ppc_md.hpte_insert = iSeries_hpte_insert;
256 ppc_md.hpte_remove = iSeries_hpte_remove;
257}
diff --git a/arch/powerpc/platforms/iseries/hvcall.S b/arch/powerpc/platforms/iseries/hvcall.S
deleted file mode 100644
index 07ae6ad5f49f..000000000000
--- a/arch/powerpc/platforms/iseries/hvcall.S
+++ /dev/null
@@ -1,94 +0,0 @@
1/*
2 * This file contains the code to perform calls to the
3 * iSeries LPAR hypervisor
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
10
11#include <asm/ppc_asm.h>
12#include <asm/processor.h>
13#include <asm/ptrace.h> /* XXX for STACK_FRAME_OVERHEAD */
14
15 .text
16
17/*
18 * Hypervisor call
19 *
20 * Invoke the iSeries hypervisor via the System Call instruction
21 * Parameters are passed to this routine in registers r3 - r10
22 *
23 * r3 contains the HV function to be called
24 * r4-r10 contain the operands to the hypervisor function
25 *
26 */
27
28_GLOBAL(HvCall)
29_GLOBAL(HvCall0)
30_GLOBAL(HvCall1)
31_GLOBAL(HvCall2)
32_GLOBAL(HvCall3)
33_GLOBAL(HvCall4)
34_GLOBAL(HvCall5)
35_GLOBAL(HvCall6)
36_GLOBAL(HvCall7)
37
38
39 mfcr r0
40 std r0,-8(r1)
41 stdu r1,-(STACK_FRAME_OVERHEAD+16)(r1)
42
43 /* r0 = 0xffffffffffffffff indicates a hypervisor call */
44
45 li r0,-1
46
47 /* Invoke the hypervisor */
48
49 sc
50
51 ld r1,0(r1)
52 ld r0,-8(r1)
53 mtcrf 0xff,r0
54
55 /* return to caller, return value in r3 */
56
57 blr
58
59_GLOBAL(HvCall0Ret16)
60_GLOBAL(HvCall1Ret16)
61_GLOBAL(HvCall2Ret16)
62_GLOBAL(HvCall3Ret16)
63_GLOBAL(HvCall4Ret16)
64_GLOBAL(HvCall5Ret16)
65_GLOBAL(HvCall6Ret16)
66_GLOBAL(HvCall7Ret16)
67
68 mfcr r0
69 std r0,-8(r1)
70 std r31,-16(r1)
71 stdu r1,-(STACK_FRAME_OVERHEAD+32)(r1)
72
73 mr r31,r4
74 li r0,-1
75 mr r4,r5
76 mr r5,r6
77 mr r6,r7
78 mr r7,r8
79 mr r8,r9
80 mr r9,r10
81
82 sc
83
84 std r3,0(r31)
85 std r4,8(r31)
86
87 mr r3,r5
88
89 ld r1,0(r1)
90 ld r0,-8(r1)
91 mtcrf 0xff,r0
92 ld r31,-16(r1)
93
94 blr
diff --git a/arch/powerpc/platforms/iseries/hvlog.c b/arch/powerpc/platforms/iseries/hvlog.c
deleted file mode 100644
index f476d71194fa..000000000000
--- a/arch/powerpc/platforms/iseries/hvlog.c
+++ /dev/null
@@ -1,35 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <asm/page.h>
11#include <asm/abs_addr.h>
12#include <asm/iseries/hv_call.h>
13#include <asm/iseries/hv_call_sc.h>
14#include <asm/iseries/hv_types.h>
15
16
17void HvCall_writeLogBuffer(const void *buffer, u64 len)
18{
19 struct HvLpBufferList hv_buf;
20 u64 left_this_page;
21 u64 cur = virt_to_abs(buffer);
22
23 while (len) {
24 hv_buf.addr = cur;
25 left_this_page = ((cur & HW_PAGE_MASK) + HW_PAGE_SIZE) - cur;
26 if (left_this_page > len)
27 left_this_page = len;
28 hv_buf.len = left_this_page;
29 len -= left_this_page;
30 HvCall2(HvCallBaseWriteLogBuffer,
31 virt_to_abs(&hv_buf),
32 left_this_page);
33 cur = (cur & HW_PAGE_MASK) + HW_PAGE_SIZE;
34 }
35}
diff --git a/arch/powerpc/platforms/iseries/hvlpconfig.c b/arch/powerpc/platforms/iseries/hvlpconfig.c
deleted file mode 100644
index f62a0c5fa670..000000000000
--- a/arch/powerpc/platforms/iseries/hvlpconfig.c
+++ /dev/null
@@ -1,39 +0,0 @@
1/*
2 * Copyright (C) 2001 Kyle A. Lucke, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#include <linux/export.h>
20#include <asm/iseries/hv_lp_config.h>
21#include "it_lp_naca.h"
22
23HvLpIndex HvLpConfig_getLpIndex_outline(void)
24{
25 return HvLpConfig_getLpIndex();
26}
27EXPORT_SYMBOL(HvLpConfig_getLpIndex_outline);
28
29HvLpIndex HvLpConfig_getLpIndex(void)
30{
31 return itLpNaca.xLpIndex;
32}
33EXPORT_SYMBOL(HvLpConfig_getLpIndex);
34
35HvLpIndex HvLpConfig_getPrimaryLpIndex(void)
36{
37 return itLpNaca.xPrimaryLpIndex;
38}
39EXPORT_SYMBOL_GPL(HvLpConfig_getPrimaryLpIndex);
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
deleted file mode 100644
index 2f3d9110248c..000000000000
--- a/arch/powerpc/platforms/iseries/iommu.c
+++ /dev/null
@@ -1,260 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
3 *
4 * Rewrite, cleanup:
5 *
6 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
7 * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
8 *
9 * Dynamic DMA mapping support, iSeries-specific parts.
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#include <linux/types.h>
28#include <linux/dma-mapping.h>
29#include <linux/list.h>
30#include <linux/pci.h>
31#include <linux/export.h>
32#include <linux/slab.h>
33
34#include <asm/iommu.h>
35#include <asm/vio.h>
36#include <asm/tce.h>
37#include <asm/machdep.h>
38#include <asm/abs_addr.h>
39#include <asm/prom.h>
40#include <asm/pci-bridge.h>
41#include <asm/iseries/hv_call_xm.h>
42#include <asm/iseries/hv_call_event.h>
43#include <asm/iseries/iommu.h>
44
45static int tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
46 unsigned long uaddr, enum dma_data_direction direction,
47 struct dma_attrs *attrs)
48{
49 u64 rc;
50 u64 tce, rpn;
51
52 while (npages--) {
53 rpn = virt_to_abs(uaddr) >> TCE_SHIFT;
54 tce = (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
55
56 if (tbl->it_type == TCE_VB) {
57 /* Virtual Bus */
58 tce |= TCE_VALID|TCE_ALLIO;
59 if (direction != DMA_TO_DEVICE)
60 tce |= TCE_VB_WRITE;
61 } else {
62 /* PCI Bus */
63 tce |= TCE_PCI_READ; /* Read allowed */
64 if (direction != DMA_TO_DEVICE)
65 tce |= TCE_PCI_WRITE;
66 }
67
68 rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, tce);
69 if (rc)
70 panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n",
71 rc);
72 index++;
73 uaddr += TCE_PAGE_SIZE;
74 }
75 return 0;
76}
77
78static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
79{
80 u64 rc;
81
82 while (npages--) {
83 rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0);
84 if (rc)
85 panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n",
86 rc);
87 index++;
88 }
89}
90
91/*
92 * Structure passed to HvCallXm_getTceTableParms
93 */
94struct iommu_table_cb {
95 unsigned long itc_busno; /* Bus number for this tce table */
96 unsigned long itc_start; /* Will be NULL for secondary */
97 unsigned long itc_totalsize; /* Size (in pages) of whole table */
98 unsigned long itc_offset; /* Index into real tce table of the
99 start of our section */
100 unsigned long itc_size; /* Size (in pages) of our section */
101 unsigned long itc_index; /* Index of this tce table */
102 unsigned short itc_maxtables; /* Max num of tables for partition */
103 unsigned char itc_virtbus; /* Flag to indicate virtual bus */
104 unsigned char itc_slotno; /* IOA Tce Slot Index */
105 unsigned char itc_rsvd[4];
106};
107
108/*
109 * Call Hv with the architected data structure to get TCE table info.
110 * info. Put the returned data into the Linux representation of the
111 * TCE table data.
112 * The Hardware Tce table comes in three flavors.
113 * 1. TCE table shared between Buses.
114 * 2. TCE table per Bus.
115 * 3. TCE Table per IOA.
116 */
117void iommu_table_getparms_iSeries(unsigned long busno,
118 unsigned char slotno,
119 unsigned char virtbus,
120 struct iommu_table* tbl)
121{
122 struct iommu_table_cb *parms;
123
124 parms = kzalloc(sizeof(*parms), GFP_KERNEL);
125 if (parms == NULL)
126 panic("PCI_DMA: TCE Table Allocation failed.");
127
128 parms->itc_busno = busno;
129 parms->itc_slotno = slotno;
130 parms->itc_virtbus = virtbus;
131
132 HvCallXm_getTceTableParms(iseries_hv_addr(parms));
133
134 if (parms->itc_size == 0)
135 panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms);
136
137 /* itc_size is in pages worth of table, it_size is in # of entries */
138 tbl->it_size = (parms->itc_size * TCE_PAGE_SIZE) / TCE_ENTRY_SIZE;
139 tbl->it_busno = parms->itc_busno;
140 tbl->it_offset = parms->itc_offset;
141 tbl->it_index = parms->itc_index;
142 tbl->it_blocksize = 1;
143 tbl->it_type = virtbus ? TCE_VB : TCE_PCI;
144
145 kfree(parms);
146}
147
148
149#ifdef CONFIG_PCI
150/*
151 * This function compares the known tables to find an iommu_table
152 * that has already been built for hardware TCEs.
153 */
154static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
155{
156 struct device_node *node;
157
158 for (node = NULL; (node = of_find_all_nodes(node)); ) {
159 struct pci_dn *pdn = PCI_DN(node);
160 struct iommu_table *it;
161
162 if (pdn == NULL)
163 continue;
164 it = pdn->iommu_table;
165 if ((it != NULL) &&
166 (it->it_type == TCE_PCI) &&
167 (it->it_offset == tbl->it_offset) &&
168 (it->it_index == tbl->it_index) &&
169 (it->it_size == tbl->it_size)) {
170 of_node_put(node);
171 return it;
172 }
173 }
174 return NULL;
175}
176
177
178static void pci_dma_dev_setup_iseries(struct pci_dev *pdev)
179{
180 struct iommu_table *tbl;
181 struct device_node *dn = pci_device_to_OF_node(pdev);
182 struct pci_dn *pdn = PCI_DN(dn);
183 const u32 *lsn = of_get_property(dn, "linux,logical-slot-number", NULL);
184
185 BUG_ON(lsn == NULL);
186
187 tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
188
189 iommu_table_getparms_iSeries(pdn->busno, *lsn, 0, tbl);
190
191 /* Look for existing tce table */
192 pdn->iommu_table = iommu_table_find(tbl);
193 if (pdn->iommu_table == NULL)
194 pdn->iommu_table = iommu_init_table(tbl, -1);
195 else
196 kfree(tbl);
197 set_iommu_table_base(&pdev->dev, pdn->iommu_table);
198}
199#else
200#define pci_dma_dev_setup_iseries NULL
201#endif
202
203static struct iommu_table veth_iommu_table;
204static struct iommu_table vio_iommu_table;
205
206void *iseries_hv_alloc(size_t size, dma_addr_t *dma_handle, gfp_t flag)
207{
208 return iommu_alloc_coherent(NULL, &vio_iommu_table, size, dma_handle,
209 DMA_BIT_MASK(32), flag, -1);
210}
211EXPORT_SYMBOL_GPL(iseries_hv_alloc);
212
213void iseries_hv_free(size_t size, void *vaddr, dma_addr_t dma_handle)
214{
215 iommu_free_coherent(&vio_iommu_table, size, vaddr, dma_handle);
216}
217EXPORT_SYMBOL_GPL(iseries_hv_free);
218
219dma_addr_t iseries_hv_map(void *vaddr, size_t size,
220 enum dma_data_direction direction)
221{
222 return iommu_map_page(NULL, &vio_iommu_table, virt_to_page(vaddr),
223 (unsigned long)vaddr % PAGE_SIZE, size,
224 DMA_BIT_MASK(32), direction, NULL);
225}
226
227void iseries_hv_unmap(dma_addr_t dma_handle, size_t size,
228 enum dma_data_direction direction)
229{
230 iommu_unmap_page(&vio_iommu_table, dma_handle, size, direction, NULL);
231}
232
233void __init iommu_vio_init(void)
234{
235 iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table);
236 veth_iommu_table.it_size /= 2;
237 vio_iommu_table = veth_iommu_table;
238 vio_iommu_table.it_offset += veth_iommu_table.it_size;
239
240 if (!iommu_init_table(&veth_iommu_table, -1))
241 printk("Virtual Bus VETH TCE table failed.\n");
242 if (!iommu_init_table(&vio_iommu_table, -1))
243 printk("Virtual Bus VIO TCE table failed.\n");
244}
245
246struct iommu_table *vio_build_iommu_table_iseries(struct vio_dev *dev)
247{
248 if (strcmp(dev->type, "network") == 0)
249 return &veth_iommu_table;
250 return &vio_iommu_table;
251}
252
253void iommu_init_early_iSeries(void)
254{
255 ppc_md.tce_build = tce_build_iSeries;
256 ppc_md.tce_free = tce_free_iSeries;
257
258 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_iseries;
259 set_pci_dma_ops(&dma_iommu_ops);
260}
diff --git a/arch/powerpc/platforms/iseries/ipl_parms.h b/arch/powerpc/platforms/iseries/ipl_parms.h
deleted file mode 100644
index 83e4ca42fc57..000000000000
--- a/arch/powerpc/platforms/iseries/ipl_parms.h
+++ /dev/null
@@ -1,68 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ISERIES_IPL_PARMS_H
19#define _ISERIES_IPL_PARMS_H
20
21/*
22 * This struct maps the IPL Parameters DMA'd from the SP.
23 *
24 * Warning:
25 * This data must map in exactly 64 bytes and match the architecture for
26 * the IPL parms
27 */
28
29#include <asm/types.h>
30
31struct ItIplParmsReal {
32 u8 xFormat; // Defines format of IplParms x00-x00
33 u8 xRsvd01:6; // Reserved x01-x01
34 u8 xAlternateSearch:1; // Alternate search indicator ...
35 u8 xUaSupplied:1; // UA Supplied on programmed IPL...
36 u8 xLsUaFormat; // Format byte for UA x02-x02
37 u8 xRsvd02; // Reserved x03-x03
38 u32 xLsUa; // LS UA x04-x07
39 u32 xUnusedLsLid; // First OS LID to load x08-x0B
40 u16 xLsBusNumber; // LS Bus Number x0C-x0D
41 u8 xLsCardAdr; // LS Card Address x0E-x0E
42 u8 xLsBoardAdr; // LS Board Address x0F-x0F
43 u32 xRsvd03; // Reserved x10-x13
44 u8 xSpcnPresent:1; // SPCN present x14-x14
45 u8 xCpmPresent:1; // CPM present ...
46 u8 xRsvd04:6; // Reserved ...
47 u8 xRsvd05:4; // Reserved x15-x15
48 u8 xKeyLock:4; // Keylock setting ...
49 u8 xRsvd06:6; // Reserved x16-x16
50 u8 xIplMode:2; // Ipl mode (A|B|C|D) ...
51 u8 xHwIplType; // Fast v slow v slow EC HW IPL x17-x17
52 u16 xCpmEnabledIpl:1; // CPM in effect when IPL initiatedx18-x19
53 u16 xPowerOnResetIpl:1; // Indicate POR condition ...
54 u16 xMainStorePreserved:1; // Main Storage is preserved ...
55 u16 xRsvd07:13; // Reserved ...
56 u16 xIplSource:16; // Ipl source x1A-x1B
57 u8 xIplReason:8; // Reason for this IPL x1C-x1C
58 u8 xRsvd08; // Reserved x1D-x1D
59 u16 xRsvd09; // Reserved x1E-x1F
60 u16 xSysBoxType; // System Box Type x20-x21
61 u16 xSysProcType; // System Processor Type x22-x23
62 u32 xRsvd10; // Reserved x24-x27
63 u64 xRsvd11; // Reserved x28-x2F
64 u64 xRsvd12; // Reserved x30-x37
65 u64 xRsvd13; // Reserved x38-x3F
66};
67
68#endif /* _ISERIES_IPL_PARMS_H */
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
deleted file mode 100644
index b2103453eb01..000000000000
--- a/arch/powerpc/platforms/iseries/irq.c
+++ /dev/null
@@ -1,400 +0,0 @@
1/*
2 * This module supports the iSeries PCI bus interrupt handling
3 * Copyright (C) 20yy <Robert L Holtorf> <IBM Corp>
4 * Copyright (C) 2004-2005 IBM Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the:
18 * Free Software Foundation, Inc.,
19 * 59 Temple Place, Suite 330,
20 * Boston, MA 02111-1307 USA
21 *
22 * Change Activity:
23 * Created, December 13, 2000 by Wayne Holm
24 * End Change Activity
25 */
26#include <linux/pci.h>
27#include <linux/init.h>
28#include <linux/threads.h>
29#include <linux/smp.h>
30#include <linux/param.h>
31#include <linux/string.h>
32#include <linux/bootmem.h>
33#include <linux/irq.h>
34#include <linux/spinlock.h>
35
36#include <asm/paca.h>
37#include <asm/iseries/hv_types.h>
38#include <asm/iseries/hv_lp_event.h>
39#include <asm/iseries/hv_call_xm.h>
40#include <asm/iseries/it_lp_queue.h>
41
42#include "irq.h"
43#include "pci.h"
44#include "call_pci.h"
45
46#ifdef CONFIG_PCI
47
48enum pci_event_type {
49 pe_bus_created = 0, /* PHB has been created */
50 pe_bus_error = 1, /* PHB has failed */
51 pe_bus_failed = 2, /* Msg to Secondary, Primary failed bus */
52 pe_node_failed = 4, /* Multi-adapter bridge has failed */
53 pe_node_recovered = 5, /* Multi-adapter bridge has recovered */
54 pe_bus_recovered = 12, /* PHB has been recovered */
55 pe_unquiese_bus = 18, /* Secondary bus unqiescing */
56 pe_bridge_error = 21, /* Bridge Error */
57 pe_slot_interrupt = 22 /* Slot interrupt */
58};
59
60struct pci_event {
61 struct HvLpEvent event;
62 union {
63 u64 __align; /* Align on an 8-byte boundary */
64 struct {
65 u32 fisr;
66 HvBusNumber bus_number;
67 HvSubBusNumber sub_bus_number;
68 HvAgentId dev_id;
69 } slot;
70 struct {
71 HvBusNumber bus_number;
72 HvSubBusNumber sub_bus_number;
73 } bus;
74 struct {
75 HvBusNumber bus_number;
76 HvSubBusNumber sub_bus_number;
77 HvAgentId dev_id;
78 } node;
79 } data;
80};
81
82static DEFINE_SPINLOCK(pending_irqs_lock);
83static int num_pending_irqs;
84static int pending_irqs[NR_IRQS];
85
86static void int_received(struct pci_event *event)
87{
88 int irq;
89
90 switch (event->event.xSubtype) {
91 case pe_slot_interrupt:
92 irq = event->event.xCorrelationToken;
93 if (irq < NR_IRQS) {
94 spin_lock(&pending_irqs_lock);
95 pending_irqs[irq]++;
96 num_pending_irqs++;
97 spin_unlock(&pending_irqs_lock);
98 } else {
99 printk(KERN_WARNING "int_received: bad irq number %d\n",
100 irq);
101 HvCallPci_eoi(event->data.slot.bus_number,
102 event->data.slot.sub_bus_number,
103 event->data.slot.dev_id);
104 }
105 break;
106 /* Ignore error recovery events for now */
107 case pe_bus_created:
108 printk(KERN_INFO "int_received: system bus %d created\n",
109 event->data.bus.bus_number);
110 break;
111 case pe_bus_error:
112 case pe_bus_failed:
113 printk(KERN_INFO "int_received: system bus %d failed\n",
114 event->data.bus.bus_number);
115 break;
116 case pe_bus_recovered:
117 case pe_unquiese_bus:
118 printk(KERN_INFO "int_received: system bus %d recovered\n",
119 event->data.bus.bus_number);
120 break;
121 case pe_node_failed:
122 case pe_bridge_error:
123 printk(KERN_INFO
124 "int_received: multi-adapter bridge %d/%d/%d failed\n",
125 event->data.node.bus_number,
126 event->data.node.sub_bus_number,
127 event->data.node.dev_id);
128 break;
129 case pe_node_recovered:
130 printk(KERN_INFO
131 "int_received: multi-adapter bridge %d/%d/%d recovered\n",
132 event->data.node.bus_number,
133 event->data.node.sub_bus_number,
134 event->data.node.dev_id);
135 break;
136 default:
137 printk(KERN_ERR
138 "int_received: unrecognized event subtype 0x%x\n",
139 event->event.xSubtype);
140 break;
141 }
142}
143
144static void pci_event_handler(struct HvLpEvent *event)
145{
146 if (event && (event->xType == HvLpEvent_Type_PciIo)) {
147 if (hvlpevent_is_int(event))
148 int_received((struct pci_event *)event);
149 else
150 printk(KERN_ERR
151 "pci_event_handler: unexpected ack received\n");
152 } else if (event)
153 printk(KERN_ERR
154 "pci_event_handler: Unrecognized PCI event type 0x%x\n",
155 (int)event->xType);
156 else
157 printk(KERN_ERR "pci_event_handler: NULL event received\n");
158}
159
160#define REAL_IRQ_TO_SUBBUS(irq) (((irq) >> 14) & 0xff)
161#define REAL_IRQ_TO_BUS(irq) ((((irq) >> 6) & 0xff) + 1)
162#define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1)
163#define REAL_IRQ_TO_FUNC(irq) ((irq) & 7)
164
165/*
166 * This will be called by device drivers (via enable_IRQ)
167 * to enable INTA in the bridge interrupt status register.
168 */
169static void iseries_enable_IRQ(struct irq_data *d)
170{
171 u32 bus, dev_id, function, mask;
172 const u32 sub_bus = 0;
173 unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
174
175 /* The IRQ has already been locked by the caller */
176 bus = REAL_IRQ_TO_BUS(rirq);
177 function = REAL_IRQ_TO_FUNC(rirq);
178 dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
179
180 /* Unmask secondary INTA */
181 mask = 0x80000000;
182 HvCallPci_unmaskInterrupts(bus, sub_bus, dev_id, mask);
183}
184
185/* This is called by iseries_activate_IRQs */
186static unsigned int iseries_startup_IRQ(struct irq_data *d)
187{
188 u32 bus, dev_id, function, mask;
189 const u32 sub_bus = 0;
190 unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
191
192 bus = REAL_IRQ_TO_BUS(rirq);
193 function = REAL_IRQ_TO_FUNC(rirq);
194 dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
195
196 /* Link the IRQ number to the bridge */
197 HvCallXm_connectBusUnit(bus, sub_bus, dev_id, d->irq);
198
199 /* Unmask bridge interrupts in the FISR */
200 mask = 0x01010000 << function;
201 HvCallPci_unmaskFisr(bus, sub_bus, dev_id, mask);
202 iseries_enable_IRQ(d);
203 return 0;
204}
205
206/*
207 * This is called out of iSeries_fixup to activate interrupt
208 * generation for usable slots
209 */
210void __init iSeries_activate_IRQs()
211{
212 int irq;
213 unsigned long flags;
214
215 for_each_irq (irq) {
216 struct irq_desc *desc = irq_to_desc(irq);
217 struct irq_chip *chip;
218
219 if (!desc)
220 continue;
221
222 chip = irq_desc_get_chip(desc);
223 if (chip && chip->irq_startup) {
224 raw_spin_lock_irqsave(&desc->lock, flags);
225 chip->irq_startup(&desc->irq_data);
226 raw_spin_unlock_irqrestore(&desc->lock, flags);
227 }
228 }
229}
230
231/* this is not called anywhere currently */
232static void iseries_shutdown_IRQ(struct irq_data *d)
233{
234 u32 bus, dev_id, function, mask;
235 const u32 sub_bus = 0;
236 unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
237
238 /* irq should be locked by the caller */
239 bus = REAL_IRQ_TO_BUS(rirq);
240 function = REAL_IRQ_TO_FUNC(rirq);
241 dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
242
243 /* Invalidate the IRQ number in the bridge */
244 HvCallXm_connectBusUnit(bus, sub_bus, dev_id, 0);
245
246 /* Mask bridge interrupts in the FISR */
247 mask = 0x01010000 << function;
248 HvCallPci_maskFisr(bus, sub_bus, dev_id, mask);
249}
250
251/*
252 * This will be called by device drivers (via disable_IRQ)
253 * to disable INTA in the bridge interrupt status register.
254 */
255static void iseries_disable_IRQ(struct irq_data *d)
256{
257 u32 bus, dev_id, function, mask;
258 const u32 sub_bus = 0;
259 unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
260
261 /* The IRQ has already been locked by the caller */
262 bus = REAL_IRQ_TO_BUS(rirq);
263 function = REAL_IRQ_TO_FUNC(rirq);
264 dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
265
266 /* Mask secondary INTA */
267 mask = 0x80000000;
268 HvCallPci_maskInterrupts(bus, sub_bus, dev_id, mask);
269}
270
271static void iseries_end_IRQ(struct irq_data *d)
272{
273 unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
274
275 HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq),
276 (REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq));
277}
278
279static struct irq_chip iseries_pic = {
280 .name = "iSeries",
281 .irq_startup = iseries_startup_IRQ,
282 .irq_shutdown = iseries_shutdown_IRQ,
283 .irq_unmask = iseries_enable_IRQ,
284 .irq_mask = iseries_disable_IRQ,
285 .irq_eoi = iseries_end_IRQ
286};
287
288/*
289 * This is called out of iSeries_scan_slot to allocate an IRQ for an EADS slot
290 * It calculates the irq value for the slot.
291 * Note that sub_bus is always 0 (at the moment at least).
292 */
293int __init iSeries_allocate_IRQ(HvBusNumber bus,
294 HvSubBusNumber sub_bus, u32 bsubbus)
295{
296 unsigned int realirq;
297 u8 idsel = ISERIES_GET_DEVICE_FROM_SUBBUS(bsubbus);
298 u8 function = ISERIES_GET_FUNCTION_FROM_SUBBUS(bsubbus);
299
300 realirq = (((((sub_bus << 8) + (bus - 1)) << 3) + (idsel - 1)) << 3)
301 + function;
302
303 return irq_create_mapping(NULL, realirq);
304}
305
306#endif /* CONFIG_PCI */
307
308/*
309 * Get the next pending IRQ.
310 */
311unsigned int iSeries_get_irq(void)
312{
313 int irq = NO_IRQ_IGNORE;
314
315#ifdef CONFIG_SMP
316 if (get_lppaca()->int_dword.fields.ipi_cnt) {
317 get_lppaca()->int_dword.fields.ipi_cnt = 0;
318 smp_ipi_demux();
319 }
320#endif /* CONFIG_SMP */
321 if (hvlpevent_is_pending())
322 process_hvlpevents();
323
324#ifdef CONFIG_PCI
325 if (num_pending_irqs) {
326 spin_lock(&pending_irqs_lock);
327 for (irq = 0; irq < NR_IRQS; irq++) {
328 if (pending_irqs[irq]) {
329 pending_irqs[irq]--;
330 num_pending_irqs--;
331 break;
332 }
333 }
334 spin_unlock(&pending_irqs_lock);
335 if (irq >= NR_IRQS)
336 irq = NO_IRQ_IGNORE;
337 }
338#endif
339
340 return irq;
341}
342
343#ifdef CONFIG_PCI
344
345static int iseries_irq_host_map(struct irq_host *h, unsigned int virq,
346 irq_hw_number_t hw)
347{
348 irq_set_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq);
349
350 return 0;
351}
352
353static int iseries_irq_host_match(struct irq_host *h, struct device_node *np)
354{
355 /* Match all */
356 return 1;
357}
358
359static struct irq_host_ops iseries_irq_host_ops = {
360 .map = iseries_irq_host_map,
361 .match = iseries_irq_host_match,
362};
363
364/*
365 * This is called by init_IRQ. set in ppc_md.init_IRQ by iSeries_setup.c
366 * It must be called before the bus walk.
367 */
368void __init iSeries_init_IRQ(void)
369{
370 /* Register PCI event handler and open an event path */
371 struct irq_host *host;
372 int ret;
373
374 /*
375 * The Hypervisor only allows us up to 256 interrupt
376 * sources (the irq number is passed in a u8).
377 */
378 irq_set_virq_count(256);
379
380 /* Create irq host. No need for a revmap since HV will give us
381 * back our virtual irq number
382 */
383 host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0,
384 &iseries_irq_host_ops, 0);
385 BUG_ON(host == NULL);
386 irq_set_default_host(host);
387
388 ret = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo,
389 &pci_event_handler);
390 if (ret == 0) {
391 ret = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0);
392 if (ret != 0)
393 printk(KERN_ERR "iseries_init_IRQ: open event path "
394 "failed with rc 0x%x\n", ret);
395 } else
396 printk(KERN_ERR "iseries_init_IRQ: register handler "
397 "failed with rc 0x%x\n", ret);
398}
399
400#endif /* CONFIG_PCI */
diff --git a/arch/powerpc/platforms/iseries/irq.h b/arch/powerpc/platforms/iseries/irq.h
deleted file mode 100644
index a1c236074034..000000000000
--- a/arch/powerpc/platforms/iseries/irq.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef _ISERIES_IRQ_H
2#define _ISERIES_IRQ_H
3
4#ifdef CONFIG_PCI
5extern void iSeries_init_IRQ(void);
6extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, u32);
7extern void iSeries_activate_IRQs(void);
8#else
9#define iSeries_init_IRQ NULL
10#endif
11extern unsigned int iSeries_get_irq(void);
12
13#endif /* _ISERIES_IRQ_H */
diff --git a/arch/powerpc/platforms/iseries/it_exp_vpd_panel.h b/arch/powerpc/platforms/iseries/it_exp_vpd_panel.h
deleted file mode 100644
index 6de9097b7f57..000000000000
--- a/arch/powerpc/platforms/iseries/it_exp_vpd_panel.h
+++ /dev/null
@@ -1,51 +0,0 @@
1/*
2 * Copyright (C) 2002 Dave Boutcher IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _PLATFORMS_ISERIES_IT_EXT_VPD_PANEL_H
19#define _PLATFORMS_ISERIES_IT_EXT_VPD_PANEL_H
20
21/*
22 * This struct maps the panel information
23 *
24 * Warning:
25 * This data must match the architecture for the panel information
26 */
27
28#include <asm/types.h>
29
30struct ItExtVpdPanel {
31 /* Definition of the Extended Vpd On Panel Data Area */
32 char systemSerial[8];
33 char mfgID[4];
34 char reserved1[24];
35 char machineType[4];
36 char systemID[6];
37 char somUniqueCnt[4];
38 char serialNumberCount;
39 char reserved2[7];
40 u16 bbu3;
41 u16 bbu2;
42 u16 bbu1;
43 char xLocationLabel[8];
44 u8 xRsvd1[6];
45 u16 xFrameId;
46 u8 xRsvd2[48];
47};
48
49extern struct ItExtVpdPanel xItExtVpdPanel;
50
51#endif /* _PLATFORMS_ISERIES_IT_EXT_VPD_PANEL_H */
diff --git a/arch/powerpc/platforms/iseries/it_lp_naca.h b/arch/powerpc/platforms/iseries/it_lp_naca.h
deleted file mode 100644
index cf6dcf6ef07b..000000000000
--- a/arch/powerpc/platforms/iseries/it_lp_naca.h
+++ /dev/null
@@ -1,80 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _PLATFORMS_ISERIES_IT_LP_NACA_H
19#define _PLATFORMS_ISERIES_IT_LP_NACA_H
20
21#include <linux/types.h>
22
23/*
24 * This control block contains the data that is shared between the
25 * hypervisor (PLIC) and the OS.
26 */
27
28struct ItLpNaca {
29// CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
30 u32 xDesc; // Eye catcher x00-x03
31 u16 xSize; // Size of this class x04-x05
32 u16 xIntHdlrOffset; // Offset to IntHdlr array x06-x07
33 u8 xMaxIntHdlrEntries; // Number of entries in array x08-x08
34 u8 xPrimaryLpIndex; // LP Index of Primary x09-x09
35 u8 xServiceLpIndex; // LP Ind of Service Focal Pointx0A-x0A
36 u8 xLpIndex; // LP Index x0B-x0B
37 u16 xMaxLpQueues; // Number of allocated queues x0C-x0D
38 u16 xLpQueueOffset; // Offset to start of LP queues x0E-x0F
39 u8 xPirEnvironMode; // Piranha or hardware x10-x10
40 u8 xPirConsoleMode; // Piranha console indicator x11-x11
41 u8 xPirDasdMode; // Piranha dasd indicator x12-x12
42 u8 xRsvd1_0[5]; // Reserved for Piranha related x13-x17
43 u8 flags; // flags, see below x18-x1F
44 u8 xSpVpdFormat; // VPD areas are in CSP format ...
45 u8 xIntProcRatio; // Ratio of int procs to procs ...
46 u8 xRsvd1_2[5]; // Reserved ...
47 u16 xRsvd1_3; // Reserved x20-x21
48 u16 xPlicVrmIndex; // VRM index of PLIC x22-x23
49 u16 xMinSupportedSlicVrmInd;// Min supported OS VRM index x24-x25
50 u16 xMinCompatableSlicVrmInd;// Min compatible OS VRM index x26-x27
51 u64 xLoadAreaAddr; // ER address of load area x28-x2F
52 u32 xLoadAreaChunks; // Chunks for the load area x30-x33
53 u32 xPaseSysCallCRMask; // Mask used to test CR before x34-x37
54 // doing an ASR switch on PASE
55 // system call.
56 u64 xSlicSegmentTablePtr; // Pointer to Slic seg table. x38-x3f
57 u8 xRsvd1_4[64]; // x40-x7F
58
59// CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data
60 u8 xRsvd2_0[128]; // Reserved x00-x7F
61
62// CACHE_LINE_3-6 0x0100 - 0x02FF Contains LP Queue indicators
63// NB: Padding required to keep xInterruptHdlr at x300 which is required
64// for v4r4 PLIC.
65 u8 xOldLpQueue[128]; // LP Queue needed for v4r4 100-17F
66 u8 xRsvd3_0[384]; // Reserved 180-2FF
67
68// CACHE_LINE_7-8 0x0300 - 0x03FF Contains the address of the OS interrupt
69// handlers
70 u64 xInterruptHdlr[32]; // Interrupt handlers 300-x3FF
71};
72
73extern struct ItLpNaca itLpNaca;
74
75#define ITLPNACA_LPAR 0x80 /* Is LPAR installed on the system */
76#define ITLPNACA_PARTITIONED 0x40 /* Is the system partitioned */
77#define ITLPNACA_HWSYNCEDTBS 0x20 /* Hardware synced TBs */
78#define ITLPNACA_HMTINT 0x10 /* Utilize MHT for interrupts */
79
80#endif /* _PLATFORMS_ISERIES_IT_LP_NACA_H */
diff --git a/arch/powerpc/platforms/iseries/ksyms.c b/arch/powerpc/platforms/iseries/ksyms.c
deleted file mode 100644
index 997e234fb8b7..000000000000
--- a/arch/powerpc/platforms/iseries/ksyms.c
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * (C) 2001-2005 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/export.h>
10
11#include <asm/hw_irq.h>
12#include <asm/iseries/hv_call_sc.h>
13
14EXPORT_SYMBOL(HvCall0);
15EXPORT_SYMBOL(HvCall1);
16EXPORT_SYMBOL(HvCall2);
17EXPORT_SYMBOL(HvCall3);
18EXPORT_SYMBOL(HvCall4);
19EXPORT_SYMBOL(HvCall5);
20EXPORT_SYMBOL(HvCall6);
21EXPORT_SYMBOL(HvCall7);
diff --git a/arch/powerpc/platforms/iseries/lpardata.c b/arch/powerpc/platforms/iseries/lpardata.c
deleted file mode 100644
index 00e0ec813a1c..000000000000
--- a/arch/powerpc/platforms/iseries/lpardata.c
+++ /dev/null
@@ -1,318 +0,0 @@
1/*
2 * Copyright 2001 Mike Corrigan, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/types.h>
10#include <linux/threads.h>
11#include <linux/bitops.h>
12#include <asm/processor.h>
13#include <asm/ptrace.h>
14#include <asm/abs_addr.h>
15#include <asm/lppaca.h>
16#include <asm/paca.h>
17#include <asm/iseries/lpar_map.h>
18#include <asm/iseries/it_lp_queue.h>
19#include <asm/iseries/alpaca.h>
20
21#include "naca.h"
22#include "vpd_areas.h"
23#include "spcomm_area.h"
24#include "ipl_parms.h"
25#include "processor_vpd.h"
26#include "release_data.h"
27#include "it_exp_vpd_panel.h"
28#include "it_lp_naca.h"
29
30/* The HvReleaseData is the root of the information shared between
31 * the hypervisor and Linux.
32 */
33const struct HvReleaseData hvReleaseData = {
34 .xDesc = 0xc8a5d9c4, /* "HvRD" ebcdic */
35 .xSize = sizeof(struct HvReleaseData),
36 .xVpdAreasPtrOffset = offsetof(struct naca_struct, xItVpdAreas),
37 .xSlicNacaAddr = &naca, /* 64-bit Naca address */
38 .xMsNucDataOffset = LPARMAP_PHYS,
39 .xFlags = HVREL_TAGSINACTIVE /* tags inactive */
40 /* 64 bit */
41 /* shared processors */
42 /* HMT allowed */
43 | 6, /* TEMP: This allows non-GA driver */
44 .xVrmIndex = 4, /* We are v5r2m0 */
45 .xMinSupportedPlicVrmIndex = 3, /* v5r1m0 */
46 .xMinCompatablePlicVrmIndex = 3, /* v5r1m0 */
47 .xVrmName = { 0xd3, 0x89, 0x95, 0xa4, /* "Linux 2.4.64" ebcdic */
48 0xa7, 0x40, 0xf2, 0x4b,
49 0xf4, 0x4b, 0xf6, 0xf4 },
50};
51
52/*
53 * The NACA. The first dword of the naca is required by the iSeries
54 * hypervisor to point to itVpdAreas. The hypervisor finds the NACA
55 * through the pointer in hvReleaseData.
56 */
57struct naca_struct naca = {
58 .xItVpdAreas = &itVpdAreas,
59 .xRamDisk = 0,
60 .xRamDiskSize = 0,
61};
62
63struct ItLpRegSave {
64 u32 xDesc; // Eye catcher "LpRS" ebcdic 000-003
65 u16 xSize; // Size of this class 004-005
66 u8 xInUse; // Area is live 006-007
67 u8 xRsvd1[9]; // Reserved 007-00F
68
69 u8 xFixedRegSave[352]; // Fixed Register Save Area 010-16F
70 u32 xCTRL; // Control Register 170-173
71 u32 xDEC; // Decrementer 174-177
72 u32 xFPSCR; // FP Status and Control Reg 178-17B
73 u32 xPVR; // Processor Version Number 17C-17F
74
75 u64 xMMCR0; // Monitor Mode Control Reg 0 180-187
76 u32 xPMC1; // Perf Monitor Counter 1 188-18B
77 u32 xPMC2; // Perf Monitor Counter 2 18C-18F
78 u32 xPMC3; // Perf Monitor Counter 3 190-193
79 u32 xPMC4; // Perf Monitor Counter 4 194-197
80 u32 xPIR; // Processor ID Reg 198-19B
81
82 u32 xMMCR1; // Monitor Mode Control Reg 1 19C-19F
83 u32 xMMCRA; // Monitor Mode Control Reg A 1A0-1A3
84 u32 xPMC5; // Perf Monitor Counter 5 1A4-1A7
85 u32 xPMC6; // Perf Monitor Counter 6 1A8-1AB
86 u32 xPMC7; // Perf Monitor Counter 7 1AC-1AF
87 u32 xPMC8; // Perf Monitor Counter 8 1B0-1B3
88 u32 xTSC; // Thread Switch Control 1B4-1B7
89 u32 xTST; // Thread Switch Timeout 1B8-1BB
90 u32 xRsvd; // Reserved 1BC-1BF
91
92 u64 xACCR; // Address Compare Control Reg 1C0-1C7
93 u64 xIMR; // Instruction Match Register 1C8-1CF
94 u64 xSDR1; // Storage Description Reg 1 1D0-1D7
95 u64 xSPRG0; // Special Purpose Reg General0 1D8-1DF
96 u64 xSPRG1; // Special Purpose Reg General1 1E0-1E7
97 u64 xSPRG2; // Special Purpose Reg General2 1E8-1EF
98 u64 xSPRG3; // Special Purpose Reg General3 1F0-1F7
99 u64 xTB; // Time Base Register 1F8-1FF
100
101 u64 xFPR[32]; // Floating Point Registers 200-2FF
102
103 u64 xMSR; // Machine State Register 300-307
104 u64 xNIA; // Next Instruction Address 308-30F
105
106 u64 xDABR; // Data Address Breakpoint Reg 310-317
107 u64 xIABR; // Inst Address Breakpoint Reg 318-31F
108
109 u64 xHID0; // HW Implementation Dependent0 320-327
110
111 u64 xHID4; // HW Implementation Dependent4 328-32F
112 u64 xSCOMd; // SCON Data Reg (SPRG4) 330-337
113 u64 xSCOMc; // SCON Command Reg (SPRG5) 338-33F
114 u64 xSDAR; // Sample Data Address Register 340-347
115 u64 xSIAR; // Sample Inst Address Register 348-34F
116
117 u8 xRsvd3[176]; // Reserved 350-3FF
118};
119
120extern void system_reset_iSeries(void);
121extern void machine_check_iSeries(void);
122extern void data_access_iSeries(void);
123extern void instruction_access_iSeries(void);
124extern void hardware_interrupt_iSeries(void);
125extern void alignment_iSeries(void);
126extern void program_check_iSeries(void);
127extern void fp_unavailable_iSeries(void);
128extern void decrementer_iSeries(void);
129extern void trap_0a_iSeries(void);
130extern void trap_0b_iSeries(void);
131extern void system_call_iSeries(void);
132extern void single_step_iSeries(void);
133extern void trap_0e_iSeries(void);
134extern void performance_monitor_iSeries(void);
135extern void data_access_slb_iSeries(void);
136extern void instruction_access_slb_iSeries(void);
137
138struct ItLpNaca itLpNaca = {
139 .xDesc = 0xd397d581, /* "LpNa" ebcdic */
140 .xSize = 0x0400, /* size of ItLpNaca */
141 .xIntHdlrOffset = 0x0300, /* offset to int array */
142 .xMaxIntHdlrEntries = 19, /* # ents */
143 .xPrimaryLpIndex = 0, /* Part # of primary */
144 .xServiceLpIndex = 0, /* Part # of serv */
145 .xLpIndex = 0, /* Part # of me */
146 .xMaxLpQueues = 0, /* # of LP queues */
147 .xLpQueueOffset = 0x100, /* offset of start of LP queues */
148 .xPirEnvironMode = 0, /* Piranha stuff */
149 .xPirConsoleMode = 0,
150 .xPirDasdMode = 0,
151 .flags = 0,
152 .xSpVpdFormat = 0,
153 .xIntProcRatio = 0,
154 .xPlicVrmIndex = 0, /* VRM index of PLIC */
155 .xMinSupportedSlicVrmInd = 0, /* min supported SLIC */
156 .xMinCompatableSlicVrmInd = 0, /* min compat SLIC */
157 .xLoadAreaAddr = 0, /* 64-bit addr of load area */
158 .xLoadAreaChunks = 0, /* chunks for load area */
159 .xPaseSysCallCRMask = 0, /* PASE mask */
160 .xSlicSegmentTablePtr = 0, /* seg table */
161 .xOldLpQueue = { 0 }, /* Old LP Queue */
162 .xInterruptHdlr = {
163 (u64)system_reset_iSeries, /* 0x100 System Reset */
164 (u64)machine_check_iSeries, /* 0x200 Machine Check */
165 (u64)data_access_iSeries, /* 0x300 Data Access */
166 (u64)instruction_access_iSeries, /* 0x400 Instruction Access */
167 (u64)hardware_interrupt_iSeries, /* 0x500 External */
168 (u64)alignment_iSeries, /* 0x600 Alignment */
169 (u64)program_check_iSeries, /* 0x700 Program Check */
170 (u64)fp_unavailable_iSeries, /* 0x800 FP Unavailable */
171 (u64)decrementer_iSeries, /* 0x900 Decrementer */
172 (u64)trap_0a_iSeries, /* 0xa00 Trap 0A */
173 (u64)trap_0b_iSeries, /* 0xb00 Trap 0B */
174 (u64)system_call_iSeries, /* 0xc00 System Call */
175 (u64)single_step_iSeries, /* 0xd00 Single Step */
176 (u64)trap_0e_iSeries, /* 0xe00 Trap 0E */
177 (u64)performance_monitor_iSeries,/* 0xf00 Performance Monitor */
178 0, /* int 0x1000 */
179 0, /* int 0x1010 */
180 0, /* int 0x1020 CPU ctls */
181 (u64)hardware_interrupt_iSeries, /* SC Ret Hdlr */
182 (u64)data_access_slb_iSeries, /* 0x380 D-SLB */
183 (u64)instruction_access_slb_iSeries /* 0x480 I-SLB */
184 }
185};
186
187/* May be filled in by the hypervisor so cannot end up in the BSS */
188static struct ItIplParmsReal xItIplParmsReal __attribute__((__section__(".data")));
189
190/* May be filled in by the hypervisor so cannot end up in the BSS */
191struct ItExtVpdPanel xItExtVpdPanel __attribute__((__section__(".data")));
192
193#define maxPhysicalProcessors 32
194
195struct IoHriProcessorVpd xIoHriProcessorVpd[maxPhysicalProcessors] = {
196 {
197 .xInstCacheOperandSize = 32,
198 .xDataCacheOperandSize = 32,
199 .xProcFreq = 50000000,
200 .xTimeBaseFreq = 50000000,
201 .xPVR = 0x3600
202 }
203};
204
205/* Space for Main Store Vpd 27,200 bytes */
206/* May be filled in by the hypervisor so cannot end up in the BSS */
207u64 xMsVpd[3400] __attribute__((__section__(".data")));
208
209/* Space for Recovery Log Buffer */
210/* May be filled in by the hypervisor so cannot end up in the BSS */
211static u64 xRecoveryLogBuffer[32] __attribute__((__section__(".data")));
212
213static const struct SpCommArea xSpCommArea = {
214 .xDesc = 0xE2D7C3C2,
215 .xFormat = 1,
216};
217
218static const struct ItLpRegSave iseries_reg_save[] = {
219 [0 ... (NR_CPUS-1)] = {
220 .xDesc = 0xd397d9e2, /* "LpRS" */
221 .xSize = sizeof(struct ItLpRegSave),
222 },
223};
224
225#define ALPACA_INIT(number) \
226{ \
227 .lppaca_ptr = &lppaca[number], \
228 .reg_save_ptr = &iseries_reg_save[number], \
229}
230
231const struct alpaca alpaca[] = {
232 ALPACA_INIT( 0),
233#if NR_CPUS > 1
234 ALPACA_INIT( 1), ALPACA_INIT( 2), ALPACA_INIT( 3),
235#if NR_CPUS > 4
236 ALPACA_INIT( 4), ALPACA_INIT( 5), ALPACA_INIT( 6), ALPACA_INIT( 7),
237#if NR_CPUS > 8
238 ALPACA_INIT( 8), ALPACA_INIT( 9), ALPACA_INIT(10), ALPACA_INIT(11),
239 ALPACA_INIT(12), ALPACA_INIT(13), ALPACA_INIT(14), ALPACA_INIT(15),
240 ALPACA_INIT(16), ALPACA_INIT(17), ALPACA_INIT(18), ALPACA_INIT(19),
241 ALPACA_INIT(20), ALPACA_INIT(21), ALPACA_INIT(22), ALPACA_INIT(23),
242 ALPACA_INIT(24), ALPACA_INIT(25), ALPACA_INIT(26), ALPACA_INIT(27),
243 ALPACA_INIT(28), ALPACA_INIT(29), ALPACA_INIT(30), ALPACA_INIT(31),
244#if NR_CPUS > 32
245 ALPACA_INIT(32), ALPACA_INIT(33), ALPACA_INIT(34), ALPACA_INIT(35),
246 ALPACA_INIT(36), ALPACA_INIT(37), ALPACA_INIT(38), ALPACA_INIT(39),
247 ALPACA_INIT(40), ALPACA_INIT(41), ALPACA_INIT(42), ALPACA_INIT(43),
248 ALPACA_INIT(44), ALPACA_INIT(45), ALPACA_INIT(46), ALPACA_INIT(47),
249 ALPACA_INIT(48), ALPACA_INIT(49), ALPACA_INIT(50), ALPACA_INIT(51),
250 ALPACA_INIT(52), ALPACA_INIT(53), ALPACA_INIT(54), ALPACA_INIT(55),
251 ALPACA_INIT(56), ALPACA_INIT(57), ALPACA_INIT(58), ALPACA_INIT(59),
252 ALPACA_INIT(60), ALPACA_INIT(61), ALPACA_INIT(62), ALPACA_INIT(63),
253#endif
254#endif
255#endif
256#endif
257};
258
259/* The LparMap data is now located at offset 0x6000 in head.S
260 * It was put there so that the HvReleaseData could address it
261 * with a 32-bit offset as required by the iSeries hypervisor
262 *
263 * The Naca has a pointer to the ItVpdAreas. The hypervisor finds
264 * the Naca via the HvReleaseData area. The HvReleaseData has the
265 * offset into the Naca of the pointer to the ItVpdAreas.
266 */
267const struct ItVpdAreas itVpdAreas = {
268 .xSlicDesc = 0xc9a3e5c1, /* "ItVA" */
269 .xSlicSize = sizeof(struct ItVpdAreas),
270 .xSlicVpdEntries = ItVpdMaxEntries, /* # VPD array entries */
271 .xSlicDmaEntries = ItDmaMaxEntries, /* # DMA array entries */
272 .xSlicMaxLogicalProcs = NR_CPUS * 2, /* Max logical procs */
273 .xSlicMaxPhysicalProcs = maxPhysicalProcessors, /* Max physical procs */
274 .xSlicDmaToksOffset = offsetof(struct ItVpdAreas, xPlicDmaToks),
275 .xSlicVpdAdrsOffset = offsetof(struct ItVpdAreas, xSlicVpdAdrs),
276 .xSlicDmaLensOffset = offsetof(struct ItVpdAreas, xPlicDmaLens),
277 .xSlicVpdLensOffset = offsetof(struct ItVpdAreas, xSlicVpdLens),
278 .xSlicMaxSlotLabels = 0, /* max slot labels */
279 .xSlicMaxLpQueues = 1, /* max LP queues */
280 .xPlicDmaLens = { 0 }, /* DMA lengths */
281 .xPlicDmaToks = { 0 }, /* DMA tokens */
282 .xSlicVpdLens = { /* VPD lengths */
283 0,0,0, /* 0 - 2 */
284 sizeof(xItExtVpdPanel), /* 3 Extended VPD */
285 sizeof(struct alpaca), /* 4 length of (fake) Paca */
286 0, /* 5 */
287 sizeof(struct ItIplParmsReal),/* 6 length of IPL parms */
288 26992, /* 7 length of MS VPD */
289 0, /* 8 */
290 sizeof(struct ItLpNaca),/* 9 length of LP Naca */
291 0, /* 10 */
292 256, /* 11 length of Recovery Log Buf */
293 sizeof(struct SpCommArea), /* 12 length of SP Comm Area */
294 0,0,0, /* 13 - 15 */
295 sizeof(struct IoHriProcessorVpd),/* 16 length of Proc Vpd */
296 0,0,0,0,0,0, /* 17 - 22 */
297 sizeof(struct hvlpevent_queue), /* 23 length of Lp Queue */
298 0,0 /* 24 - 25 */
299 },
300 .xSlicVpdAdrs = { /* VPD addresses */
301 0,0,0, /* 0 - 2 */
302 &xItExtVpdPanel, /* 3 Extended VPD */
303 &alpaca[0], /* 4 first (fake) Paca */
304 0, /* 5 */
305 &xItIplParmsReal, /* 6 IPL parms */
306 &xMsVpd, /* 7 MS Vpd */
307 0, /* 8 */
308 &itLpNaca, /* 9 LpNaca */
309 0, /* 10 */
310 &xRecoveryLogBuffer, /* 11 Recovery Log Buffer */
311 &xSpCommArea, /* 12 SP Comm Area */
312 0,0,0, /* 13 - 15 */
313 &xIoHriProcessorVpd, /* 16 Proc Vpd */
314 0,0,0,0,0,0, /* 17 - 22 */
315 &hvlpevent_queue, /* 23 Lp Queue */
316 0,0
317 }
318};
diff --git a/arch/powerpc/platforms/iseries/lpevents.c b/arch/powerpc/platforms/iseries/lpevents.c
deleted file mode 100644
index 202e22798d30..000000000000
--- a/arch/powerpc/platforms/iseries/lpevents.c
+++ /dev/null
@@ -1,341 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/stddef.h>
11#include <linux/kernel.h>
12#include <linux/sched.h>
13#include <linux/bootmem.h>
14#include <linux/seq_file.h>
15#include <linux/proc_fs.h>
16#include <linux/export.h>
17
18#include <asm/system.h>
19#include <asm/paca.h>
20#include <asm/firmware.h>
21#include <asm/iseries/it_lp_queue.h>
22#include <asm/iseries/hv_lp_event.h>
23#include <asm/iseries/hv_call_event.h>
24#include "it_lp_naca.h"
25
26/*
27 * The LpQueue is used to pass event data from the hypervisor to
28 * the partition. This is where I/O interrupt events are communicated.
29 *
30 * It is written to by the hypervisor so cannot end up in the BSS.
31 */
32struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));
33
34DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);
35
36static char *event_types[HvLpEvent_Type_NumTypes] = {
37 "Hypervisor",
38 "Machine Facilities",
39 "Session Manager",
40 "SPD I/O",
41 "Virtual Bus",
42 "PCI I/O",
43 "RIO I/O",
44 "Virtual Lan",
45 "Virtual I/O"
46};
47
48/* Array of LpEvent handler functions */
49static LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
50static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
51
52static struct HvLpEvent * get_next_hvlpevent(void)
53{
54 struct HvLpEvent * event;
55 event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;
56
57 if (hvlpevent_is_valid(event)) {
58 /* rmb() needed only for weakly consistent machines (regatta) */
59 rmb();
60 /* Set pointer to next potential event */
61 hvlpevent_queue.hq_current_event += ((event->xSizeMinus1 +
62 IT_LP_EVENT_ALIGN) / IT_LP_EVENT_ALIGN) *
63 IT_LP_EVENT_ALIGN;
64
65 /* Wrap to beginning if no room at end */
66 if (hvlpevent_queue.hq_current_event >
67 hvlpevent_queue.hq_last_event) {
68 hvlpevent_queue.hq_current_event =
69 hvlpevent_queue.hq_event_stack;
70 }
71 } else {
72 event = NULL;
73 }
74
75 return event;
76}
77
78static unsigned long spread_lpevents = NR_CPUS;
79
80int hvlpevent_is_pending(void)
81{
82 struct HvLpEvent *next_event;
83
84 if (smp_processor_id() >= spread_lpevents)
85 return 0;
86
87 next_event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;
88
89 return hvlpevent_is_valid(next_event) ||
90 hvlpevent_queue.hq_overflow_pending;
91}
92
93static void hvlpevent_clear_valid(struct HvLpEvent * event)
94{
95 /* Tell the Hypervisor that we're done with this event.
96 * Also clear bits within this event that might look like valid bits.
97 * ie. on 64-byte boundaries.
98 */
99 struct HvLpEvent *tmp;
100 unsigned extra = ((event->xSizeMinus1 + IT_LP_EVENT_ALIGN) /
101 IT_LP_EVENT_ALIGN) - 1;
102
103 switch (extra) {
104 case 3:
105 tmp = (struct HvLpEvent*)((char*)event + 3 * IT_LP_EVENT_ALIGN);
106 hvlpevent_invalidate(tmp);
107 case 2:
108 tmp = (struct HvLpEvent*)((char*)event + 2 * IT_LP_EVENT_ALIGN);
109 hvlpevent_invalidate(tmp);
110 case 1:
111 tmp = (struct HvLpEvent*)((char*)event + 1 * IT_LP_EVENT_ALIGN);
112 hvlpevent_invalidate(tmp);
113 }
114
115 mb();
116
117 hvlpevent_invalidate(event);
118}
119
120void process_hvlpevents(void)
121{
122 struct HvLpEvent * event;
123
124 restart:
125 /* If we have recursed, just return */
126 if (!spin_trylock(&hvlpevent_queue.hq_lock))
127 return;
128
129 for (;;) {
130 event = get_next_hvlpevent();
131 if (event) {
132 /* Call appropriate handler here, passing
133 * a pointer to the LpEvent. The handler
134 * must make a copy of the LpEvent if it
135 * needs it in a bottom half. (perhaps for
136 * an ACK)
137 *
138 * Handlers are responsible for ACK processing
139 *
140 * The Hypervisor guarantees that LpEvents will
141 * only be delivered with types that we have
142 * registered for, so no type check is necessary
143 * here!
144 */
145 if (event->xType < HvLpEvent_Type_NumTypes)
146 __get_cpu_var(hvlpevent_counts)[event->xType]++;
147 if (event->xType < HvLpEvent_Type_NumTypes &&
148 lpEventHandler[event->xType])
149 lpEventHandler[event->xType](event);
150 else {
151 u8 type = event->xType;
152
153 /*
154 * Don't printk in the spinlock as printk
155 * may require ack events form the HV to send
156 * any characters there.
157 */
158 hvlpevent_clear_valid(event);
159 spin_unlock(&hvlpevent_queue.hq_lock);
160 printk(KERN_INFO
161 "Unexpected Lp Event type=%d\n", type);
162 goto restart;
163 }
164
165 hvlpevent_clear_valid(event);
166 } else if (hvlpevent_queue.hq_overflow_pending)
167 /*
168 * No more valid events. If overflow events are
169 * pending process them
170 */
171 HvCallEvent_getOverflowLpEvents(hvlpevent_queue.hq_index);
172 else
173 break;
174 }
175
176 spin_unlock(&hvlpevent_queue.hq_lock);
177}
178
179static int set_spread_lpevents(char *str)
180{
181 unsigned long val = simple_strtoul(str, NULL, 0);
182
183 /*
184 * The parameter is the number of processors to share in processing
185 * lp events.
186 */
187 if (( val > 0) && (val <= NR_CPUS)) {
188 spread_lpevents = val;
189 printk("lpevent processing spread over %ld processors\n", val);
190 } else {
191 printk("invalid spread_lpevents %ld\n", val);
192 }
193
194 return 1;
195}
196__setup("spread_lpevents=", set_spread_lpevents);
197
198void __init setup_hvlpevent_queue(void)
199{
200 void *eventStack;
201
202 spin_lock_init(&hvlpevent_queue.hq_lock);
203
204 /* Allocate a page for the Event Stack. */
205 eventStack = alloc_bootmem_pages(IT_LP_EVENT_STACK_SIZE);
206 memset(eventStack, 0, IT_LP_EVENT_STACK_SIZE);
207
208 /* Invoke the hypervisor to initialize the event stack */
209 HvCallEvent_setLpEventStack(0, eventStack, IT_LP_EVENT_STACK_SIZE);
210
211 hvlpevent_queue.hq_event_stack = eventStack;
212 hvlpevent_queue.hq_current_event = eventStack;
213 hvlpevent_queue.hq_last_event = (char *)eventStack +
214 (IT_LP_EVENT_STACK_SIZE - IT_LP_EVENT_MAX_SIZE);
215 hvlpevent_queue.hq_index = 0;
216}
217
218/* Register a handler for an LpEvent type */
219int HvLpEvent_registerHandler(HvLpEvent_Type eventType, LpEventHandler handler)
220{
221 if (eventType < HvLpEvent_Type_NumTypes) {
222 lpEventHandler[eventType] = handler;
223 return 0;
224 }
225 return 1;
226}
227EXPORT_SYMBOL(HvLpEvent_registerHandler);
228
229int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType)
230{
231 might_sleep();
232
233 if (eventType < HvLpEvent_Type_NumTypes) {
234 if (!lpEventHandlerPaths[eventType]) {
235 lpEventHandler[eventType] = NULL;
236 /*
237 * We now sleep until all other CPUs have scheduled.
238 * This ensures that the deletion is seen by all
239 * other CPUs, and that the deleted handler isn't
240 * still running on another CPU when we return.
241 */
242 synchronize_sched();
243 return 0;
244 }
245 }
246 return 1;
247}
248EXPORT_SYMBOL(HvLpEvent_unregisterHandler);
249
250/*
251 * lpIndex is the partition index of the target partition.
252 * needed only for VirtualIo, VirtualLan and SessionMgr. Zero
253 * indicates to use our partition index - for the other types.
254 */
255int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
256{
257 if ((eventType < HvLpEvent_Type_NumTypes) &&
258 lpEventHandler[eventType]) {
259 if (lpIndex == 0)
260 lpIndex = itLpNaca.xLpIndex;
261 HvCallEvent_openLpEventPath(lpIndex, eventType);
262 ++lpEventHandlerPaths[eventType];
263 return 0;
264 }
265 return 1;
266}
267
268int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
269{
270 if ((eventType < HvLpEvent_Type_NumTypes) &&
271 lpEventHandler[eventType] &&
272 lpEventHandlerPaths[eventType]) {
273 if (lpIndex == 0)
274 lpIndex = itLpNaca.xLpIndex;
275 HvCallEvent_closeLpEventPath(lpIndex, eventType);
276 --lpEventHandlerPaths[eventType];
277 return 0;
278 }
279 return 1;
280}
281
282static int proc_lpevents_show(struct seq_file *m, void *v)
283{
284 int cpu, i;
285 unsigned long sum;
286 static unsigned long cpu_totals[NR_CPUS];
287
288 /* FIXME: do we care that there's no locking here? */
289 sum = 0;
290 for_each_online_cpu(cpu) {
291 cpu_totals[cpu] = 0;
292 for (i = 0; i < HvLpEvent_Type_NumTypes; i++) {
293 cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];
294 }
295 sum += cpu_totals[cpu];
296 }
297
298 seq_printf(m, "LpEventQueue 0\n");
299 seq_printf(m, " events processed:\t%lu\n", sum);
300
301 for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {
302 sum = 0;
303 for_each_online_cpu(cpu) {
304 sum += per_cpu(hvlpevent_counts, cpu)[i];
305 }
306
307 seq_printf(m, " %-20s %10lu\n", event_types[i], sum);
308 }
309
310 seq_printf(m, "\n events processed by processor:\n");
311
312 for_each_online_cpu(cpu) {
313 seq_printf(m, " CPU%02d %10lu\n", cpu, cpu_totals[cpu]);
314 }
315
316 return 0;
317}
318
319static int proc_lpevents_open(struct inode *inode, struct file *file)
320{
321 return single_open(file, proc_lpevents_show, NULL);
322}
323
324static const struct file_operations proc_lpevents_operations = {
325 .open = proc_lpevents_open,
326 .read = seq_read,
327 .llseek = seq_lseek,
328 .release = single_release,
329};
330
331static int __init proc_lpevents_init(void)
332{
333 if (!firmware_has_feature(FW_FEATURE_ISERIES))
334 return 0;
335
336 proc_create("iSeries/lpevents", S_IFREG|S_IRUGO, NULL,
337 &proc_lpevents_operations);
338 return 0;
339}
340__initcall(proc_lpevents_init);
341
diff --git a/arch/powerpc/platforms/iseries/main_store.h b/arch/powerpc/platforms/iseries/main_store.h
deleted file mode 100644
index 1a7a3f50e40b..000000000000
--- a/arch/powerpc/platforms/iseries/main_store.h
+++ /dev/null
@@ -1,165 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef _ISERIES_MAIN_STORE_H
20#define _ISERIES_MAIN_STORE_H
21
22/* Main Store Vpd for Condor,iStar,sStar */
23struct IoHriMainStoreSegment4 {
24 u8 msArea0Exists:1;
25 u8 msArea1Exists:1;
26 u8 msArea2Exists:1;
27 u8 msArea3Exists:1;
28 u8 reserved1:4;
29 u8 reserved2;
30
31 u8 msArea0Functional:1;
32 u8 msArea1Functional:1;
33 u8 msArea2Functional:1;
34 u8 msArea3Functional:1;
35 u8 reserved3:4;
36 u8 reserved4;
37
38 u32 totalMainStore;
39
40 u64 msArea0Ptr;
41 u64 msArea1Ptr;
42 u64 msArea2Ptr;
43 u64 msArea3Ptr;
44
45 u32 cardProductionLevel;
46
47 u32 msAdrHole;
48
49 u8 msArea0HasRiserVpd:1;
50 u8 msArea1HasRiserVpd:1;
51 u8 msArea2HasRiserVpd:1;
52 u8 msArea3HasRiserVpd:1;
53 u8 reserved5:4;
54 u8 reserved6;
55 u16 reserved7;
56
57 u8 reserved8[28];
58
59 u64 nonInterleavedBlocksStartAdr;
60 u64 nonInterleavedBlocksEndAdr;
61};
62
63/* Main Store VPD for Power4 */
64struct __attribute((packed)) IoHriMainStoreChipInfo1 {
65 u32 chipMfgID;
66 char chipECLevel[4];
67};
68
69struct IoHriMainStoreVpdIdData {
70 char typeNumber[4];
71 char modelNumber[4];
72 char partNumber[12];
73 char serialNumber[12];
74};
75
76struct __attribute((packed)) IoHriMainStoreVpdFruData {
77 char fruLabel[8];
78 u8 numberOfSlots;
79 u8 pluggingType;
80 u16 slotMapIndex;
81};
82
83struct __attribute((packed)) IoHriMainStoreAdrRangeBlock {
84 void *blockStart;
85 void *blockEnd;
86 u32 blockProcChipId;
87};
88
89#define MaxAreaAdrRangeBlocks 4
90
91struct __attribute((packed)) IoHriMainStoreArea4 {
92 u32 msVpdFormat;
93 u8 containedVpdType;
94 u8 reserved1;
95 u16 reserved2;
96
97 u64 msExists;
98 u64 msFunctional;
99
100 u32 memorySize;
101 u32 procNodeId;
102
103 u32 numAdrRangeBlocks;
104 struct IoHriMainStoreAdrRangeBlock xAdrRangeBlock[MaxAreaAdrRangeBlocks];
105
106 struct IoHriMainStoreChipInfo1 chipInfo0;
107 struct IoHriMainStoreChipInfo1 chipInfo1;
108 struct IoHriMainStoreChipInfo1 chipInfo2;
109 struct IoHriMainStoreChipInfo1 chipInfo3;
110 struct IoHriMainStoreChipInfo1 chipInfo4;
111 struct IoHriMainStoreChipInfo1 chipInfo5;
112 struct IoHriMainStoreChipInfo1 chipInfo6;
113 struct IoHriMainStoreChipInfo1 chipInfo7;
114
115 void *msRamAreaArray;
116 u32 msRamAreaArrayNumEntries;
117 u32 msRamAreaArrayEntrySize;
118
119 u32 numaDimmExists;
120 u32 numaDimmFunctional;
121 void *numaDimmArray;
122 u32 numaDimmArrayNumEntries;
123 u32 numaDimmArrayEntrySize;
124
125 struct IoHriMainStoreVpdIdData idData;
126
127 u64 powerData;
128 u64 cardAssemblyPartNum;
129 u64 chipSerialNum;
130
131 u64 reserved3;
132 char reserved4[16];
133
134 struct IoHriMainStoreVpdFruData fruData;
135
136 u8 vpdPortNum;
137 u8 reserved5;
138 u8 frameId;
139 u8 rackUnit;
140 char asciiKeywordVpd[256];
141 u32 reserved6;
142};
143
144
145struct IoHriMainStoreSegment5 {
146 u16 reserved1;
147 u8 reserved2;
148 u8 msVpdFormat;
149
150 u32 totalMainStore;
151 u64 maxConfiguredMsAdr;
152
153 struct IoHriMainStoreArea4 *msAreaArray;
154 u32 msAreaArrayNumEntries;
155 u32 msAreaArrayEntrySize;
156
157 u32 msAreaExists;
158 u32 msAreaFunctional;
159
160 u64 reserved3;
161};
162
163extern u64 xMsVpd[];
164
165#endif /* _ISERIES_MAIN_STORE_H */
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
deleted file mode 100644
index 254c1fc3d8dd..000000000000
--- a/arch/powerpc/platforms/iseries/mf.c
+++ /dev/null
@@ -1,1275 +0,0 @@
1/*
2 * Copyright (C) 2001 Troy D. Armstrong IBM Corporation
3 * Copyright (C) 2004-2005 Stephen Rothwell IBM Corporation
4 *
5 * This modules exists as an interface between a Linux secondary partition
6 * running on an iSeries and the primary partition's Virtual Service
7 * Processor (VSP) object. The VSP has final authority over powering on/off
8 * all partitions in the iSeries. It also provides miscellaneous low-level
9 * machine facility type operations.
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#include <linux/types.h>
28#include <linux/errno.h>
29#include <linux/kernel.h>
30#include <linux/init.h>
31#include <linux/completion.h>
32#include <linux/delay.h>
33#include <linux/export.h>
34#include <linux/proc_fs.h>
35#include <linux/dma-mapping.h>
36#include <linux/bcd.h>
37#include <linux/rtc.h>
38#include <linux/slab.h>
39
40#include <asm/time.h>
41#include <asm/uaccess.h>
42#include <asm/paca.h>
43#include <asm/abs_addr.h>
44#include <asm/firmware.h>
45#include <asm/iseries/mf.h>
46#include <asm/iseries/hv_lp_config.h>
47#include <asm/iseries/hv_lp_event.h>
48#include <asm/iseries/it_lp_queue.h>
49
50#include "setup.h"
51
52static int mf_initialized;
53
54/*
55 * This is the structure layout for the Machine Facilities LPAR event
56 * flows.
57 */
58struct vsp_cmd_data {
59 u64 token;
60 u16 cmd;
61 HvLpIndex lp_index;
62 u8 result_code;
63 u32 reserved;
64 union {
65 u64 state; /* GetStateOut */
66 u64 ipl_type; /* GetIplTypeOut, Function02SelectIplTypeIn */
67 u64 ipl_mode; /* GetIplModeOut, Function02SelectIplModeIn */
68 u64 page[4]; /* GetSrcHistoryIn */
69 u64 flag; /* GetAutoIplWhenPrimaryIplsOut,
70 SetAutoIplWhenPrimaryIplsIn,
71 WhiteButtonPowerOffIn,
72 Function08FastPowerOffIn,
73 IsSpcnRackPowerIncompleteOut */
74 struct {
75 u64 token;
76 u64 address_type;
77 u64 side;
78 u32 length;
79 u32 offset;
80 } kern; /* SetKernelImageIn, GetKernelImageIn,
81 SetKernelCmdLineIn, GetKernelCmdLineIn */
82 u32 length_out; /* GetKernelImageOut, GetKernelCmdLineOut */
83 u8 reserved[80];
84 } sub_data;
85};
86
87struct vsp_rsp_data {
88 struct completion com;
89 struct vsp_cmd_data *response;
90};
91
92struct alloc_data {
93 u16 size;
94 u16 type;
95 u32 count;
96 u16 reserved1;
97 u8 reserved2;
98 HvLpIndex target_lp;
99};
100
101struct ce_msg_data;
102
103typedef void (*ce_msg_comp_hdlr)(void *token, struct ce_msg_data *vsp_cmd_rsp);
104
105struct ce_msg_comp_data {
106 ce_msg_comp_hdlr handler;
107 void *token;
108};
109
110struct ce_msg_data {
111 u8 ce_msg[12];
112 char reserved[4];
113 struct ce_msg_comp_data *completion;
114};
115
116struct io_mf_lp_event {
117 struct HvLpEvent hp_lp_event;
118 u16 subtype_result_code;
119 u16 reserved1;
120 u32 reserved2;
121 union {
122 struct alloc_data alloc;
123 struct ce_msg_data ce_msg;
124 struct vsp_cmd_data vsp_cmd;
125 } data;
126};
127
128#define subtype_data(a, b, c, d) \
129 (((a) << 24) + ((b) << 16) + ((c) << 8) + (d))
130
131/*
132 * All outgoing event traffic is kept on a FIFO queue. The first
133 * pointer points to the one that is outstanding, and all new
134 * requests get stuck on the end. Also, we keep a certain number of
135 * preallocated pending events so that we can operate very early in
136 * the boot up sequence (before kmalloc is ready).
137 */
138struct pending_event {
139 struct pending_event *next;
140 struct io_mf_lp_event event;
141 MFCompleteHandler hdlr;
142 char dma_data[72];
143 unsigned dma_data_length;
144 unsigned remote_address;
145};
146static spinlock_t pending_event_spinlock;
147static struct pending_event *pending_event_head;
148static struct pending_event *pending_event_tail;
149static struct pending_event *pending_event_avail;
150#define PENDING_EVENT_PREALLOC_LEN 16
151static struct pending_event pending_event_prealloc[PENDING_EVENT_PREALLOC_LEN];
152
153/*
154 * Put a pending event onto the available queue, so it can get reused.
155 * Attention! You must have the pending_event_spinlock before calling!
156 */
157static void free_pending_event(struct pending_event *ev)
158{
159 if (ev != NULL) {
160 ev->next = pending_event_avail;
161 pending_event_avail = ev;
162 }
163}
164
165/*
166 * Enqueue the outbound event onto the stack. If the queue was
167 * empty to begin with, we must also issue it via the Hypervisor
168 * interface. There is a section of code below that will touch
169 * the first stack pointer without the protection of the pending_event_spinlock.
170 * This is OK, because we know that nobody else will be modifying
171 * the first pointer when we do this.
172 */
173static int signal_event(struct pending_event *ev)
174{
175 int rc = 0;
176 unsigned long flags;
177 int go = 1;
178 struct pending_event *ev1;
179 HvLpEvent_Rc hv_rc;
180
181 /* enqueue the event */
182 if (ev != NULL) {
183 ev->next = NULL;
184 spin_lock_irqsave(&pending_event_spinlock, flags);
185 if (pending_event_head == NULL)
186 pending_event_head = ev;
187 else {
188 go = 0;
189 pending_event_tail->next = ev;
190 }
191 pending_event_tail = ev;
192 spin_unlock_irqrestore(&pending_event_spinlock, flags);
193 }
194
195 /* send the event */
196 while (go) {
197 go = 0;
198
199 /* any DMA data to send beforehand? */
200 if (pending_event_head->dma_data_length > 0)
201 HvCallEvent_dmaToSp(pending_event_head->dma_data,
202 pending_event_head->remote_address,
203 pending_event_head->dma_data_length,
204 HvLpDma_Direction_LocalToRemote);
205
206 hv_rc = HvCallEvent_signalLpEvent(
207 &pending_event_head->event.hp_lp_event);
208 if (hv_rc != HvLpEvent_Rc_Good) {
209 printk(KERN_ERR "mf.c: HvCallEvent_signalLpEvent() "
210 "failed with %d\n", (int)hv_rc);
211
212 spin_lock_irqsave(&pending_event_spinlock, flags);
213 ev1 = pending_event_head;
214 pending_event_head = pending_event_head->next;
215 if (pending_event_head != NULL)
216 go = 1;
217 spin_unlock_irqrestore(&pending_event_spinlock, flags);
218
219 if (ev1 == ev)
220 rc = -EIO;
221 else if (ev1->hdlr != NULL)
222 (*ev1->hdlr)((void *)ev1->event.hp_lp_event.xCorrelationToken, -EIO);
223
224 spin_lock_irqsave(&pending_event_spinlock, flags);
225 free_pending_event(ev1);
226 spin_unlock_irqrestore(&pending_event_spinlock, flags);
227 }
228 }
229
230 return rc;
231}
232
233/*
234 * Allocate a new pending_event structure, and initialize it.
235 */
236static struct pending_event *new_pending_event(void)
237{
238 struct pending_event *ev = NULL;
239 HvLpIndex primary_lp = HvLpConfig_getPrimaryLpIndex();
240 unsigned long flags;
241 struct HvLpEvent *hev;
242
243 spin_lock_irqsave(&pending_event_spinlock, flags);
244 if (pending_event_avail != NULL) {
245 ev = pending_event_avail;
246 pending_event_avail = pending_event_avail->next;
247 }
248 spin_unlock_irqrestore(&pending_event_spinlock, flags);
249 if (ev == NULL) {
250 ev = kmalloc(sizeof(struct pending_event), GFP_ATOMIC);
251 if (ev == NULL) {
252 printk(KERN_ERR "mf.c: unable to kmalloc %ld bytes\n",
253 sizeof(struct pending_event));
254 return NULL;
255 }
256 }
257 memset(ev, 0, sizeof(struct pending_event));
258 hev = &ev->event.hp_lp_event;
259 hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DO_ACK | HV_LP_EVENT_INT;
260 hev->xType = HvLpEvent_Type_MachineFac;
261 hev->xSourceLp = HvLpConfig_getLpIndex();
262 hev->xTargetLp = primary_lp;
263 hev->xSizeMinus1 = sizeof(ev->event) - 1;
264 hev->xRc = HvLpEvent_Rc_Good;
265 hev->xSourceInstanceId = HvCallEvent_getSourceLpInstanceId(primary_lp,
266 HvLpEvent_Type_MachineFac);
267 hev->xTargetInstanceId = HvCallEvent_getTargetLpInstanceId(primary_lp,
268 HvLpEvent_Type_MachineFac);
269
270 return ev;
271}
272
273static int __maybe_unused
274signal_vsp_instruction(struct vsp_cmd_data *vsp_cmd)
275{
276 struct pending_event *ev = new_pending_event();
277 int rc;
278 struct vsp_rsp_data response;
279
280 if (ev == NULL)
281 return -ENOMEM;
282
283 init_completion(&response.com);
284 response.response = vsp_cmd;
285 ev->event.hp_lp_event.xSubtype = 6;
286 ev->event.hp_lp_event.x.xSubtypeData =
287 subtype_data('M', 'F', 'V', 'I');
288 ev->event.data.vsp_cmd.token = (u64)&response;
289 ev->event.data.vsp_cmd.cmd = vsp_cmd->cmd;
290 ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
291 ev->event.data.vsp_cmd.result_code = 0xFF;
292 ev->event.data.vsp_cmd.reserved = 0;
293 memcpy(&(ev->event.data.vsp_cmd.sub_data),
294 &(vsp_cmd->sub_data), sizeof(vsp_cmd->sub_data));
295 mb();
296
297 rc = signal_event(ev);
298 if (rc == 0)
299 wait_for_completion(&response.com);
300 return rc;
301}
302
303
304/*
305 * Send a 12-byte CE message to the primary partition VSP object
306 */
307static int signal_ce_msg(char *ce_msg, struct ce_msg_comp_data *completion)
308{
309 struct pending_event *ev = new_pending_event();
310
311 if (ev == NULL)
312 return -ENOMEM;
313
314 ev->event.hp_lp_event.xSubtype = 0;
315 ev->event.hp_lp_event.x.xSubtypeData =
316 subtype_data('M', 'F', 'C', 'E');
317 memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12);
318 ev->event.data.ce_msg.completion = completion;
319 return signal_event(ev);
320}
321
322/*
323 * Send a 12-byte CE message (with no data) to the primary partition VSP object
324 */
325static int signal_ce_msg_simple(u8 ce_op, struct ce_msg_comp_data *completion)
326{
327 u8 ce_msg[12];
328
329 memset(ce_msg, 0, sizeof(ce_msg));
330 ce_msg[3] = ce_op;
331 return signal_ce_msg(ce_msg, completion);
332}
333
334/*
335 * Send a 12-byte CE message and DMA data to the primary partition VSP object
336 */
337static int dma_and_signal_ce_msg(char *ce_msg,
338 struct ce_msg_comp_data *completion, void *dma_data,
339 unsigned dma_data_length, unsigned remote_address)
340{
341 struct pending_event *ev = new_pending_event();
342
343 if (ev == NULL)
344 return -ENOMEM;
345
346 ev->event.hp_lp_event.xSubtype = 0;
347 ev->event.hp_lp_event.x.xSubtypeData =
348 subtype_data('M', 'F', 'C', 'E');
349 memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12);
350 ev->event.data.ce_msg.completion = completion;
351 memcpy(ev->dma_data, dma_data, dma_data_length);
352 ev->dma_data_length = dma_data_length;
353 ev->remote_address = remote_address;
354 return signal_event(ev);
355}
356
357/*
358 * Initiate a nice (hopefully) shutdown of Linux. We simply are
359 * going to try and send the init process a SIGINT signal. If
360 * this fails (why?), we'll simply force it off in a not-so-nice
361 * manner.
362 */
363static int shutdown(void)
364{
365 int rc = kill_cad_pid(SIGINT, 1);
366
367 if (rc) {
368 printk(KERN_ALERT "mf.c: SIGINT to init failed (%d), "
369 "hard shutdown commencing\n", rc);
370 mf_power_off();
371 } else
372 printk(KERN_INFO "mf.c: init has been successfully notified "
373 "to proceed with shutdown\n");
374 return rc;
375}
376
377/*
378 * The primary partition VSP object is sending us a new
379 * event flow. Handle it...
380 */
381static void handle_int(struct io_mf_lp_event *event)
382{
383 struct ce_msg_data *ce_msg_data;
384 struct ce_msg_data *pce_msg_data;
385 unsigned long flags;
386 struct pending_event *pev;
387
388 /* ack the interrupt */
389 event->hp_lp_event.xRc = HvLpEvent_Rc_Good;
390 HvCallEvent_ackLpEvent(&event->hp_lp_event);
391
392 /* process interrupt */
393 switch (event->hp_lp_event.xSubtype) {
394 case 0: /* CE message */
395 ce_msg_data = &event->data.ce_msg;
396 switch (ce_msg_data->ce_msg[3]) {
397 case 0x5B: /* power control notification */
398 if ((ce_msg_data->ce_msg[5] & 0x20) != 0) {
399 printk(KERN_INFO "mf.c: Commencing partition shutdown\n");
400 if (shutdown() == 0)
401 signal_ce_msg_simple(0xDB, NULL);
402 }
403 break;
404 case 0xC0: /* get time */
405 spin_lock_irqsave(&pending_event_spinlock, flags);
406 pev = pending_event_head;
407 if (pev != NULL)
408 pending_event_head = pending_event_head->next;
409 spin_unlock_irqrestore(&pending_event_spinlock, flags);
410 if (pev == NULL)
411 break;
412 pce_msg_data = &pev->event.data.ce_msg;
413 if (pce_msg_data->ce_msg[3] != 0x40)
414 break;
415 if (pce_msg_data->completion != NULL) {
416 ce_msg_comp_hdlr handler =
417 pce_msg_data->completion->handler;
418 void *token = pce_msg_data->completion->token;
419
420 if (handler != NULL)
421 (*handler)(token, ce_msg_data);
422 }
423 spin_lock_irqsave(&pending_event_spinlock, flags);
424 free_pending_event(pev);
425 spin_unlock_irqrestore(&pending_event_spinlock, flags);
426 /* send next waiting event */
427 if (pending_event_head != NULL)
428 signal_event(NULL);
429 break;
430 }
431 break;
432 case 1: /* IT sys shutdown */
433 printk(KERN_INFO "mf.c: Commencing system shutdown\n");
434 shutdown();
435 break;
436 }
437}
438
439/*
440 * The primary partition VSP object is acknowledging the receipt
441 * of a flow we sent to them. If there are other flows queued
442 * up, we must send another one now...
443 */
444static void handle_ack(struct io_mf_lp_event *event)
445{
446 unsigned long flags;
447 struct pending_event *two = NULL;
448 unsigned long free_it = 0;
449 struct ce_msg_data *ce_msg_data;
450 struct ce_msg_data *pce_msg_data;
451 struct vsp_rsp_data *rsp;
452
453 /* handle current event */
454 if (pending_event_head == NULL) {
455 printk(KERN_ERR "mf.c: stack empty for receiving ack\n");
456 return;
457 }
458
459 switch (event->hp_lp_event.xSubtype) {
460 case 0: /* CE msg */
461 ce_msg_data = &event->data.ce_msg;
462 if (ce_msg_data->ce_msg[3] != 0x40) {
463 free_it = 1;
464 break;
465 }
466 if (ce_msg_data->ce_msg[2] == 0)
467 break;
468 free_it = 1;
469 pce_msg_data = &pending_event_head->event.data.ce_msg;
470 if (pce_msg_data->completion != NULL) {
471 ce_msg_comp_hdlr handler =
472 pce_msg_data->completion->handler;
473 void *token = pce_msg_data->completion->token;
474
475 if (handler != NULL)
476 (*handler)(token, ce_msg_data);
477 }
478 break;
479 case 4: /* allocate */
480 case 5: /* deallocate */
481 if (pending_event_head->hdlr != NULL)
482 (*pending_event_head->hdlr)((void *)event->hp_lp_event.xCorrelationToken, event->data.alloc.count);
483 free_it = 1;
484 break;
485 case 6:
486 free_it = 1;
487 rsp = (struct vsp_rsp_data *)event->data.vsp_cmd.token;
488 if (rsp == NULL) {
489 printk(KERN_ERR "mf.c: no rsp\n");
490 break;
491 }
492 if (rsp->response != NULL)
493 memcpy(rsp->response, &event->data.vsp_cmd,
494 sizeof(event->data.vsp_cmd));
495 complete(&rsp->com);
496 break;
497 }
498
499 /* remove from queue */
500 spin_lock_irqsave(&pending_event_spinlock, flags);
501 if ((pending_event_head != NULL) && (free_it == 1)) {
502 struct pending_event *oldHead = pending_event_head;
503
504 pending_event_head = pending_event_head->next;
505 two = pending_event_head;
506 free_pending_event(oldHead);
507 }
508 spin_unlock_irqrestore(&pending_event_spinlock, flags);
509
510 /* send next waiting event */
511 if (two != NULL)
512 signal_event(NULL);
513}
514
515/*
516 * This is the generic event handler we are registering with
517 * the Hypervisor. Ensure the flows are for us, and then
518 * parse it enough to know if it is an interrupt or an
519 * acknowledge.
520 */
521static void hv_handler(struct HvLpEvent *event)
522{
523 if ((event != NULL) && (event->xType == HvLpEvent_Type_MachineFac)) {
524 if (hvlpevent_is_ack(event))
525 handle_ack((struct io_mf_lp_event *)event);
526 else
527 handle_int((struct io_mf_lp_event *)event);
528 } else
529 printk(KERN_ERR "mf.c: alien event received\n");
530}
531
532/*
533 * Global kernel interface to allocate and seed events into the
534 * Hypervisor.
535 */
536void mf_allocate_lp_events(HvLpIndex target_lp, HvLpEvent_Type type,
537 unsigned size, unsigned count, MFCompleteHandler hdlr,
538 void *user_token)
539{
540 struct pending_event *ev = new_pending_event();
541 int rc;
542
543 if (ev == NULL) {
544 rc = -ENOMEM;
545 } else {
546 ev->event.hp_lp_event.xSubtype = 4;
547 ev->event.hp_lp_event.xCorrelationToken = (u64)user_token;
548 ev->event.hp_lp_event.x.xSubtypeData =
549 subtype_data('M', 'F', 'M', 'A');
550 ev->event.data.alloc.target_lp = target_lp;
551 ev->event.data.alloc.type = type;
552 ev->event.data.alloc.size = size;
553 ev->event.data.alloc.count = count;
554 ev->hdlr = hdlr;
555 rc = signal_event(ev);
556 }
557 if ((rc != 0) && (hdlr != NULL))
558 (*hdlr)(user_token, rc);
559}
560EXPORT_SYMBOL(mf_allocate_lp_events);
561
562/*
563 * Global kernel interface to unseed and deallocate events already in
564 * Hypervisor.
565 */
566void mf_deallocate_lp_events(HvLpIndex target_lp, HvLpEvent_Type type,
567 unsigned count, MFCompleteHandler hdlr, void *user_token)
568{
569 struct pending_event *ev = new_pending_event();
570 int rc;
571
572 if (ev == NULL)
573 rc = -ENOMEM;
574 else {
575 ev->event.hp_lp_event.xSubtype = 5;
576 ev->event.hp_lp_event.xCorrelationToken = (u64)user_token;
577 ev->event.hp_lp_event.x.xSubtypeData =
578 subtype_data('M', 'F', 'M', 'D');
579 ev->event.data.alloc.target_lp = target_lp;
580 ev->event.data.alloc.type = type;
581 ev->event.data.alloc.count = count;
582 ev->hdlr = hdlr;
583 rc = signal_event(ev);
584 }
585 if ((rc != 0) && (hdlr != NULL))
586 (*hdlr)(user_token, rc);
587}
588EXPORT_SYMBOL(mf_deallocate_lp_events);
589
590/*
591 * Global kernel interface to tell the VSP object in the primary
592 * partition to power this partition off.
593 */
594void mf_power_off(void)
595{
596 printk(KERN_INFO "mf.c: Down it goes...\n");
597 signal_ce_msg_simple(0x4d, NULL);
598 for (;;)
599 ;
600}
601
602/*
603 * Global kernel interface to tell the VSP object in the primary
604 * partition to reboot this partition.
605 */
606void mf_reboot(char *cmd)
607{
608 printk(KERN_INFO "mf.c: Preparing to bounce...\n");
609 signal_ce_msg_simple(0x4e, NULL);
610 for (;;)
611 ;
612}
613
614/*
615 * Display a single word SRC onto the VSP control panel.
616 */
617void mf_display_src(u32 word)
618{
619 u8 ce[12];
620
621 memset(ce, 0, sizeof(ce));
622 ce[3] = 0x4a;
623 ce[7] = 0x01;
624 ce[8] = word >> 24;
625 ce[9] = word >> 16;
626 ce[10] = word >> 8;
627 ce[11] = word;
628 signal_ce_msg(ce, NULL);
629}
630
/*
 * Display a single word SRC of the form "PROGXXXX" on the VSP control panel.
 */
static __init void mf_display_progress_src(u16 value)
{
	u8 ce[12];
	u8 src[72];

	/* Fixed CE message header for a DMA'd SRC record */
	memcpy(ce, "\x00\x00\x04\x4A\x00\x00\x00\x48\x00\x00\x00\x00", 12);
	/* 72-byte SRC record: 44 header bytes, then "PROGxxxx" padded with
	 * spaces.  NOTE(review): the trailing spaces in this literal appear
	 * collapsed in this copy of the file; the literal must be exactly
	 * 72 bytes long -- confirm against the original source. */
	memcpy(src, "\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
		"\x00\x00\x00\x00PROGxxxx ",
		72);
	/* Raw binary value in the record header... */
	src[6] = value >> 8;
	src[7] = value & 255;
	/* ...and four ASCII hex digits patched over the "xxxx" text */
	src[44] = "0123456789ABCDEF"[(value >> 12) & 15];
	src[45] = "0123456789ABCDEF"[(value >> 8) & 15];
	src[46] = "0123456789ABCDEF"[(value >> 4) & 15];
	src[47] = "0123456789ABCDEF"[value & 15];
	dma_and_signal_ce_msg(ce, NULL, src, sizeof(src), 9 * 64 * 1024);
}
653
/*
 * Clear the VSP control panel.  Used to "erase" an SRC that was
 * previously displayed.
 */
static void mf_clear_src(void)
{
	/* CE message 0x4b clears the panel (cf. 0x4a in mf_display_src) */
	signal_ce_msg_simple(0x4b, NULL);
}
662
663void __init mf_display_progress(u16 value)
664{
665 if (!mf_initialized)
666 return;
667
668 if (0xFFFF == value)
669 mf_clear_src();
670 else
671 mf_display_progress_src(value);
672}
673
/*
 * Initialization code here.
 */
void __init mf_init(void)
{
	int i;

	spin_lock_init(&pending_event_spinlock);

	/* Seed the free list with the statically preallocated events */
	for (i = 0; i < PENDING_EVENT_PREALLOC_LEN; i++)
		free_pending_event(&pending_event_prealloc[i]);

	HvLpEvent_registerHandler(HvLpEvent_Type_MachineFac, &hv_handler);

	/* virtual continue ack */
	signal_ce_msg_simple(0x57, NULL);

	/* Publish readiness (mf_display_progress checks this flag) */
	mf_initialized = 1;
	mb();

	printk(KERN_NOTICE "mf.c: iSeries Linux LPAR Machine Facilities "
			"initialized\n");
}
697
/* Completion context for an asynchronous RTC read (see mf_get_rtc) */
struct rtc_time_data {
	struct completion com;		/* signalled by get_rtc_time_complete */
	struct ce_msg_data ce_msg;	/* copy of the CE response */
	int rc;				/* set to 0 when the response arrives */
};
703
704static void get_rtc_time_complete(void *token, struct ce_msg_data *ce_msg)
705{
706 struct rtc_time_data *rtc = token;
707
708 memcpy(&rtc->ce_msg, ce_msg, sizeof(rtc->ce_msg));
709 rtc->rc = 0;
710 complete(&rtc->com);
711}
712
713static int mf_set_rtc(struct rtc_time *tm)
714{
715 char ce_time[12];
716 u8 day, mon, hour, min, sec, y1, y2;
717 unsigned year;
718
719 year = 1900 + tm->tm_year;
720 y1 = year / 100;
721 y2 = year % 100;
722
723 sec = tm->tm_sec;
724 min = tm->tm_min;
725 hour = tm->tm_hour;
726 day = tm->tm_mday;
727 mon = tm->tm_mon + 1;
728
729 sec = bin2bcd(sec);
730 min = bin2bcd(min);
731 hour = bin2bcd(hour);
732 mon = bin2bcd(mon);
733 day = bin2bcd(day);
734 y1 = bin2bcd(y1);
735 y2 = bin2bcd(y2);
736
737 memset(ce_time, 0, sizeof(ce_time));
738 ce_time[3] = 0x41;
739 ce_time[4] = y1;
740 ce_time[5] = y2;
741 ce_time[6] = sec;
742 ce_time[7] = min;
743 ce_time[8] = hour;
744 ce_time[10] = day;
745 ce_time[11] = mon;
746
747 return signal_ce_msg(ce_time, NULL);
748}
749
/*
 * Decode a CE time-of-day response into a struct rtc_time.
 *
 * @rc:     error from fetching the time; when non-zero a fixed fallback
 *          date is reported and rc is returned unchanged
 * @ce_msg: raw CE response bytes (BCD-encoded time fields)
 * @tm:     output; note tm_mon is left 1-based here -- callers adjust
 *          (see iSeries_get_rtc_time)
 */
static int rtc_set_tm(int rc, u8 *ce_msg, struct rtc_time *tm)
{
	tm->tm_wday = 0;
	tm->tm_yday = 0;
	tm->tm_isdst = 0;
	if (rc) {
		/* Fetch failed: report a fixed placeholder date */
		tm->tm_sec = 0;
		tm->tm_min = 0;
		tm->tm_hour = 0;
		tm->tm_mday = 15;
		tm->tm_mon = 5;
		tm->tm_year = 52;
		return rc;
	}

	if ((ce_msg[2] == 0xa9) ||
	    (ce_msg[2] == 0xaf)) {
		/* TOD clock is not set */
		tm->tm_sec = 1;
		tm->tm_min = 1;
		tm->tm_hour = 1;
		tm->tm_mday = 10;
		tm->tm_mon = 8;
		tm->tm_year = 71;
		mf_set_rtc(tm);
		/* NOTE(review): control falls through and the (unset)
		 * ce_msg fields below overwrite the values just written --
		 * confirm this is intended. */
	}
	{
		u8 year = ce_msg[5];
		u8 sec = ce_msg[6];
		u8 min = ce_msg[7];
		u8 hour = ce_msg[8];
		u8 day = ce_msg[10];
		u8 mon = ce_msg[11];

		/* All on-the-wire fields are BCD */
		sec = bcd2bin(sec);
		min = bcd2bin(min);
		hour = bcd2bin(hour);
		day = bcd2bin(day);
		mon = bcd2bin(mon);
		year = bcd2bin(year);

		/* Two-digit year windowing: 00-69 -> 2000s, 70-99 -> 1900s */
		if (year <= 69)
			year += 100;

		tm->tm_sec = sec;
		tm->tm_min = min;
		tm->tm_hour = hour;
		tm->tm_mday = day;
		tm->tm_mon = mon;
		tm->tm_year = year;
	}

	return 0;
}
804
805static int mf_get_rtc(struct rtc_time *tm)
806{
807 struct ce_msg_comp_data ce_complete;
808 struct rtc_time_data rtc_data;
809 int rc;
810
811 memset(&ce_complete, 0, sizeof(ce_complete));
812 memset(&rtc_data, 0, sizeof(rtc_data));
813 init_completion(&rtc_data.com);
814 ce_complete.handler = &get_rtc_time_complete;
815 ce_complete.token = &rtc_data;
816 rc = signal_ce_msg_simple(0x40, &ce_complete);
817 if (rc)
818 return rc;
819 wait_for_completion(&rtc_data.com);
820 return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
821}
822
/* Poll-based variant of rtc_time_data, for use before interrupts are on */
struct boot_rtc_time_data {
	int busy;			/* cleared when the response arrives */
	struct ce_msg_data ce_msg;	/* copy of the CE response */
	int rc;				/* set to 0 when the response arrives */
};
828
829static void get_boot_rtc_time_complete(void *token, struct ce_msg_data *ce_msg)
830{
831 struct boot_rtc_time_data *rtc = token;
832
833 memcpy(&rtc->ce_msg, ce_msg, sizeof(rtc->ce_msg));
834 rtc->rc = 0;
835 rtc->busy = 0;
836}
837
838static int mf_get_boot_rtc(struct rtc_time *tm)
839{
840 struct ce_msg_comp_data ce_complete;
841 struct boot_rtc_time_data rtc_data;
842 int rc;
843
844 memset(&ce_complete, 0, sizeof(ce_complete));
845 memset(&rtc_data, 0, sizeof(rtc_data));
846 rtc_data.busy = 1;
847 ce_complete.handler = &get_boot_rtc_time_complete;
848 ce_complete.token = &rtc_data;
849 rc = signal_ce_msg_simple(0x40, &ce_complete);
850 if (rc)
851 return rc;
852 /* We need to poll here as we are not yet taking interrupts */
853 while (rtc_data.busy) {
854 if (hvlpevent_is_pending())
855 process_hvlpevents();
856 }
857 return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
858}
859
860#ifdef CONFIG_PROC_FS
861static int mf_cmdline_proc_show(struct seq_file *m, void *v)
862{
863 char *page, *p;
864 struct vsp_cmd_data vsp_cmd;
865 int rc;
866 dma_addr_t dma_addr;
867
868 /* The HV appears to return no more than 256 bytes of command line */
869 page = kmalloc(256, GFP_KERNEL);
870 if (!page)
871 return -ENOMEM;
872
873 dma_addr = iseries_hv_map(page, 256, DMA_FROM_DEVICE);
874 if (dma_addr == DMA_ERROR_CODE) {
875 kfree(page);
876 return -ENOMEM;
877 }
878 memset(page, 0, 256);
879 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
880 vsp_cmd.cmd = 33;
881 vsp_cmd.sub_data.kern.token = dma_addr;
882 vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
883 vsp_cmd.sub_data.kern.side = (u64)m->private;
884 vsp_cmd.sub_data.kern.length = 256;
885 mb();
886 rc = signal_vsp_instruction(&vsp_cmd);
887 iseries_hv_unmap(dma_addr, 256, DMA_FROM_DEVICE);
888 if (rc) {
889 kfree(page);
890 return rc;
891 }
892 if (vsp_cmd.result_code != 0) {
893 kfree(page);
894 return -ENOMEM;
895 }
896 p = page;
897 while (p - page < 256) {
898 if (*p == '\0' || *p == '\n') {
899 *p = '\n';
900 break;
901 }
902 p++;
903
904 }
905 seq_write(m, page, p - page);
906 kfree(page);
907 return 0;
908}
909
static int mf_cmdline_proc_open(struct inode *inode, struct file *file)
{
	/* PDE data carries the side index (0-3) set up in mf_proc_init */
	return single_open(file, mf_cmdline_proc_show, PDE(inode)->data);
}
914
#if 0
/*
 * Dead code (disabled by this #if 0): the old read_proc-style interface
 * for dumping a stored vmlinux image back out of the hypervisor via VSP
 * command 32.  Kept for reference only; not compiled.
 */
static int mf_getVmlinuxChunk(char *buffer, int *size, int offset, u64 side)
{
	struct vsp_cmd_data vsp_cmd;
	int rc;
	int len = *size;
	dma_addr_t dma_addr;

	dma_addr = iseries_hv_map(buffer, len, DMA_FROM_DEVICE);
	memset(buffer, 0, len);
	memset(&vsp_cmd, 0, sizeof(vsp_cmd));
	vsp_cmd.cmd = 32;
	vsp_cmd.sub_data.kern.token = dma_addr;
	vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
	vsp_cmd.sub_data.kern.side = side;
	vsp_cmd.sub_data.kern.offset = offset;
	vsp_cmd.sub_data.kern.length = len;
	mb();
	rc = signal_vsp_instruction(&vsp_cmd);
	if (rc == 0) {
		if (vsp_cmd.result_code == 0)
			*size = vsp_cmd.sub_data.length_out;
		else
			rc = -ENOMEM;
	}

	iseries_hv_unmap(dma_addr, len, DMA_FROM_DEVICE);

	return rc;
}

static int proc_mf_dump_vmlinux(char *page, char **start, off_t off,
		int count, int *eof, void *data)
{
	int sizeToGet = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (mf_getVmlinuxChunk(page, &sizeToGet, off, (u64)data) == 0) {
		if (sizeToGet != 0) {
			*start = page + off;
			return sizeToGet;
		}
		*eof = 1;
		return 0;
	}
	*eof = 1;
	return 0;
}
#endif
966
967static int mf_side_proc_show(struct seq_file *m, void *v)
968{
969 char mf_current_side = ' ';
970 struct vsp_cmd_data vsp_cmd;
971
972 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
973 vsp_cmd.cmd = 2;
974 vsp_cmd.sub_data.ipl_type = 0;
975 mb();
976
977 if (signal_vsp_instruction(&vsp_cmd) == 0) {
978 if (vsp_cmd.result_code == 0) {
979 switch (vsp_cmd.sub_data.ipl_type) {
980 case 0: mf_current_side = 'A';
981 break;
982 case 1: mf_current_side = 'B';
983 break;
984 case 2: mf_current_side = 'C';
985 break;
986 default: mf_current_side = 'D';
987 break;
988 }
989 }
990 }
991
992 seq_printf(m, "%c\n", mf_current_side);
993 return 0;
994}
995
/* Open handler for /proc/iSeries/mf/side */
static int mf_side_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, mf_side_proc_show, NULL);
}
1000
1001static ssize_t mf_side_proc_write(struct file *file, const char __user *buffer,
1002 size_t count, loff_t *pos)
1003{
1004 char side;
1005 u64 newSide;
1006 struct vsp_cmd_data vsp_cmd;
1007
1008 if (!capable(CAP_SYS_ADMIN))
1009 return -EACCES;
1010
1011 if (count == 0)
1012 return 0;
1013
1014 if (get_user(side, buffer))
1015 return -EFAULT;
1016
1017 switch (side) {
1018 case 'A': newSide = 0;
1019 break;
1020 case 'B': newSide = 1;
1021 break;
1022 case 'C': newSide = 2;
1023 break;
1024 case 'D': newSide = 3;
1025 break;
1026 default:
1027 printk(KERN_ERR "mf_proc.c: proc_mf_change_side: invalid side\n");
1028 return -EINVAL;
1029 }
1030
1031 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
1032 vsp_cmd.sub_data.ipl_type = newSide;
1033 vsp_cmd.cmd = 10;
1034
1035 (void)signal_vsp_instruction(&vsp_cmd);
1036
1037 return count;
1038}
1039
/* /proc/iSeries/mf/side: seq_file read plus a raw write handler */
static const struct file_operations mf_side_proc_fops = {
	.owner = THIS_MODULE,
	.open = mf_side_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = mf_side_proc_write,
};
1048
/* /proc/iSeries/mf/src is effectively write-only: reads show nothing */
static int mf_src_proc_show(struct seq_file *m, void *v)
{
	return 0;
}

static int mf_src_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, mf_src_proc_show, NULL);
}
1058
/*
 * Write handler for /proc/iSeries/mf/src: a single NUL byte clears the
 * panel; otherwise the first four bytes are displayed as an SRC word.
 * Requires CAP_SYS_ADMIN.
 */
static ssize_t mf_src_proc_write(struct file *file, const char __user *buffer,
		size_t count, loff_t *pos)
{
	char stkbuf[10];

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	/* Either exactly one byte (clear) or at least a full 4-byte SRC */
	if ((count < 4) && (count != 1)) {
		printk(KERN_ERR "mf_proc: invalid src\n");
		return -EINVAL;
	}

	if (count > (sizeof(stkbuf) - 1))
		count = sizeof(stkbuf) - 1;
	if (copy_from_user(stkbuf, buffer, count))
		return -EFAULT;

	if ((count == 1) && (*stkbuf == '\0'))
		mf_clear_src();
	else
		/* First four bytes taken verbatim as the SRC word */
		mf_display_src(*(u32 *)stkbuf);

	return count;
}
1084
/* /proc/iSeries/mf/src file operations */
static const struct file_operations mf_src_proc_fops = {
	.owner = THIS_MODULE,
	.open = mf_src_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = mf_src_proc_write,
};
1093
1094static ssize_t mf_cmdline_proc_write(struct file *file, const char __user *buffer,
1095 size_t count, loff_t *pos)
1096{
1097 void *data = PDE(file->f_path.dentry->d_inode)->data;
1098 struct vsp_cmd_data vsp_cmd;
1099 dma_addr_t dma_addr;
1100 char *page;
1101 int ret = -EACCES;
1102
1103 if (!capable(CAP_SYS_ADMIN))
1104 goto out;
1105
1106 dma_addr = 0;
1107 page = iseries_hv_alloc(count, &dma_addr, GFP_ATOMIC);
1108 ret = -ENOMEM;
1109 if (page == NULL)
1110 goto out;
1111
1112 ret = -EFAULT;
1113 if (copy_from_user(page, buffer, count))
1114 goto out_free;
1115
1116 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
1117 vsp_cmd.cmd = 31;
1118 vsp_cmd.sub_data.kern.token = dma_addr;
1119 vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
1120 vsp_cmd.sub_data.kern.side = (u64)data;
1121 vsp_cmd.sub_data.kern.length = count;
1122 mb();
1123 (void)signal_vsp_instruction(&vsp_cmd);
1124 ret = count;
1125
1126out_free:
1127 iseries_hv_free(count, page, dma_addr);
1128out:
1129 return ret;
1130}
1131
/* /proc/iSeries/mf/<side>/cmdline file operations */
static const struct file_operations mf_cmdline_proc_fops = {
	.owner = THIS_MODULE,
	.open = mf_cmdline_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = mf_cmdline_proc_write,
};
1140
/*
 * Write a chunk of a vmlinux image to the hypervisor (VSP command 30)
 * for the side stored in the proc entry's data, at the current file
 * offset.  Requires CAP_SYS_ADMIN.
 */
static ssize_t proc_mf_change_vmlinux(struct file *file,
				      const char __user *buf,
				      size_t count, loff_t *ppos)
{
	struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
	ssize_t rc;
	dma_addr_t dma_addr;
	char *page;
	struct vsp_cmd_data vsp_cmd;

	rc = -EACCES;
	if (!capable(CAP_SYS_ADMIN))
		goto out;

	dma_addr = 0;
	/* HV-visible bounce buffer for this chunk */
	page = iseries_hv_alloc(count, &dma_addr, GFP_ATOMIC);
	rc = -ENOMEM;
	if (page == NULL) {
		printk(KERN_ERR "mf.c: couldn't allocate memory to set vmlinux chunk\n");
		goto out;
	}
	rc = -EFAULT;
	if (copy_from_user(page, buf, count))
		goto out_free;

	memset(&vsp_cmd, 0, sizeof(vsp_cmd));
	vsp_cmd.cmd = 30;
	vsp_cmd.sub_data.kern.token = dma_addr;
	vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
	vsp_cmd.sub_data.kern.side = (u64)dp->data;
	vsp_cmd.sub_data.kern.offset = *ppos;
	vsp_cmd.sub_data.kern.length = count;
	mb();	/* command must be fully written before signalling */
	rc = signal_vsp_instruction(&vsp_cmd);
	if (rc)
		goto out_free;
	rc = -ENOMEM;
	if (vsp_cmd.result_code != 0)
		goto out_free;

	/* Advance the offset only on success */
	*ppos += count;
	rc = count;
out_free:
	iseries_hv_free(count, page, dma_addr);
out:
	return rc;
}
1188
/* /proc/iSeries/mf/<side>/vmlinux: write-only entry */
static const struct file_operations proc_vmlinux_operations = {
	.write		= proc_mf_change_vmlinux,
	.llseek		= default_llseek,
};
1193
/*
 * Build the /proc/iSeries/mf tree: per-side (A-D) "cmdline" and
 * "vmlinux" entries, plus global "side" and "src" files.
 * Returns 0 on success (or when not on iSeries), 1 on any failure.
 */
static int __init mf_proc_init(void)
{
	struct proc_dir_entry *mf_proc_root;
	struct proc_dir_entry *ent;
	struct proc_dir_entry *mf;
	char name[2];
	int i;

	if (!firmware_has_feature(FW_FEATURE_ISERIES))
		return 0;

	mf_proc_root = proc_mkdir("iSeries/mf", NULL);
	if (!mf_proc_root)
		return 1;

	name[1] = '\0';
	for (i = 0; i < 4; i++) {
		name[0] = 'A' + i;	/* one subdirectory per side */
		mf = proc_mkdir(name, mf_proc_root);
		if (!mf)
			return 1;

		/* The side index travels to the handlers as PDE data */
		ent = proc_create_data("cmdline", S_IRUSR|S_IWUSR, mf,
				&mf_cmdline_proc_fops, (void *)(long)i);
		if (!ent)
			return 1;

		if (i == 3)	/* no vmlinux entry for 'D' */
			continue;

		ent = proc_create_data("vmlinux", S_IFREG|S_IWUSR, mf,
				&proc_vmlinux_operations,
				(void *)(long)i);
		if (!ent)
			return 1;
	}

	ent = proc_create("side", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root,
			&mf_side_proc_fops);
	if (!ent)
		return 1;

	ent = proc_create("src", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root,
			&mf_src_proc_fops);
	if (!ent)
		return 1;

	return 0;
}

__initcall(mf_proc_init);
1245
1246#endif /* CONFIG_PROC_FS */
1247
/*
 * Get the RTC from the virtual service processor
 * This requires flowing LpEvents to the primary partition
 */
void iSeries_get_rtc_time(struct rtc_time *rtc_tm)
{
	mf_get_rtc(rtc_tm);
	/* rtc_set_tm leaves tm_mon 1-based; struct rtc_time wants 0-based */
	rtc_tm->tm_mon--;
}
1257
/*
 * Set the RTC in the virtual service processor
 * This requires flowing LpEvents to the primary partition
 */
int iSeries_set_rtc_time(struct rtc_time *tm)
{
	/* mf_set_rtc's return value is ignored; always reports success */
	mf_set_rtc(tm);
	return 0;
}
1267
/* Boot-time wall clock, polled from the VSP before interrupts are on */
unsigned long iSeries_get_boot_time(void)
{
	struct rtc_time tm;

	mf_get_boot_rtc(&tm);
	/* tm_mon is still 1-based here (no decrement as in
	 * iSeries_get_rtc_time) -- presumably matching mktime()'s 1-12
	 * month convention; confirm against kernel mktime */
	return mktime(tm.tm_year + 1900, tm.tm_mon, tm.tm_mday,
		      tm.tm_hour, tm.tm_min, tm.tm_sec);
}
diff --git a/arch/powerpc/platforms/iseries/misc.S b/arch/powerpc/platforms/iseries/misc.S
deleted file mode 100644
index 2c6ff0fdac98..000000000000
--- a/arch/powerpc/platforms/iseries/misc.S
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * This file contains miscellaneous low-level functions.
3 * Copyright (C) 1995-2005 IBM Corp
4 *
5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
6 * and Paul Mackerras.
7 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
8 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <asm/processor.h>
17#include <asm/asm-offsets.h>
18#include <asm/ppc_asm.h>
19
	.text

/* Handle pending interrupts in interrupt context */
_GLOBAL(iseries_handle_interrupts)
	li	r0,0x5555	/* NOTE(review): 0x5555 appears to be an
				 * iSeries-specific marker recognised by the
				 * sc (system call) path -- confirm */
	sc
	blr
diff --git a/arch/powerpc/platforms/iseries/naca.h b/arch/powerpc/platforms/iseries/naca.h
deleted file mode 100644
index f01708e12862..000000000000
--- a/arch/powerpc/platforms/iseries/naca.h
+++ /dev/null
@@ -1,24 +0,0 @@
1#ifndef _PLATFORMS_ISERIES_NACA_H
2#define _PLATFORMS_ISERIES_NACA_H
3
4/*
5 * c 2001 PPC 64 Team, IBM Corp
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <asm/types.h>
14
/* Fixed-layout control block; the per-field offset comments suggest the
 * layout is an ABI shared with firmware/loader code -- do not reorder. */
struct naca_struct {
	/* Kernel only data - undefined for user space */
	const void *xItVpdAreas;	/* VPD Data			0x00 */
	void *xRamDisk;			/* iSeries ramdisk		0x08 */
	u64 xRamDiskSize;		/* In pages			0x10 */
};

extern struct naca_struct naca;
23
24#endif /* _PLATFORMS_ISERIES_NACA_H */
diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c
deleted file mode 100644
index c75412884625..000000000000
--- a/arch/powerpc/platforms/iseries/pci.c
+++ /dev/null
@@ -1,919 +0,0 @@
1/*
2 * Copyright (C) 2001 Allan Trautman, IBM Corporation
3 * Copyright (C) 2005,2007 Stephen Rothwell, IBM Corp
4 *
5 * iSeries specific routines for PCI.
6 *
7 * Based on code from pci.c and iSeries_pci.c 32bit
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#undef DEBUG
25
26#include <linux/jiffies.h>
27#include <linux/kernel.h>
28#include <linux/list.h>
29#include <linux/string.h>
30#include <linux/slab.h>
31#include <linux/init.h>
32#include <linux/pci.h>
33#include <linux/of.h>
34#include <linux/ratelimit.h>
35
36#include <asm/types.h>
37#include <asm/io.h>
38#include <asm/irq.h>
39#include <asm/prom.h>
40#include <asm/machdep.h>
41#include <asm/pci-bridge.h>
42#include <asm/iommu.h>
43#include <asm/abs_addr.h>
44#include <asm/firmware.h>
45
46#include <asm/iseries/hv_types.h>
47#include <asm/iseries/hv_call_xm.h>
48#include <asm/iseries/mf.h>
49#include <asm/iseries/iommu.h>
50
51#include <asm/ppc-pci.h>
52
53#include "irq.h"
54#include "pci.h"
55#include "call_pci.h"
56
57#define PCI_RETRY_MAX 3
58static int limit_pci_retries = 1; /* Set Retry Error on. */
59
60/*
61 * Table defines
62 * Each Entry size is 4 MB * 1024 Entries = 4GB I/O address space.
63 */
64#define IOMM_TABLE_MAX_ENTRIES 1024
65#define IOMM_TABLE_ENTRY_SIZE 0x0000000000400000UL
66#define BASE_IO_MEMORY 0xE000000000000000UL
67#define END_IO_MEMORY 0xEFFFFFFFFFFFFFFFUL
68
69static unsigned long max_io_memory = BASE_IO_MEMORY;
70static long current_iomm_table_entry;
71
72/*
73 * Lookup Tables.
74 */
75static struct device_node *iomm_table[IOMM_TABLE_MAX_ENTRIES];
76static u64 ds_addr_table[IOMM_TABLE_MAX_ENTRIES];
77
78static DEFINE_SPINLOCK(iomm_table_lock);
79
/*
 * Generate a Direct Select Address for the Hypervisor:
 * bits 48+ hold the bus number, bits 40-47 the sub-bus, and a fixed
 * 0x10 sits in bits 32-39.
 */
static inline u64 iseries_ds_addr(struct device_node *node)
{
	struct pci_dn *pdn = PCI_DN(node);
	const u32 *sbp = of_get_property(node, "linux,subbus", NULL);

	/* A missing "linux,subbus" property is treated as sub-bus 0 */
	return ((u64)pdn->busno << 48) + ((u64)(sbp ? *sbp : 0) << 40)
			+ ((u64)0x10 << 32);
}
91
92/*
93 * Size of Bus VPD data
94 */
95#define BUS_VPDSIZE 1024
96
97/*
98 * Bus Vpd Tags
99 */
100#define VPD_END_OF_AREA 0x79
101#define VPD_ID_STRING 0x82
102#define VPD_VENDOR_AREA 0x84
103
104/*
105 * Mfg Area Tags
106 */
107#define VPD_FRU_FRAME_ID 0x4649 /* "FI" */
108#define VPD_SLOT_MAP_FORMAT 0x4D46 /* "MF" */
109#define VPD_SLOT_MAP 0x534D /* "SM" */
110
111/*
112 * Structures of the areas
113 */
114struct mfg_vpd_area {
115 u16 tag;
116 u8 length;
117 u8 data1;
118 u8 data2;
119};
120#define MFG_ENTRY_SIZE 3
121
122struct slot_map {
123 u8 agent;
124 u8 secondary_agent;
125 u8 phb;
126 char card_location[3];
127 char parms[8];
128 char reserved[2];
129};
130#define SLOT_ENTRY_SIZE 16
131
132/*
133 * Parse the Slot Area
134 */
135static void __init iseries_parse_slot_area(struct slot_map *map, int len,
136 HvAgentId agent, u8 *phb, char card[4])
137{
138 /*
139 * Parse Slot label until we find the one requested
140 */
141 while (len > 0) {
142 if (map->agent == agent) {
143 /*
144 * If Phb wasn't found, grab the entry first one found.
145 */
146 if (*phb == 0xff)
147 *phb = map->phb;
148 /* Found it, extract the data. */
149 if (map->phb == *phb) {
150 memcpy(card, &map->card_location, 3);
151 card[3] = 0;
152 break;
153 }
154 }
155 /* Point to the next Slot */
156 map = (struct slot_map *)((char *)map + SLOT_ENTRY_SIZE);
157 len -= SLOT_ENTRY_SIZE;
158 }
159}
160
/*
 * Parse the Mfg Area
 */
static void __init iseries_parse_mfg_area(struct mfg_vpd_area *area, int len,
		HvAgentId agent, u8 *phb, u8 *frame, char card[4])
{
	u16 slot_map_fmt = 0;

	/* Parse Mfg Data */
	while (len > 0) {
		int mfg_tag_len = area->length;
		/* Frame ID (FI 4649020310 ) */
		if (area->tag == VPD_FRU_FRAME_ID)
			*frame = area->data1;
		/* Slot Map Format (MF 4D46020004 ) */
		else if (area->tag == VPD_SLOT_MAP_FORMAT)
			slot_map_fmt = (area->data1 * 256)
				+ area->data2;
		/* Slot Map (SM 534D90 */
		else if (area->tag == VPD_SLOT_MAP) {
			struct slot_map *slot_map;

			/* Format 0x1004 apparently carries one extra byte
			 * before the slot entries */
			if (slot_map_fmt == 0x1004)
				slot_map = (struct slot_map *)((char *)area
						+ MFG_ENTRY_SIZE + 1);
			else
				slot_map = (struct slot_map *)((char *)area
						+ MFG_ENTRY_SIZE);
			iseries_parse_slot_area(slot_map, mfg_tag_len,
					agent, phb, card);
		}
		/*
		 * Point to the next Mfg Area
		 * Use defined size, sizeof give wrong answer
		 */
		area = (struct mfg_vpd_area *)((char *)area + mfg_tag_len
				+ MFG_ENTRY_SIZE);
		len -= (mfg_tag_len + MFG_ENTRY_SIZE);
	}
}
201
202/*
203 * Look for "BUS".. Data is not Null terminated.
204 * PHBID of 0xFF indicates PHB was not found in VPD Data.
205 */
206static u8 __init iseries_parse_phbid(u8 *area, int len)
207{
208 while (len > 0) {
209 if ((*area == 'B') && (*(area + 1) == 'U')
210 && (*(area + 2) == 'S')) {
211 area += 3;
212 while (*area == ' ')
213 area++;
214 return *area & 0x0F;
215 }
216 area++;
217 len--;
218 }
219 return 0xff;
220}
221
222/*
223 * Parse out the VPD Areas
224 */
225static void __init iseries_parse_vpd(u8 *data, int data_len,
226 HvAgentId agent, u8 *frame, char card[4])
227{
228 u8 phb = 0xff;
229
230 while (data_len > 0) {
231 int len;
232 u8 tag = *data;
233
234 if (tag == VPD_END_OF_AREA)
235 break;
236 len = *(data + 1) + (*(data + 2) * 256);
237 data += 3;
238 data_len -= 3;
239 if (tag == VPD_ID_STRING)
240 phb = iseries_parse_phbid(data, len);
241 else if (tag == VPD_VENDOR_AREA)
242 iseries_parse_mfg_area((struct mfg_vpd_area *)data, len,
243 agent, &phb, frame, card);
244 /* Point to next Area. */
245 data += len;
246 data_len -= len;
247 }
248}
249
250static int __init iseries_get_location_code(u16 bus, HvAgentId agent,
251 u8 *frame, char card[4])
252{
253 int status = 0;
254 int bus_vpd_len = 0;
255 u8 *bus_vpd = kmalloc(BUS_VPDSIZE, GFP_KERNEL);
256
257 if (bus_vpd == NULL) {
258 printk("PCI: Bus VPD Buffer allocation failure.\n");
259 return 0;
260 }
261 bus_vpd_len = HvCallPci_getBusVpd(bus, iseries_hv_addr(bus_vpd),
262 BUS_VPDSIZE);
263 if (bus_vpd_len == 0) {
264 printk("PCI: Bus VPD Buffer zero length.\n");
265 goto out_free;
266 }
267 /* printk("PCI: bus_vpd: %p, %d\n",bus_vpd, bus_vpd_len); */
268 /* Make sure this is what I think it is */
269 if (*bus_vpd != VPD_ID_STRING) {
270 printk("PCI: Bus VPD Buffer missing starting tag.\n");
271 goto out_free;
272 }
273 iseries_parse_vpd(bus_vpd, bus_vpd_len, agent, frame, card);
274 status = 1;
275out_free:
276 kfree(bus_vpd);
277 return status;
278}
279
/*
 * Prints the device information.
 * - Pass in pci_dev* pointer to the device.
 * - Pass in the device count
 *
 * Format:
 * PCI: Bus 0, Device 26, Vendor 0x12AE Frame 1, Card C10 Ethernet
 * controller
 */
static void __init iseries_device_information(struct pci_dev *pdev,
					      u16 bus, HvSubBusNumber subbus)
{
	u8 frame = 0;
	char card[4];
	HvAgentId agent;

	/* Agent id derives from the device/function within the sub-bus */
	agent = ISERIES_PCI_AGENTID(ISERIES_GET_DEVICE_FROM_SUBBUS(subbus),
			ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus));

	/* Only print when the location-code lookup succeeded */
	if (iseries_get_location_code(bus, agent, &frame, card)) {
		printk(KERN_INFO "PCI: %s, Vendor %04X Frame%3d, "
			"Card %4s 0x%04X\n", pci_name(pdev), pdev->vendor,
			frame, card, (int)(pdev->class >> 8));
	}
}
305
/*
 * iomm_table_allocate_entry
 *
 * Adds pci_dev entry in address translation table
 *
 * - Allocates the number of entries required in table base on BAR
 *   size.
 * - Allocates starting at BASE_IO_MEMORY and increases.
 * - The size is round up to be a multiple of entry size.
 * - CurrentIndex is incremented to keep track of the last entry.
 * - Builds the resource entry for allocated BARs.
 */
static void __init iomm_table_allocate_entry(struct pci_dev *dev, int bar_num)
{
	struct resource *bar_res = &dev->resource[bar_num];
	long bar_size = pci_resource_len(dev, bar_num);
	struct device_node *dn = pci_device_to_OF_node(dev);

	/*
	 * No space to allocate, quick exit, skip Allocation.
	 */
	if (bar_size == 0)
		return;
	/*
	 * Set Resource values.
	 */
	spin_lock(&iomm_table_lock);
	bar_res->start = BASE_IO_MEMORY +
		IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
	bar_res->end = bar_res->start + bar_size - 1;
	/*
	 * Allocate the number of table entries needed for BAR.
	 * NOTE(review): nothing here bounds current_iomm_table_entry
	 * against IOMM_TABLE_MAX_ENTRIES -- confirm total BAR space can
	 * never exceed the address range the table covers.
	 */
	while (bar_size > 0 ) {
		iomm_table[current_iomm_table_entry] = dn;
		ds_addr_table[current_iomm_table_entry] =
			iseries_ds_addr(dn) | (bar_num << 24);
		bar_size -= IOMM_TABLE_ENTRY_SIZE;
		++current_iomm_table_entry;
	}
	max_io_memory = BASE_IO_MEMORY +
		IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
	spin_unlock(&iomm_table_lock);
}
350
351/*
352 * allocate_device_bars
353 *
354 * - Allocates ALL pci_dev BAR's and updates the resources with the
355 * BAR value. BARS with zero length will have the resources
356 * The HvCallPci_getBarParms is used to get the size of the BAR
357 * space. It calls iomm_table_allocate_entry to allocate
358 * each entry.
359 * - Loops through The Bar resources(0 - 5) including the ROM
360 * is resource(6).
361 */
362static void __init allocate_device_bars(struct pci_dev *dev)
363{
364 int bar_num;
365
366 for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num)
367 iomm_table_allocate_entry(dev, bar_num);
368}
369
370/*
371 * Log error information to system console.
372 * Filter out the device not there errors.
373 * PCI: EADs Connect Failed 0x18.58.10 Rc: 0x00xx
374 * PCI: Read Vendor Failed 0x18.58.10 Rc: 0x00xx
375 * PCI: Connect Bus Unit Failed 0x18.58.10 Rc: 0x00xx
376 */
377static void pci_log_error(char *error, int bus, int subbus,
378 int agent, int hv_res)
379{
380 if (hv_res == 0x0302)
381 return;
382 printk(KERN_ERR "PCI: %s Failed: 0x%02X.%02X.%02X Rc: 0x%04X",
383 error, bus, subbus, agent, hv_res);
384}
385
386/*
387 * Look down the chain to find the matching Device Device
388 */
389static struct device_node *find_device_node(int bus, int devfn)
390{
391 struct device_node *node;
392
393 for (node = NULL; (node = of_find_all_nodes(node)); ) {
394 struct pci_dn *pdn = PCI_DN(node);
395
396 if (pdn && (bus == pdn->busno) && (devfn == pdn->devfn))
397 return node;
398 }
399 return NULL;
400}
401
/*
 * iSeries_pcibios_fixup_resources
 *
 * Fixes up all resources for devices
 */
void __init iSeries_pcibios_fixup_resources(struct pci_dev *pdev)
{
	const u32 *agent;
	const u32 *sub_bus;
	unsigned char bus = pdev->bus->number;
	struct device_node *node;
	int i;

	node = pci_device_to_OF_node(pdev);
	pr_debug("PCI: iSeries %s, pdev %p, node %p\n",
			pci_name(pdev), pdev, node);
	if (!node) {
		/* No device-tree node: zero every BAR's flags so the
		 * device is effectively disabled */
		printk("PCI: %s disabled, device tree entry not found !\n",
				pci_name(pdev));
		for (i = 0; i <= PCI_ROM_RESOURCE; i++)
			pdev->resource[i].flags = 0;
		return;
	}
	sub_bus = of_get_property(node, "linux,subbus", NULL);
	agent = of_get_property(node, "linux,agent-id", NULL);
	if (agent && sub_bus) {
		/* Connect the HV-side bus unit and program its IRQ line */
		u8 irq = iSeries_allocate_IRQ(bus, 0, *sub_bus);
		int err;

		err = HvCallXm_connectBusUnit(bus, *sub_bus, *agent, irq);
		if (err)
			pci_log_error("Connect Bus Unit",
					bus, *sub_bus, *agent, err);
		else {
			err = HvCallPci_configStore8(bus, *sub_bus,
					*agent, PCI_INTERRUPT_LINE, irq);
			if (err)
				pci_log_error("PciCfgStore Irq Failed!",
						bus, *sub_bus, *agent, err);
			else
				pdev->irq = irq;
		}
	}

	allocate_device_bars(pdev);
	if (likely(sub_bus))
		iseries_device_information(pdev, bus, *sub_bus);
	else
		printk(KERN_ERR "PCI: Device node %s has missing or invalid "
				"linux,subbus property\n", node->full_name);
}
453
/*
 * iSeries_pci_final_fixup(void)
 */
void __init iSeries_pci_final_fixup(void)
{
	/* Fix up at the device node and pci_dev relationship */
	mf_display_src(0xC9000100);	/* progress SRC before IRQ activation */
	iSeries_activate_IRQs();
	mf_display_src(0xC9000200);	/* progress SRC after IRQ activation */
}
464
465/*
466 * Config space read and write functions.
467 * For now at least, we look for the device node for the bus and devfn
468 * that we are asked to access. It may be possible to translate the devfn
469 * to a subbus and deviceid more directly.
470 */
/* Hypervisor config-space accessors indexed by (size - 1) & 3; the
 * fourth slot aliases the 32-bit call so a size of 4 maps correctly. */
static u64 hv_cfg_read_func[4] = {
	HvCallPciConfigLoad8, HvCallPciConfigLoad16,
	HvCallPciConfigLoad32, HvCallPciConfigLoad32
};

static u64 hv_cfg_write_func[4] = {
	HvCallPciConfigStore8, HvCallPciConfigStore16,
	HvCallPciConfigStore32, HvCallPciConfigStore32
};
480
481/*
482 * Read PCI config space
483 */
484static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
485 int offset, int size, u32 *val)
486{
487 struct device_node *node = find_device_node(bus->number, devfn);
488 u64 fn;
489 struct HvCallPci_LoadReturn ret;
490
491 if (node == NULL)
492 return PCIBIOS_DEVICE_NOT_FOUND;
493 if (offset > 255) {
494 *val = ~0;
495 return PCIBIOS_BAD_REGISTER_NUMBER;
496 }
497
498 fn = hv_cfg_read_func[(size - 1) & 3];
499 HvCall3Ret16(fn, &ret, iseries_ds_addr(node), offset, 0);
500
501 if (ret.rc != 0) {
502 *val = ~0;
503 return PCIBIOS_DEVICE_NOT_FOUND; /* or something */
504 }
505
506 *val = ret.value;
507 return 0;
508}
509
510/*
511 * Write PCI config space
512 */
513
514static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
515 int offset, int size, u32 val)
516{
517 struct device_node *node = find_device_node(bus->number, devfn);
518 u64 fn;
519 u64 ret;
520
521 if (node == NULL)
522 return PCIBIOS_DEVICE_NOT_FOUND;
523 if (offset > 255)
524 return PCIBIOS_BAD_REGISTER_NUMBER;
525
526 fn = hv_cfg_write_func[(size - 1) & 3];
527 ret = HvCall4(fn, iseries_ds_addr(node), offset, val, 0);
528
529 if (ret != 0)
530 return PCIBIOS_DEVICE_NOT_FOUND;
531
532 return 0;
533}
534
/* Config-space accessors; installed on every PHB by iSeries_pcibios_init() */
static struct pci_ops iSeries_pci_ops = {
	.read = iSeries_pci_read_config,
	.write = iSeries_pci_write_config
};
539
540/*
541 * Check Return Code
542 * -> On Failure, print and log information.
543 * Increment Retry Count, if exceeds max, panic partition.
544 *
545 * PCI: Device 23.90 ReadL I/O Error( 0): 0x1234
546 * PCI: Device 23.90 ReadL Retry( 1)
547 * PCI: Device 23.90 ReadL Retry Successful(1)
548 */
549static int check_return_code(char *type, struct device_node *dn,
550 int *retry, u64 ret)
551{
552 if (ret != 0) {
553 struct pci_dn *pdn = PCI_DN(dn);
554
555 (*retry)++;
556 printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n",
557 type, pdn->busno, pdn->devfn,
558 *retry, (int)ret);
559 /*
560 * Bump the retry and check for retry count exceeded.
561 * If, Exceeded, panic the system.
562 */
563 if (((*retry) > PCI_RETRY_MAX) &&
564 (limit_pci_retries > 0)) {
565 mf_display_src(0xB6000103);
566 panic_timeout = 0;
567 panic("PCI: Hardware I/O Error, SRC B6000103, "
568 "Automatic Reboot Disabled.\n");
569 }
570 return -1; /* Retry Try */
571 }
572 return 0;
573}
574
/*
 * Translate the I/O Address into a device node, bar, and bar offset.
 * Note: Make sure the passed variable end up on the stack to avoid
 * the exposure of being device global.
 *
 * Returns the device node owning the address, filling *dsaptr with the
 * hypervisor DSA token and *bar_offset with the offset into the BAR.
 * Returns NULL (after a rate-limited complaint) for addresses outside
 * the emulated I/O window; panics if the window maps to no device.
 */
static inline struct device_node *xlate_iomm_address(
		const volatile void __iomem *addr,
		u64 *dsaptr, u64 *bar_offset, const char *func)
{
	unsigned long orig_addr;
	unsigned long base_addr;
	unsigned long ind;
	struct device_node *dn;

	orig_addr = (unsigned long __force)addr;
	/* Reject tokens outside [BASE_IO_MEMORY, max_io_memory); rate-limit
	 * the message since a buggy driver could spam this path. */
	if ((orig_addr < BASE_IO_MEMORY) || (orig_addr >= max_io_memory)) {
		static DEFINE_RATELIMIT_STATE(ratelimit, 60 * HZ, 10);

		if (__ratelimit(&ratelimit))
			printk(KERN_ERR
				"iSeries_%s: invalid access at IO address %p\n",
				func, addr);
		return NULL;
	}
	/* Each IOMM_TABLE_ENTRY_SIZE slice of the window maps to one table
	 * slot; the remainder is the offset within that device's BAR. */
	base_addr = orig_addr - BASE_IO_MEMORY;
	ind = base_addr / IOMM_TABLE_ENTRY_SIZE;
	dn = iomm_table[ind];

	if (dn != NULL) {
		*dsaptr = ds_addr_table[ind];
		*bar_offset = base_addr % IOMM_TABLE_ENTRY_SIZE;
	} else
		panic("PCI: Invalid PCI IO address detected!\n");
	return dn;
}
610
611/*
612 * Read MM I/O Instructions for the iSeries
613 * On MM I/O error, all ones are returned and iSeries_pci_IoError is cal
614 * else, data is returned in Big Endian format.
615 */
616static u8 iseries_readb(const volatile void __iomem *addr)
617{
618 u64 bar_offset;
619 u64 dsa;
620 int retry = 0;
621 struct HvCallPci_LoadReturn ret;
622 struct device_node *dn =
623 xlate_iomm_address(addr, &dsa, &bar_offset, "read_byte");
624
625 if (dn == NULL)
626 return 0xff;
627 do {
628 HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, bar_offset, 0);
629 } while (check_return_code("RDB", dn, &retry, ret.rc) != 0);
630
631 return ret.value;
632}
633
634static u16 iseries_readw_be(const volatile void __iomem *addr)
635{
636 u64 bar_offset;
637 u64 dsa;
638 int retry = 0;
639 struct HvCallPci_LoadReturn ret;
640 struct device_node *dn =
641 xlate_iomm_address(addr, &dsa, &bar_offset, "read_word");
642
643 if (dn == NULL)
644 return 0xffff;
645 do {
646 HvCall3Ret16(HvCallPciBarLoad16, &ret, dsa,
647 bar_offset, 0);
648 } while (check_return_code("RDW", dn, &retry, ret.rc) != 0);
649
650 return ret.value;
651}
652
653static u32 iseries_readl_be(const volatile void __iomem *addr)
654{
655 u64 bar_offset;
656 u64 dsa;
657 int retry = 0;
658 struct HvCallPci_LoadReturn ret;
659 struct device_node *dn =
660 xlate_iomm_address(addr, &dsa, &bar_offset, "read_long");
661
662 if (dn == NULL)
663 return 0xffffffff;
664 do {
665 HvCall3Ret16(HvCallPciBarLoad32, &ret, dsa,
666 bar_offset, 0);
667 } while (check_return_code("RDL", dn, &retry, ret.rc) != 0);
668
669 return ret.value;
670}
671
672/*
673 * Write MM I/O Instructions for the iSeries
674 *
675 */
676static void iseries_writeb(u8 data, volatile void __iomem *addr)
677{
678 u64 bar_offset;
679 u64 dsa;
680 int retry = 0;
681 u64 rc;
682 struct device_node *dn =
683 xlate_iomm_address(addr, &dsa, &bar_offset, "write_byte");
684
685 if (dn == NULL)
686 return;
687 do {
688 rc = HvCall4(HvCallPciBarStore8, dsa, bar_offset, data, 0);
689 } while (check_return_code("WWB", dn, &retry, rc) != 0);
690}
691
692static void iseries_writew_be(u16 data, volatile void __iomem *addr)
693{
694 u64 bar_offset;
695 u64 dsa;
696 int retry = 0;
697 u64 rc;
698 struct device_node *dn =
699 xlate_iomm_address(addr, &dsa, &bar_offset, "write_word");
700
701 if (dn == NULL)
702 return;
703 do {
704 rc = HvCall4(HvCallPciBarStore16, dsa, bar_offset, data, 0);
705 } while (check_return_code("WWW", dn, &retry, rc) != 0);
706}
707
708static void iseries_writel_be(u32 data, volatile void __iomem *addr)
709{
710 u64 bar_offset;
711 u64 dsa;
712 int retry = 0;
713 u64 rc;
714 struct device_node *dn =
715 xlate_iomm_address(addr, &dsa, &bar_offset, "write_long");
716
717 if (dn == NULL)
718 return;
719 do {
720 rc = HvCall4(HvCallPciBarStore32, dsa, bar_offset, data, 0);
721 } while (check_return_code("WWL", dn, &retry, rc) != 0);
722}
723
724static u16 iseries_readw(const volatile void __iomem *addr)
725{
726 return le16_to_cpu(iseries_readw_be(addr));
727}
728
729static u32 iseries_readl(const volatile void __iomem *addr)
730{
731 return le32_to_cpu(iseries_readl_be(addr));
732}
733
734static void iseries_writew(u16 data, volatile void __iomem *addr)
735{
736 iseries_writew_be(cpu_to_le16(data), addr);
737}
738
739static void iseries_writel(u32 data, volatile void __iomem *addr)
740{
741 iseries_writel(cpu_to_le32(data), addr);
742}
743
744static void iseries_readsb(const volatile void __iomem *addr, void *buf,
745 unsigned long count)
746{
747 u8 *dst = buf;
748 while(count-- > 0)
749 *(dst++) = iseries_readb(addr);
750}
751
752static void iseries_readsw(const volatile void __iomem *addr, void *buf,
753 unsigned long count)
754{
755 u16 *dst = buf;
756 while(count-- > 0)
757 *(dst++) = iseries_readw_be(addr);
758}
759
760static void iseries_readsl(const volatile void __iomem *addr, void *buf,
761 unsigned long count)
762{
763 u32 *dst = buf;
764 while(count-- > 0)
765 *(dst++) = iseries_readl_be(addr);
766}
767
768static void iseries_writesb(volatile void __iomem *addr, const void *buf,
769 unsigned long count)
770{
771 const u8 *src = buf;
772 while(count-- > 0)
773 iseries_writeb(*(src++), addr);
774}
775
776static void iseries_writesw(volatile void __iomem *addr, const void *buf,
777 unsigned long count)
778{
779 const u16 *src = buf;
780 while(count-- > 0)
781 iseries_writew_be(*(src++), addr);
782}
783
784static void iseries_writesl(volatile void __iomem *addr, const void *buf,
785 unsigned long count)
786{
787 const u32 *src = buf;
788 while(count-- > 0)
789 iseries_writel_be(*(src++), addr);
790}
791
792static void iseries_memset_io(volatile void __iomem *addr, int c,
793 unsigned long n)
794{
795 volatile char __iomem *d = addr;
796
797 while (n-- > 0)
798 iseries_writeb(c, d++);
799}
800
801static void iseries_memcpy_fromio(void *dest, const volatile void __iomem *src,
802 unsigned long n)
803{
804 char *d = dest;
805 const volatile char __iomem *s = src;
806
807 while (n-- > 0)
808 *d++ = iseries_readb(s++);
809}
810
811static void iseries_memcpy_toio(volatile void __iomem *dest, const void *src,
812 unsigned long n)
813{
814 const char *s = src;
815 volatile char __iomem *d = dest;
816
817 while (n-- > 0)
818 iseries_writeb(*s++, d++);
819}
820
/* We only set MMIO ops. The default PIO ops will be default
 * to the MMIO ops + pci_io_base which is 0 on iSeries as
 * expected so both should work.
 *
 * Note that we don't implement the readq/writeq versions as
 * I don't know of an HV call for doing so. Thus, the default
 * operation will be used instead, which will fault as the value
 * returned by iSeries for MMIO addresses always hits a non mapped
 * area. This is as good as the BUG() we used to have there.
 */
static struct ppc_pci_io __initdata iseries_pci_io = {
	.readb = iseries_readb,
	.readw = iseries_readw,
	.readl = iseries_readl,
	.readw_be = iseries_readw_be,
	.readl_be = iseries_readl_be,
	.writeb = iseries_writeb,
	.writew = iseries_writew,
	.writel = iseries_writel,
	.writew_be = iseries_writew_be,
	.writel_be = iseries_writel_be,
	.readsb = iseries_readsb,
	.readsw = iseries_readsw,
	.readsl = iseries_readsl,
	.writesb = iseries_writesb,
	.writesw = iseries_writesw,
	.writesl = iseries_writesl,
	.memset_io = iseries_memset_io,
	.memcpy_fromio = iseries_memcpy_fromio,
	.memcpy_toio = iseries_memcpy_toio,
};
852
853/*
854 * iSeries_pcibios_init
855 *
856 * Description:
857 * This function checks for all possible system PCI host bridges that connect
858 * PCI buses. The system hypervisor is queried as to the guest partition
859 * ownership status. A pci_controller is built for any bus which is partially
860 * owned or fully owned by this guest partition.
861 */
862void __init iSeries_pcibios_init(void)
863{
864 struct pci_controller *phb;
865 struct device_node *root = of_find_node_by_path("/");
866 struct device_node *node = NULL;
867
868 /* Install IO hooks */
869 ppc_pci_io = iseries_pci_io;
870
871 pci_probe_only = 1;
872
873 /* iSeries has no IO space in the common sense, it needs to set
874 * the IO base to 0
875 */
876 pci_io_base = 0;
877
878 if (root == NULL) {
879 printk(KERN_CRIT "iSeries_pcibios_init: can't find root "
880 "of device tree\n");
881 return;
882 }
883 while ((node = of_get_next_child(root, node)) != NULL) {
884 HvBusNumber bus;
885 const u32 *busp;
886
887 if ((node->type == NULL) || (strcmp(node->type, "pci") != 0))
888 continue;
889
890 busp = of_get_property(node, "bus-range", NULL);
891 if (busp == NULL)
892 continue;
893 bus = *busp;
894 printk("bus %d appears to exist\n", bus);
895 phb = pcibios_alloc_controller(node);
896 if (phb == NULL)
897 continue;
898 /* All legacy iSeries PHBs are in domain zero */
899 phb->global_number = 0;
900
901 phb->first_busno = bus;
902 phb->last_busno = bus;
903 phb->ops = &iSeries_pci_ops;
904 phb->io_base_virt = (void __iomem *)_IO_BASE;
905 phb->io_resource.flags = IORESOURCE_IO;
906 phb->io_resource.start = BASE_IO_MEMORY;
907 phb->io_resource.end = END_IO_MEMORY;
908 phb->io_resource.name = "iSeries PCI IO";
909 phb->mem_resources[0].flags = IORESOURCE_MEM;
910 phb->mem_resources[0].start = BASE_IO_MEMORY;
911 phb->mem_resources[0].end = END_IO_MEMORY;
912 phb->mem_resources[0].name = "Series PCI MEM";
913 }
914
915 of_node_put(root);
916
917 pci_devs_phb_init();
918}
919
diff --git a/arch/powerpc/platforms/iseries/pci.h b/arch/powerpc/platforms/iseries/pci.h
deleted file mode 100644
index d9cf974c2718..000000000000
--- a/arch/powerpc/platforms/iseries/pci.h
+++ /dev/null
@@ -1,58 +0,0 @@
1#ifndef _PLATFORMS_ISERIES_PCI_H
2#define _PLATFORMS_ISERIES_PCI_H
3
4/*
5 * Created by Allan Trautman on Tue Feb 20, 2001.
6 *
7 * Define some useful macros for the iSeries pci routines.
8 * Copyright (C) 2001 Allan H Trautman, IBM Corporation
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the:
22 * Free Software Foundation, Inc.,
23 * 59 Temple Place, Suite 330,
24 * Boston, MA 02111-1307 USA
25 *
26 * Change Activity:
27 * Created Feb 20, 2001
28 * Added device reset, March 22, 2001
29 * Ported to ppc64, May 25, 2001
30 * End Change Activity
31 */
32
33/*
34 * Decodes Linux DevFn to iSeries DevFn, bridge device, or function.
35 * For Linux, see PCI_SLOT and PCI_FUNC in include/linux/pci.h
36 */
37
38#define ISERIES_PCI_AGENTID(idsel, func) \
39 (((idsel & 0x0F) << 4) | (func & 0x07))
40#define ISERIES_ENCODE_DEVICE(agentid) \
41 ((0x10) | ((agentid & 0x20) >> 2) | (agentid & 0x07))
42
43#define ISERIES_GET_DEVICE_FROM_SUBBUS(subbus) ((subbus >> 5) & 0x7)
44#define ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus) ((subbus >> 2) & 0x7)
45
46struct pci_dev;
47
48#ifdef CONFIG_PCI
49extern void iSeries_pcibios_init(void);
50extern void iSeries_pci_final_fixup(void);
51extern void iSeries_pcibios_fixup_resources(struct pci_dev *dev);
52#else
53static inline void iSeries_pcibios_init(void) { }
54static inline void iSeries_pci_final_fixup(void) { }
55static inline void iSeries_pcibios_fixup_resources(struct pci_dev *dev) {}
56#endif
57
58#endif /* _PLATFORMS_ISERIES_PCI_H */
diff --git a/arch/powerpc/platforms/iseries/proc.c b/arch/powerpc/platforms/iseries/proc.c
deleted file mode 100644
index 06763682db47..000000000000
--- a/arch/powerpc/platforms/iseries/proc.c
+++ /dev/null
@@ -1,120 +0,0 @@
1/*
2 * Copyright (C) 2001 Kyle A. Lucke IBM Corporation
3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19#include <linux/init.h>
20#include <linux/proc_fs.h>
21#include <linux/seq_file.h>
22#include <linux/param.h> /* for HZ */
23#include <asm/paca.h>
24#include <asm/processor.h>
25#include <asm/time.h>
26#include <asm/lppaca.h>
27#include <asm/firmware.h>
28#include <asm/iseries/hv_call_xm.h>
29
30#include "processor_vpd.h"
31#include "main_store.h"
32
33static int __init iseries_proc_create(void)
34{
35 struct proc_dir_entry *e;
36
37 if (!firmware_has_feature(FW_FEATURE_ISERIES))
38 return 0;
39
40 e = proc_mkdir("iSeries", 0);
41 if (!e)
42 return 1;
43
44 return 0;
45}
46core_initcall(iseries_proc_create);
47
48static unsigned long startTitan = 0;
49static unsigned long startTb = 0;
50
51static int proc_titantod_show(struct seq_file *m, void *v)
52{
53 unsigned long tb0, titan_tod;
54
55 tb0 = get_tb();
56 titan_tod = HvCallXm_loadTod();
57
58 seq_printf(m, "Titan\n" );
59 seq_printf(m, " time base = %016lx\n", tb0);
60 seq_printf(m, " titan tod = %016lx\n", titan_tod);
61 seq_printf(m, " xProcFreq = %016x\n",
62 xIoHriProcessorVpd[0].xProcFreq);
63 seq_printf(m, " xTimeBaseFreq = %016x\n",
64 xIoHriProcessorVpd[0].xTimeBaseFreq);
65 seq_printf(m, " tb_ticks_per_jiffy = %lu\n", tb_ticks_per_jiffy);
66 seq_printf(m, " tb_ticks_per_usec = %lu\n", tb_ticks_per_usec);
67
68 if (!startTitan) {
69 startTitan = titan_tod;
70 startTb = tb0;
71 } else {
72 unsigned long titan_usec = (titan_tod - startTitan) >> 12;
73 unsigned long tb_ticks = (tb0 - startTb);
74 unsigned long titan_jiffies = titan_usec / (1000000/HZ);
75 unsigned long titan_jiff_usec = titan_jiffies * (1000000/HZ);
76 unsigned long titan_jiff_rem_usec =
77 titan_usec - titan_jiff_usec;
78 unsigned long tb_jiffies = tb_ticks / tb_ticks_per_jiffy;
79 unsigned long tb_jiff_ticks = tb_jiffies * tb_ticks_per_jiffy;
80 unsigned long tb_jiff_rem_ticks = tb_ticks - tb_jiff_ticks;
81 unsigned long tb_jiff_rem_usec =
82 tb_jiff_rem_ticks / tb_ticks_per_usec;
83 unsigned long new_tb_ticks_per_jiffy =
84 (tb_ticks * (1000000/HZ))/titan_usec;
85
86 seq_printf(m, " titan elapsed = %lu uSec\n", titan_usec);
87 seq_printf(m, " tb elapsed = %lu ticks\n", tb_ticks);
88 seq_printf(m, " titan jiffies = %lu.%04lu\n", titan_jiffies,
89 titan_jiff_rem_usec);
90 seq_printf(m, " tb jiffies = %lu.%04lu\n", tb_jiffies,
91 tb_jiff_rem_usec);
92 seq_printf(m, " new tb_ticks_per_jiffy = %lu\n",
93 new_tb_ticks_per_jiffy);
94 }
95
96 return 0;
97}
98
99static int proc_titantod_open(struct inode *inode, struct file *file)
100{
101 return single_open(file, proc_titantod_show, NULL);
102}
103
104static const struct file_operations proc_titantod_operations = {
105 .open = proc_titantod_open,
106 .read = seq_read,
107 .llseek = seq_lseek,
108 .release = single_release,
109};
110
111static int __init iseries_proc_init(void)
112{
113 if (!firmware_has_feature(FW_FEATURE_ISERIES))
114 return 0;
115
116 proc_create("iSeries/titanTod", S_IFREG|S_IRUGO, NULL,
117 &proc_titantod_operations);
118 return 0;
119}
120__initcall(iseries_proc_init);
diff --git a/arch/powerpc/platforms/iseries/processor_vpd.h b/arch/powerpc/platforms/iseries/processor_vpd.h
deleted file mode 100644
index 7ac5d0d0dbfa..000000000000
--- a/arch/powerpc/platforms/iseries/processor_vpd.h
+++ /dev/null
@@ -1,85 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ISERIES_PROCESSOR_VPD_H
19#define _ISERIES_PROCESSOR_VPD_H
20
21#include <asm/types.h>
22
23/*
24 * This struct maps Processor Vpd that is DMAd to SLIC by CSP
25 */
26struct IoHriProcessorVpd {
27 u8 xFormat; // VPD format indicator x00-x00
28 u8 xProcStatus:8; // Processor State x01-x01
29 u8 xSecondaryThreadCount; // Secondary thread cnt x02-x02
30 u8 xSrcType:1; // Src Type x03-x03
31 u8 xSrcSoft:1; // Src stay soft ...
32 u8 xSrcParable:1; // Src parable ...
33 u8 xRsvd1:5; // Reserved ...
34 u16 xHvPhysicalProcIndex; // Hypervisor physical proc index04-x05
35 u16 xRsvd2; // Reserved x06-x07
36 u32 xHwNodeId; // Hardware node id x08-x0B
37 u32 xHwProcId; // Hardware processor id x0C-x0F
38
39 u32 xTypeNum; // Card Type/CCIN number x10-x13
40 u32 xModelNum; // Model/Feature number x14-x17
41 u64 xSerialNum; // Serial number x18-x1F
42 char xPartNum[12]; // Book Part or FPU number x20-x2B
43 char xMfgID[4]; // Manufacturing ID x2C-x2F
44
45 u32 xProcFreq; // Processor Frequency x30-x33
46 u32 xTimeBaseFreq; // Time Base Frequency x34-x37
47
48 u32 xChipEcLevel; // Chip EC Levels x38-x3B
49 u32 xProcIdReg; // PIR SPR value x3C-x3F
50 u32 xPVR; // PVR value x40-x43
51 u8 xRsvd3[12]; // Reserved x44-x4F
52
53 u32 xInstCacheSize; // Instruction cache size in KB x50-x53
54 u32 xInstBlockSize; // Instruction cache block size x54-x57
55 u32 xDataCacheOperandSize; // Data cache operand size x58-x5B
56 u32 xInstCacheOperandSize; // Inst cache operand size x5C-x5F
57
58 u32 xDataL1CacheSizeKB; // L1 data cache size in KB x60-x63
59 u32 xDataL1CacheLineSize; // L1 data cache block size x64-x67
60 u64 xRsvd4; // Reserved x68-x6F
61
62 u32 xDataL2CacheSizeKB; // L2 data cache size in KB x70-x73
63 u32 xDataL2CacheLineSize; // L2 data cache block size x74-x77
64 u64 xRsvd5; // Reserved x78-x7F
65
66 u32 xDataL3CacheSizeKB; // L3 data cache size in KB x80-x83
67 u32 xDataL3CacheLineSize; // L3 data cache block size x84-x87
68 u64 xRsvd6; // Reserved x88-x8F
69
70 u64 xFruLabel; // Card Location Label x90-x97
71 u8 xSlotsOnCard; // Slots on card (0=no slots) x98-x98
72 u8 xPartLocFlag; // Location flag (0-pluggable 1-imbedded) x99-x99
73 u16 xSlotMapIndex; // Index in slot map table x9A-x9B
74 u8 xSmartCardPortNo; // Smart card port number x9C-x9C
75 u8 xRsvd7; // Reserved x9D-x9D
76 u16 xFrameIdAndRackUnit; // Frame ID and rack unit adr x9E-x9F
77
78 u8 xRsvd8[24]; // Reserved xA0-xB7
79
80 char xProcSrc[72]; // CSP format SRC xB8-xFF
81};
82
83extern struct IoHriProcessorVpd xIoHriProcessorVpd[];
84
85#endif /* _ISERIES_PROCESSOR_VPD_H */
diff --git a/arch/powerpc/platforms/iseries/release_data.h b/arch/powerpc/platforms/iseries/release_data.h
deleted file mode 100644
index 6ad7d843e8fc..000000000000
--- a/arch/powerpc/platforms/iseries/release_data.h
+++ /dev/null
@@ -1,63 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ISERIES_RELEASE_DATA_H
19#define _ISERIES_RELEASE_DATA_H
20
21/*
22 * This control block contains the critical information about the
23 * release so that it can be changed in the future (ie, the virtual
24 * address of the OS's NACA).
25 */
26#include <asm/types.h>
27#include "naca.h"
28
29/*
30 * When we IPL a secondary partition, we will check if if the
31 * secondary xMinPlicVrmIndex > the primary xVrmIndex.
32 * If it is then this tells PLIC that this secondary is not
33 * supported running on this "old" of a level of PLIC.
34 *
35 * Likewise, we will compare the primary xMinSlicVrmIndex to
36 * the secondary xVrmIndex.
37 * If the primary xMinSlicVrmDelta > secondary xVrmDelta then we
38 * know that this PLIC does not support running an OS "that old".
39 */
40
41#define HVREL_TAGSINACTIVE 0x8000
42#define HVREL_32BIT 0x4000
43#define HVREL_NOSHAREDPROCS 0x2000
44#define HVREL_NOHMT 0x1000
45
46struct HvReleaseData {
47 u32 xDesc; /* Descriptor "HvRD" ebcdic x00-x03 */
48 u16 xSize; /* Size of this control block x04-x05 */
49 u16 xVpdAreasPtrOffset; /* Offset in NACA of ItVpdAreas x06-x07 */
50 struct naca_struct *xSlicNacaAddr; /* Virt addr of SLIC NACA x08-x0F */
51 u32 xMsNucDataOffset; /* Offset of Linux Mapping Data x10-x13 */
52 u32 xRsvd1; /* Reserved x14-x17 */
53 u16 xFlags;
54 u16 xVrmIndex; /* VRM Index of OS image x1A-x1B */
55 u16 xMinSupportedPlicVrmIndex; /* Min PLIC level (soft) x1C-x1D */
56 u16 xMinCompatablePlicVrmIndex; /* Min PLIC levelP (hard) x1E-x1F */
57 char xVrmName[12]; /* Displayable name x20-x2B */
58 char xRsvd3[20]; /* Reserved x2C-x3F */
59};
60
61extern const struct HvReleaseData hvReleaseData;
62
63#endif /* _ISERIES_RELEASE_DATA_H */
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
deleted file mode 100644
index 8fc62586a973..000000000000
--- a/arch/powerpc/platforms/iseries/setup.c
+++ /dev/null
@@ -1,722 +0,0 @@
1/*
2 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
4 *
5 * Description:
6 * Architecture- / platform-specific boot-time initialization code for
7 * the IBM iSeries LPAR. Adapted from original code by Grant Erickson and
8 * code by Gary Thomas, Cort Dougan <cort@fsmlabs.com>, and Dan Malek
9 * <dan@net4x.com>.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17#undef DEBUG
18
19#include <linux/init.h>
20#include <linux/threads.h>
21#include <linux/smp.h>
22#include <linux/param.h>
23#include <linux/string.h>
24#include <linux/export.h>
25#include <linux/seq_file.h>
26#include <linux/kdev_t.h>
27#include <linux/kexec.h>
28#include <linux/major.h>
29#include <linux/root_dev.h>
30#include <linux/kernel.h>
31#include <linux/hrtimer.h>
32#include <linux/tick.h>
33
34#include <asm/processor.h>
35#include <asm/machdep.h>
36#include <asm/page.h>
37#include <asm/mmu.h>
38#include <asm/pgtable.h>
39#include <asm/mmu_context.h>
40#include <asm/cputable.h>
41#include <asm/sections.h>
42#include <asm/iommu.h>
43#include <asm/firmware.h>
44#include <asm/system.h>
45#include <asm/time.h>
46#include <asm/paca.h>
47#include <asm/cache.h>
48#include <asm/abs_addr.h>
49#include <asm/iseries/hv_lp_config.h>
50#include <asm/iseries/hv_call_event.h>
51#include <asm/iseries/hv_call_xm.h>
52#include <asm/iseries/it_lp_queue.h>
53#include <asm/iseries/mf.h>
54#include <asm/iseries/hv_lp_event.h>
55#include <asm/iseries/lpar_map.h>
56#include <asm/udbg.h>
57#include <asm/irq.h>
58
59#include "naca.h"
60#include "setup.h"
61#include "irq.h"
62#include "vpd_areas.h"
63#include "processor_vpd.h"
64#include "it_lp_naca.h"
65#include "main_store.h"
66#include "call_sm.h"
67#include "call_hpt.h"
68#include "pci.h"
69
70#ifdef DEBUG
71#define DBG(fmt...) udbg_printf(fmt)
72#else
73#define DBG(fmt...)
74#endif
75
76/* Function Prototypes */
77static unsigned long build_iSeries_Memory_Map(void);
78static void iseries_shared_idle(void);
79static void iseries_dedicated_idle(void);
80
81
82struct MemoryBlock {
83 unsigned long absStart;
84 unsigned long absEnd;
85 unsigned long logicalStart;
86 unsigned long logicalEnd;
87};
88
89/*
90 * Process the main store vpd to determine where the holes in memory are
91 * and return the number of physical blocks and fill in the array of
92 * block data.
93 */
94static unsigned long iSeries_process_Condor_mainstore_vpd(
95 struct MemoryBlock *mb_array, unsigned long max_entries)
96{
97 unsigned long holeFirstChunk, holeSizeChunks;
98 unsigned long numMemoryBlocks = 1;
99 struct IoHriMainStoreSegment4 *msVpd =
100 (struct IoHriMainStoreSegment4 *)xMsVpd;
101 unsigned long holeStart = msVpd->nonInterleavedBlocksStartAdr;
102 unsigned long holeEnd = msVpd->nonInterleavedBlocksEndAdr;
103 unsigned long holeSize = holeEnd - holeStart;
104
105 printk("Mainstore_VPD: Condor\n");
106 /*
107 * Determine if absolute memory has any
108 * holes so that we can interpret the
109 * access map we get back from the hypervisor
110 * correctly.
111 */
112 mb_array[0].logicalStart = 0;
113 mb_array[0].logicalEnd = 0x100000000UL;
114 mb_array[0].absStart = 0;
115 mb_array[0].absEnd = 0x100000000UL;
116
117 if (holeSize) {
118 numMemoryBlocks = 2;
119 holeStart = holeStart & 0x000fffffffffffffUL;
120 holeStart = addr_to_chunk(holeStart);
121 holeFirstChunk = holeStart;
122 holeSize = addr_to_chunk(holeSize);
123 holeSizeChunks = holeSize;
124 printk( "Main store hole: start chunk = %0lx, size = %0lx chunks\n",
125 holeFirstChunk, holeSizeChunks );
126 mb_array[0].logicalEnd = holeFirstChunk;
127 mb_array[0].absEnd = holeFirstChunk;
128 mb_array[1].logicalStart = holeFirstChunk;
129 mb_array[1].logicalEnd = 0x100000000UL - holeSizeChunks;
130 mb_array[1].absStart = holeFirstChunk + holeSizeChunks;
131 mb_array[1].absEnd = 0x100000000UL;
132 }
133 return numMemoryBlocks;
134}
135
136#define MaxSegmentAreas 32
137#define MaxSegmentAdrRangeBlocks 128
138#define MaxAreaRangeBlocks 4
139
140static unsigned long iSeries_process_Regatta_mainstore_vpd(
141 struct MemoryBlock *mb_array, unsigned long max_entries)
142{
143 struct IoHriMainStoreSegment5 *msVpdP =
144 (struct IoHriMainStoreSegment5 *)xMsVpd;
145 unsigned long numSegmentBlocks = 0;
146 u32 existsBits = msVpdP->msAreaExists;
147 unsigned long area_num;
148
149 printk("Mainstore_VPD: Regatta\n");
150
151 for (area_num = 0; area_num < MaxSegmentAreas; ++area_num ) {
152 unsigned long numAreaBlocks;
153 struct IoHriMainStoreArea4 *currentArea;
154
155 if (existsBits & 0x80000000) {
156 unsigned long block_num;
157
158 currentArea = &msVpdP->msAreaArray[area_num];
159 numAreaBlocks = currentArea->numAdrRangeBlocks;
160 printk("ms_vpd: processing area %2ld blocks=%ld",
161 area_num, numAreaBlocks);
162 for (block_num = 0; block_num < numAreaBlocks;
163 ++block_num ) {
164 /* Process an address range block */
165 struct MemoryBlock tempBlock;
166 unsigned long i;
167
168 tempBlock.absStart =
169 (unsigned long)currentArea->xAdrRangeBlock[block_num].blockStart;
170 tempBlock.absEnd =
171 (unsigned long)currentArea->xAdrRangeBlock[block_num].blockEnd;
172 tempBlock.logicalStart = 0;
173 tempBlock.logicalEnd = 0;
174 printk("\n block %ld absStart=%016lx absEnd=%016lx",
175 block_num, tempBlock.absStart,
176 tempBlock.absEnd);
177
178 for (i = 0; i < numSegmentBlocks; ++i) {
179 if (mb_array[i].absStart ==
180 tempBlock.absStart)
181 break;
182 }
183 if (i == numSegmentBlocks) {
184 if (numSegmentBlocks == max_entries)
185 panic("iSeries_process_mainstore_vpd: too many memory blocks");
186 mb_array[numSegmentBlocks] = tempBlock;
187 ++numSegmentBlocks;
188 } else
189 printk(" (duplicate)");
190 }
191 printk("\n");
192 }
193 existsBits <<= 1;
194 }
195 /* Now sort the blocks found into ascending sequence */
196 if (numSegmentBlocks > 1) {
197 unsigned long m, n;
198
199 for (m = 0; m < numSegmentBlocks - 1; ++m) {
200 for (n = numSegmentBlocks - 1; m < n; --n) {
201 if (mb_array[n].absStart <
202 mb_array[n-1].absStart) {
203 struct MemoryBlock tempBlock;
204
205 tempBlock = mb_array[n];
206 mb_array[n] = mb_array[n-1];
207 mb_array[n-1] = tempBlock;
208 }
209 }
210 }
211 }
212 /*
213 * Assign "logical" addresses to each block. These
214 * addresses correspond to the hypervisor "bitmap" space.
215 * Convert all addresses into units of 256K chunks.
216 */
217 {
218 unsigned long i, nextBitmapAddress;
219
220 printk("ms_vpd: %ld sorted memory blocks\n", numSegmentBlocks);
221 nextBitmapAddress = 0;
222 for (i = 0; i < numSegmentBlocks; ++i) {
223 unsigned long length = mb_array[i].absEnd -
224 mb_array[i].absStart;
225
226 mb_array[i].logicalStart = nextBitmapAddress;
227 mb_array[i].logicalEnd = nextBitmapAddress + length;
228 nextBitmapAddress += length;
229 printk(" Bitmap range: %016lx - %016lx\n"
230 " Absolute range: %016lx - %016lx\n",
231 mb_array[i].logicalStart,
232 mb_array[i].logicalEnd,
233 mb_array[i].absStart, mb_array[i].absEnd);
234 mb_array[i].absStart = addr_to_chunk(mb_array[i].absStart &
235 0x000fffffffffffffUL);
236 mb_array[i].absEnd = addr_to_chunk(mb_array[i].absEnd &
237 0x000fffffffffffffUL);
238 mb_array[i].logicalStart =
239 addr_to_chunk(mb_array[i].logicalStart);
240 mb_array[i].logicalEnd = addr_to_chunk(mb_array[i].logicalEnd);
241 }
242 }
243
244 return numSegmentBlocks;
245}
246
/*
 * Parse the Main Store VPD into mb_array (at most max_entries blocks),
 * dispatching on CPU type: Regatta-style layout when the MMU has SLB
 * support, Condor-style otherwise.  Returns the number of blocks found.
 */
static unsigned long iSeries_process_mainstore_vpd(struct MemoryBlock *mb_array,
		unsigned long max_entries)
{
	unsigned long i;
	unsigned long mem_blocks = 0;

	if (mmu_has_feature(MMU_FTR_SLB))
		mem_blocks = iSeries_process_Regatta_mainstore_vpd(mb_array,
				max_entries);
	else
		mem_blocks = iSeries_process_Condor_mainstore_vpd(mb_array,
				max_entries);

	/* Dump the resulting table for boot-time diagnostics. */
	printk("Mainstore_VPD: numMemoryBlocks = %ld\n", mem_blocks);
	for (i = 0; i < mem_blocks; ++i) {
		printk("Mainstore_VPD: block %3ld logical chunks %016lx - %016lx\n"
				" abs chunks %016lx - %016lx\n",
			i, mb_array[i].logicalStart, mb_array[i].logicalEnd,
			mb_array[i].absStart, mb_array[i].absEnd);
	}
	return mem_blocks;
}
269
270static void __init iSeries_get_cmdline(void)
271{
272 char *p, *q;
273
274 /* copy the command line parameter from the primary VSP */
275 HvCallEvent_dmaToSp(cmd_line, 2 * 64* 1024, 256,
276 HvLpDma_Direction_RemoteToLocal);
277
278 p = cmd_line;
279 q = cmd_line + 255;
280 while(p < q) {
281 if (!*p || *p == '\n')
282 break;
283 ++p;
284 }
285 *p = 0;
286}
287
/*
 * Very early platform init: snapshot the timebase, bring up TCE/iommu
 * management, install SMP ops, route LP event queue 0 to cpu 0 and
 * initialise the machine-facilities (mf) layer.
 */
static void __init iSeries_init_early(void)
{
	DBG(" -> iSeries_init_early()\n");

	/* Snapshot the timebase, for use in later recalibration */
	iSeries_time_init_early();

	/*
	 * Initialize the DMA/TCE management
	 */
	iommu_init_early_iSeries();

	/* Initialize machine-dependency vectors */
#ifdef CONFIG_SMP
	smp_init_iSeries();
#endif

	/* Associate Lp Event Queue 0 with processor 0 */
	HvCallEvent_setLpEventQueueInterruptProc(0, 0);

	mf_init();

	DBG(" <- iSeries_init_early()\n");
}
312
/*
 * Global translation map from Linux "physical" 256K chunks to absolute
 * hypervisor chunks; the mapping array itself is allocated by
 * mschunks_alloc() and filled in by build_iSeries_Memory_Map().
 */
struct mschunks_map mschunks_map = {
	/* XXX We don't use these, but Piranha might need them. */
	.chunk_size = MSCHUNKS_CHUNK_SIZE,
	.chunk_shift = MSCHUNKS_CHUNK_SHIFT,
	.chunk_mask = MSCHUNKS_OFFSET_MASK,
};
EXPORT_SYMBOL(mschunks_map);
320
/*
 * Reserve the chunk-translation array in the space just past the kernel
 * image by bumping klimit -- this runs before any allocator is usable.
 */
static void mschunks_alloc(unsigned long num_chunks)
{
	klimit = _ALIGN(klimit, sizeof(u32));	/* u32-align the array base */
	mschunks_map.mapping = (u32 *)klimit;
	klimit += num_chunks * sizeof(u32);
	mschunks_map.num_chunks = num_chunks;
}
328
/*
 * The iSeries may have very large memories ( > 128 GB ) and a partition
 * may get memory in "chunks" that may be anywhere in the 2**52 real
 * address space.  The chunks are 256K in size.  To map this to the
 * memory model Linux expects, the AS/400 specific code builds a
 * translation table to translate what Linux thinks are "physical"
 * addresses to the actual real addresses.  This allows us to make
 * it appear to Linux that we have contiguous memory starting at
 * physical address zero while in fact this could be far from the truth.
 * To avoid confusion, I'll let the words physical and/or real address
 * apply to the Linux addresses while I'll use "absolute address" to
 * refer to the actual hardware real address.
 *
 * build_iSeries_Memory_Map gets information from the Hypervisor and
 * looks at the Main Store VPD to determine the absolute addresses
 * of the memory that has been assigned to our partition and builds
 * a table used to translate Linux's physical addresses to these
 * absolute addresses.  Absolute addresses are needed when
 * communicating with the hypervisor (e.g. to build HPT entries)
 *
 * Returns the physical memory size
 */

static unsigned long __init build_iSeries_Memory_Map(void)
{
	u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize;
	u32 nextPhysChunk;
	u32 hptFirstChunk, hptLastChunk, hptSizeChunks, hptSizePages;
	u32 totalChunks,moreChunks;
	u32 currChunk, thisChunk, absChunk;
	u32 currDword;
	u32 chunkBit;
	u64 map;
	struct MemoryBlock mb[32];
	unsigned long numMemoryBlocks, curBlock;

	/* Chunk size on iSeries is 256K bytes */
	totalChunks = (u32)HvLpConfig_getMsChunks();
	mschunks_alloc(totalChunks);

	/*
	 * Get absolute address of our load area
	 * and map it to physical address 0
	 * This guarantees that the loadarea ends up at physical 0
	 * otherwise, it might not be returned by PLIC as the first
	 * chunks
	 */

	loadAreaFirstChunk = (u32)addr_to_chunk(itLpNaca.xLoadAreaAddr);
	loadAreaSize = itLpNaca.xLoadAreaChunks;

	/*
	 * Only add the pages already mapped here.
	 * Otherwise we might add the hpt pages
	 * The rest of the pages of the load area
	 * aren't in the HPT yet and can still
	 * be assigned an arbitrary physical address
	 */
	if ((loadAreaSize * 64) > HvPagesToMap)
		loadAreaSize = HvPagesToMap / 64;

	loadAreaLastChunk = loadAreaFirstChunk + loadAreaSize - 1;

	/*
	 * TODO Do we need to do something if the HPT is in the 64MB load area?
	 * This would be required if the itLpNaca.xLoadAreaChunks includes
	 * the HPT size
	 */

	printk("Mapping load area - physical addr = 0000000000000000\n"
		" absolute addr = %016lx\n",
		chunk_to_addr(loadAreaFirstChunk));
	printk("Load area size %dK\n", loadAreaSize * 256);

	/* Identity-map the load area to the start of physical memory. */
	for (nextPhysChunk = 0; nextPhysChunk < loadAreaSize; ++nextPhysChunk)
		mschunks_map.mapping[nextPhysChunk] =
			loadAreaFirstChunk + nextPhysChunk;

	/*
	 * Get absolute address of our HPT and remember it so
	 * we won't map it to any physical address
	 */
	hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress());
	hptSizePages = (u32)HvCallHpt_getHptPages();
	hptSizeChunks = hptSizePages >>
		(MSCHUNKS_CHUNK_SHIFT - HW_PAGE_SHIFT);
	hptLastChunk = hptFirstChunk + hptSizeChunks - 1;

	printk("HPT absolute addr = %016lx, size = %dK\n",
			chunk_to_addr(hptFirstChunk), hptSizeChunks * 256);

	/*
	 * Determine if absolute memory has any
	 * holes so that we can interpret the
	 * access map we get back from the hypervisor
	 * correctly.
	 */
	numMemoryBlocks = iSeries_process_mainstore_vpd(mb, 32);

	/*
	 * Process the main store access map from the hypervisor
	 * to build up our physical -> absolute translation table
	 */
	curBlock = 0;
	currChunk = 0;
	currDword = 0;
	moreChunks = totalChunks;

	/* Walk the access map 64 chunks (one dword) at a time. */
	while (moreChunks) {
		map = HvCallSm_get64BitsOfAccessMap(itLpNaca.xLpIndex,
				currDword);
		thisChunk = currChunk;
		/* Scan the dword MSB-first; each set bit is an owned chunk. */
		while (map) {
			chunkBit = map >> 63;
			map <<= 1;
			if (chunkBit) {
				--moreChunks;
				/* Advance to the memory block containing this chunk. */
				while (thisChunk >= mb[curBlock].logicalEnd) {
					++curBlock;
					if (curBlock >= numMemoryBlocks)
						panic("out of memory blocks");
				}
				if (thisChunk < mb[curBlock].logicalStart)
					panic("memory block error");

				absChunk = mb[curBlock].absStart +
					(thisChunk - mb[curBlock].logicalStart);
				/*
				 * Skip chunks belonging to the HPT or the
				 * already-mapped load area.
				 */
				if (((absChunk < hptFirstChunk) ||
				     (absChunk > hptLastChunk)) &&
				    ((absChunk < loadAreaFirstChunk) ||
				     (absChunk > loadAreaLastChunk))) {
					mschunks_map.mapping[nextPhysChunk] =
						absChunk;
					++nextPhysChunk;
				}
			}
			++thisChunk;
		}
		++currDword;
		currChunk += 64;
	}

	/*
	 * main store size (in chunks) is
	 * totalChunks - hptSizeChunks
	 * which should be equal to
	 * nextPhysChunk
	 */
	return chunk_to_addr(nextPhysChunk);
}
479
/*
 * Arch setup: select the idle loop according to whether this partition
 * runs on shared or dedicated processors, set up the LP event queue,
 * report the partition's processor limits and initialise PCI.
 */
static void __init iSeries_setup_arch(void)
{
	if (get_lppaca()->shared_proc) {
		ppc_md.idle_loop = iseries_shared_idle;
		printk(KERN_DEBUG "Using shared processor idle loop\n");
	} else {
		ppc_md.idle_loop = iseries_dedicated_idle;
		printk(KERN_DEBUG "Using dedicated idle loop\n");
	}

	/* Setup the Lp Event Queue */
	setup_hvlpevent_queue();

	printk("Max logical processors = %d\n",
			itVpdAreas.xSlicMaxLogicalProcs);
	printk("Max physical processors = %d\n",
			itVpdAreas.xSlicMaxPhysicalProcs);

	iSeries_pcibios_init();
}
503
/* /proc/cpuinfo "machine" line for this platform. */
static void iSeries_show_cpuinfo(struct seq_file *m)
{
	/* Constant string: seq_puts() avoids pointless format parsing. */
	seq_puts(m, "machine\t\t: 64-bit iSeries Logical Partition\n");
}
508
/*
 * ppc_md.progress hook: log the boot-progress string and mirror the
 * numeric code to the operator panel via the machine facilities layer.
 */
static void __init iSeries_progress(char * st, unsigned short code)
{
	printk("Progress: [%04x] - %s\n", (unsigned)code, st);
	mf_display_progress(code);
}
514
static void __init iSeries_fixup_klimit(void)
{
	/*
	 * Change klimit to take into account any ram disk
	 * that may be included
	 */
	/* naca.xRamDisk is a kernel-relative offset; add KERNELBASE plus
	 * the ram disk's size in hardware pages to move klimit past it. */
	if (naca.xRamDisk)
		klimit = KERNELBASE + (u64)naca.xRamDisk +
			(naca.xRamDiskSize * HW_PAGE_SIZE);
}
525
/*
 * Late initcall: blank the operator-panel progress line once boot is
 * essentially complete.  Always returns 0.
 */
static int __init iSeries_src_init(void)
{
	/* clear the progress line */
	if (firmware_has_feature(FW_FEATURE_ISERIES))
		ppc_md.progress("             ", 0xffff);
	return 0;
}

late_initcall(iSeries_src_init);
535
/*
 * Drain pending hypervisor LP events by issuing system call 0x5555 --
 * presumably a magic number recognised by the low-level iSeries
 * exception code (TODO confirm against the sc handler); r0 and r3 are
 * clobbered by the call.
 */
static inline void process_iSeries_events(void)
{
	asm volatile ("li 0,0x5555; sc" : : : "r0", "r3");
}
540
/*
 * Give our shared physical processor back to the hypervisor for (up to)
 * one jiffy, then resynthesise a decrementer interrupt on return.
 * Called from the shared-processor idle loop with interrupts disabled.
 */
static void yield_shared_processor(void)
{
	unsigned long tb;

	/* Let these event classes wake us from the yield. */
	HvCall_setEnabledInterrupts(HvCall_MaskIPI |
		HvCall_MaskLpEvent |
		HvCall_MaskLpProd |
		HvCall_MaskTimeout);

	tb = get_tb();
	/* Compute future tb value when yield should expire */
	HvCall_yieldProcessor(HvCall_YieldTimed, tb+tb_ticks_per_jiffy);

	/*
	 * The decrementer stops during the yield.  Force a fake decrementer
	 * here and let the timer_interrupt code sort out the actual time.
	 */
	get_lppaca()->int_dword.fields.decr_int = 1;
	ppc64_runlatch_on();
	process_iSeries_events();
}
562
/*
 * Idle loop for partitions running on shared processors: yield the
 * physical cpu back to the hypervisor whenever there is nothing to do,
 * re-checking need_resched/pending events with interrupts off to close
 * the race against a wakeup arriving between check and yield.
 */
static void iseries_shared_idle(void)
{
	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();
		while (!need_resched() && !hvlpevent_is_pending()) {
			local_irq_disable();
			ppc64_runlatch_off();

			/* Recheck with irqs off */
			if (!need_resched() && !hvlpevent_is_pending())
				yield_shared_processor();

			HMT_medium();
			local_irq_enable();
		}

		ppc64_runlatch_on();
		rcu_idle_exit();
		tick_nohz_idle_exit();

		/* Handle any LP events that accumulated while idle. */
		if (hvlpevent_is_pending())
			process_iSeries_events();

		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
592
/*
 * Idle loop for partitions on dedicated processors: spin at low SMT
 * priority (polling, so no IPI is needed to wake us -- see
 * TIF_POLLING_NRFLAG), servicing hypervisor LP events as they arrive.
 */
static void iseries_dedicated_idle(void)
{
	set_thread_flag(TIF_POLLING_NRFLAG);

	while (1) {
		tick_nohz_idle_enter();
		rcu_idle_enter();
		if (!need_resched()) {
			while (!need_resched()) {
				/* Drop thread priority while spinning. */
				ppc64_runlatch_off();
				HMT_low();

				if (hvlpevent_is_pending()) {
					HMT_medium();
					ppc64_runlatch_on();
					process_iSeries_events();
				}
			}

			HMT_medium();
		}

		ppc64_runlatch_on();
		rcu_idle_exit();
		tick_nohz_idle_exit();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
623
/*
 * ioremap hook: legacy iSeries I/O "addresses" are hypervisor tokens,
 * not real bus addresses, so this is a pure identity cast -- no page
 * tables are touched and size/flags/caller are ignored.
 */
static void __iomem *iseries_ioremap(phys_addr_t address, unsigned long size,
				unsigned long flags, void *caller)
{
	return (void __iomem *)address;
}
629
/* Nothing to undo: iseries_ioremap() created no mapping. */
static void iseries_iounmap(volatile void __iomem *token)
{
}
633
/*
 * Machine probe: claim this machine only when the flattened device
 * tree root is compatible with "IBM,iSeries", then select the iSeries
 * HPT management and mask off unsupported large pages.
 */
static int __init iseries_probe(void)
{
	unsigned long root = of_get_flat_dt_root();
	if (!of_flat_dt_is_compatible(root, "IBM,iSeries"))
		return 0;

	hpte_init_iSeries();
	/* iSeries does not support 16M pages */
	cur_cpu_spec->mmu_features &= ~MMU_FTR_16M_PAGE;

	return 1;
}
646
#ifdef CONFIG_KEXEC
/* kexec is not implemented on legacy iSeries; refuse every image. */
static int iseries_kexec_prepare(struct kimage *image)
{
	return -ENOSYS;
}
#endif
653
/* Machine description: hooks wiring the generic ppc64 code to the
 * legacy iSeries hypervisor services defined above and in mf.c/pci.c. */
define_machine(iseries) {
	.name = "iSeries",
	.setup_arch = iSeries_setup_arch,
	.show_cpuinfo = iSeries_show_cpuinfo,
	.init_IRQ = iSeries_init_IRQ,
	.get_irq = iSeries_get_irq,
	.init_early = iSeries_init_early,
	.pcibios_fixup = iSeries_pci_final_fixup,
	.pcibios_fixup_resources= iSeries_pcibios_fixup_resources,
	.restart = mf_reboot,
	.power_off = mf_power_off,
	.halt = mf_power_off,
	.get_boot_time = iSeries_get_boot_time,
	.set_rtc_time = iSeries_set_rtc_time,
	.get_rtc_time = iSeries_get_rtc_time,
	.calibrate_decr = generic_calibrate_decr,
	.progress = iSeries_progress,
	.probe = iseries_probe,
	.ioremap = iseries_ioremap,
	.iounmap = iseries_iounmap,
#ifdef CONFIG_KEXEC
	.machine_kexec_prepare = iseries_kexec_prepare,
#endif
	/* XXX Implement enable_pmcs for iSeries */
};
679
680void * __init iSeries_early_setup(void)
681{
682 unsigned long phys_mem_size;
683
684 /* Identify CPU type. This is done again by the common code later
685 * on but calling this function multiple times is fine.
686 */
687 identify_cpu(0, mfspr(SPRN_PVR));
688 initialise_paca(&boot_paca, 0);
689
690 powerpc_firmware_features |= FW_FEATURE_ISERIES;
691 powerpc_firmware_features |= FW_FEATURE_LPAR;
692
693#ifdef CONFIG_SMP
694 /* On iSeries we know we can never have more than 64 cpus */
695 nr_cpu_ids = max(nr_cpu_ids, 64);
696#endif
697
698 iSeries_fixup_klimit();
699
700 /*
701 * Initialize the table which translate Linux physical addresses to
702 * AS/400 absolute addresses
703 */
704 phys_mem_size = build_iSeries_Memory_Map();
705
706 iSeries_get_cmdline();
707
708 return (void *) __pa(build_flat_dt(phys_mem_size));
709}
710
/*
 * Write one character to the hypervisor log buffer, expanding a bare
 * LF into CR+LF so the log shows proper line breaks.
 */
static void hvputc(char c)
{
	static const char cr = '\r';

	if (c == '\n')
		HvCall_writeLogBuffer(&cr, 1);

	HvCall_writeLogBuffer(&c, 1);
}
718
/* Route early-debug (udbg) character output to the hypervisor log. */
void __init udbg_init_iseries(void)
{
	udbg_putc = hvputc;
}
diff --git a/arch/powerpc/platforms/iseries/setup.h b/arch/powerpc/platforms/iseries/setup.h
deleted file mode 100644
index 729754bbb018..000000000000
--- a/arch/powerpc/platforms/iseries/setup.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/*
2 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
4 *
5 * Description:
6 * Architecture- / platform-specific boot-time initialization code for
7 * the IBM AS/400 LPAR. Adapted from original code by Grant Erickson and
8 * code by Gary Thomas, Cort Dougan <cort@cs.nmt.edu>, and Dan Malek
9 * <dan@netx4.com>.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
#ifndef __ISERIES_SETUP_H__
#define __ISERIES_SETUP_H__

/* Entry points implemented by the iSeries platform setup code. */
extern void *iSeries_early_setup(void);
extern unsigned long iSeries_get_boot_time(void);
extern int iSeries_set_rtc_time(struct rtc_time *tm);
extern void iSeries_get_rtc_time(struct rtc_time *tm);

/* Builds the flattened device tree (defined elsewhere in this platform). */
extern void *build_flat_dt(unsigned long phys_mem_size);

#endif /* __ISERIES_SETUP_H__ */
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c
deleted file mode 100644
index 02df49fb59f0..000000000000
--- a/arch/powerpc/platforms/iseries/smp.c
+++ /dev/null
@@ -1,88 +0,0 @@
1/*
2 * SMP support for iSeries machines.
3 *
4 * Dave Engebretsen, Peter Bergner, and
5 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
6 *
7 * Plus various changes from other IBM teams...
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#undef DEBUG
16
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/smp.h>
20#include <linux/interrupt.h>
21#include <linux/kernel_stat.h>
22#include <linux/delay.h>
23#include <linux/init.h>
24#include <linux/spinlock.h>
25#include <linux/cache.h>
26#include <linux/err.h>
27#include <linux/device.h>
28#include <linux/cpu.h>
29
30#include <asm/ptrace.h>
31#include <linux/atomic.h>
32#include <asm/irq.h>
33#include <asm/page.h>
34#include <asm/pgtable.h>
35#include <asm/io.h>
36#include <asm/smp.h>
37#include <asm/paca.h>
38#include <asm/iseries/hv_call.h>
39#include <asm/time.h>
40#include <asm/machdep.h>
41#include <asm/cputable.h>
42#include <asm/system.h>
43
/* Send an inter-processor interrupt to "cpu" via the hypervisor
 * (data is unused -- the muxed-IPI message lives in the paca). */
static void smp_iSeries_cause_ipi(int cpu, unsigned long data)
{
	HvCall_sendIPI(&(paca[cpu]));
}
48
/* Report the number of possible cpus in this partition. */
static int smp_iSeries_probe(void)
{
	return cpumask_weight(cpu_possible_mask);
}
53
/*
 * Release secondary cpu "nr" from its spin loop.  Returns 0 on
 * success, -ENOENT when the partition does not own that processor.
 */
static int smp_iSeries_kick_cpu(int nr)
{
	BUG_ON((nr < 0) || (nr >= NR_CPUS));

	/* Verify that our partition has a processor nr */
	if (lppaca_of(nr).dyn_proc_status >= 2)
		return -ENOENT;

	/* The processor is currently spinning, waiting
	 * for the cpu_start field to become non-zero
	 * After we set cpu_start, the processor will
	 * continue on to secondary_start in iSeries_head.S
	 */
	paca[nr].cpu_start = 1;

	return 0;
}
71
/* No per-cpu setup is required on iSeries. */
static void __devinit smp_iSeries_setup_cpu(int nr)
{
}
75
/* SMP operations table installed by smp_init_iSeries(). */
static struct smp_ops_t iSeries_smp_ops = {
	.message_pass = NULL,	/* Use smp_muxed_ipi_message_pass */
	.cause_ipi    = smp_iSeries_cause_ipi,
	.probe        = smp_iSeries_probe,
	.kick_cpu     = smp_iSeries_kick_cpu,
	.setup_cpu    = smp_iSeries_setup_cpu,
};
83
/* This is called very early (from iSeries_init_early) to install
 * the platform SMP operations before secondaries are started. */
void __init smp_init_iSeries(void)
{
	smp_ops = &iSeries_smp_ops;
}
diff --git a/arch/powerpc/platforms/iseries/spcomm_area.h b/arch/powerpc/platforms/iseries/spcomm_area.h
deleted file mode 100644
index 598b7c14573a..000000000000
--- a/arch/powerpc/platforms/iseries/spcomm_area.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef _ISERIES_SPCOMM_AREA_H
20#define _ISERIES_SPCOMM_AREA_H
21
22
23struct SpCommArea {
24 u32 xDesc; // Descriptor (only in new formats) 000-003
25 u8 xFormat; // Format (only in new formats) 004-004
26 u8 xRsvd1[11]; // Reserved 005-00F
27 u64 xRawTbAtIplStart; // Raw HW TB value when IPL is started 010-017
28 u64 xRawTodAtIplStart; // Raw HW TOD value when IPL is started 018-01F
29 u64 xBcdTimeAtIplStart; // BCD time when IPL is started 020-027
30 u64 xBcdTimeAtOsStart; // BCD time when OS passed control 028-02F
31 u8 xRsvd2[80]; // Reserved 030-07F
32};
33
34#endif /* _ISERIES_SPCOMM_AREA_H */
diff --git a/arch/powerpc/platforms/iseries/vio.c b/arch/powerpc/platforms/iseries/vio.c
deleted file mode 100644
index 04be62d368a6..000000000000
--- a/arch/powerpc/platforms/iseries/vio.c
+++ /dev/null
@@ -1,556 +0,0 @@
1/*
2 * Legacy iSeries specific vio initialisation
3 * that needs to be built in (not a module).
4 *
5 * © Copyright 2007 IBM Corporation
6 * Author: Stephen Rothwell
7 * Some parts collected from various other files
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software Foundation,
21 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23#include <linux/of.h>
24#include <linux/init.h>
25#include <linux/slab.h>
26#include <linux/completion.h>
27#include <linux/proc_fs.h>
28#include <linux/export.h>
29
30#include <asm/firmware.h>
31#include <asm/vio.h>
32#include <asm/iseries/vio.h>
33#include <asm/iseries/iommu.h>
34#include <asm/iseries/hv_types.h>
35#include <asm/iseries/hv_lp_event.h>
36
37#define FIRST_VTY 0
38#define NUM_VTYS 1
39#define FIRST_VSCSI (FIRST_VTY + NUM_VTYS)
40#define NUM_VSCSIS 1
41#define FIRST_VLAN (FIRST_VSCSI + NUM_VSCSIS)
42#define NUM_VLANS HVMAXARCHITECTEDVIRTUALLANS
43#define FIRST_VIODASD (FIRST_VLAN + NUM_VLANS)
44#define NUM_VIODASDS HVMAXARCHITECTEDVIRTUALDISKS
45#define FIRST_VIOCD (FIRST_VIODASD + NUM_VIODASDS)
46#define NUM_VIOCDS HVMAXARCHITECTEDVIRTUALCDROMS
47#define FIRST_VIOTAPE (FIRST_VIOCD + NUM_VIOCDS)
48#define NUM_VIOTAPES HVMAXARCHITECTEDVIRTUALTAPES
49
50struct vio_waitevent {
51 struct completion com;
52 int rc;
53 u16 sub_result;
54};
55
56struct vio_resource {
57 char rsrcname[10];
58 char type[4];
59 char model[3];
60};
61
62static struct property *new_property(const char *name, int length,
63 const void *value)
64{
65 struct property *np = kzalloc(sizeof(*np) + strlen(name) + 1 + length,
66 GFP_KERNEL);
67
68 if (!np)
69 return NULL;
70 np->name = (char *)(np + 1);
71 np->value = np->name + strlen(name) + 1;
72 strcpy(np->name, name);
73 memcpy(np->value, value, length);
74 np->length = length;
75 return np;
76}
77
/* Counterpart to new_property(): the single kzalloc covers everything. */
static void free_property(struct property *np)
{
	kfree(np);
}
82
/*
 * Allocate and minimally initialise a dynamic device_node with the
 * given full path, holding a reference on the parent.  Returns NULL
 * on allocation failure.
 */
static struct device_node *new_node(const char *path,
		struct device_node *parent)
{
	struct device_node *np = kzalloc(sizeof(*np), GFP_KERNEL);

	if (!np)
		return NULL;
	np->full_name = kstrdup(path, GFP_KERNEL);
	if (!np->full_name) {
		kfree(np);
		return NULL;
	}
	/* Mark dynamic so the OF code knows this node may be freed. */
	of_node_set_flag(np, OF_DYNAMIC);
	kref_init(&np->kref);
	np->parent = of_node_get(parent);
	return np;
}
100
101static void free_node(struct device_node *np)
102{
103 struct property *next;
104 struct property *prop;
105
106 next = np->properties;
107 while (next) {
108 prop = next;
109 next = prop->next;
110 free_property(prop);
111 }
112 of_node_put(np->parent);
113 kfree(np->full_name);
114 kfree(np);
115}
116
/* Attach a NUL-terminated string property to np.
 * Returns 1 on success, 0 on allocation failure. */
static int add_string_property(struct device_node *np, const char *name,
		const char *value)
{
	struct property *nprop = new_property(name, strlen(value) + 1, value);

	if (!nprop)
		return 0;
	prom_add_property(np, nprop);
	return 1;
}
127
/* Attach a raw byte-blob property to np.
 * Returns 1 on success, 0 on allocation failure. */
static int add_raw_property(struct device_node *np, const char *name,
		int length, const void *value)
{
	struct property *nprop = new_property(name, length, value);

	if (!nprop)
		return 0;
	prom_add_property(np, nprop);
	return 1;
}
138
/*
 * Create and attach a /vdevice child node for one virtual device,
 * populating the standard properties (name, device_type, compatible,
 * reg, linux,unit_address) plus the optional vio resource triple.
 * Returns the attached node, or NULL on any failure (the partially
 * built node is freed).
 */
static struct device_node *do_device_node(struct device_node *parent,
		const char *name, u32 reg, u32 unit, const char *type,
		const char *compat, struct vio_resource *res)
{
	struct device_node *np;
	char path[32];

	snprintf(path, sizeof(path), "/vdevice/%s@%08x", name, reg);
	np = new_node(path, parent);
	if (!np)
		return NULL;
	if (!add_string_property(np, "name", name) ||
		!add_string_property(np, "device_type", type) ||
		!add_string_property(np, "compatible", compat) ||
		!add_raw_property(np, "reg", sizeof(reg), &reg) ||
		!add_raw_property(np, "linux,unit_address",
			sizeof(unit), &unit)) {
		goto node_free;
	}
	/* Optional OS/400 resource info (name/type/model) for this unit. */
	if (res) {
		if (!add_raw_property(np, "linux,vio_rsrcname",
				sizeof(res->rsrcname), res->rsrcname) ||
			!add_raw_property(np, "linux,vio_type",
				sizeof(res->type), res->type) ||
			!add_raw_property(np, "linux,vio_model",
				sizeof(res->model), res->model))
			goto node_free;
	}
	/* Point the convenience fields at the properties just added. */
	np->name = of_get_property(np, "name", NULL);
	np->type = of_get_property(np, "device_type", NULL);
	of_attach_node(np);
#ifdef CONFIG_PROC_DEVICETREE
	/* Mirror the new node under /proc/device-tree if enabled. */
	if (parent->pde) {
		struct proc_dir_entry *ent;

		ent = proc_mkdir(strrchr(np->full_name, '/') + 1, parent->pde);
		if (ent)
			proc_device_tree_add_node(np, ent);
	}
#endif
	return np;

 node_free:
	free_node(np);
	return NULL;
}
185
/*
 * This is here so that we can dynamically add viodasd
 * devices without exposing all the above infrastructure.
 *
 * Creates and registers a vio device for virtual disk "unit";
 * returns the new vio_dev or NULL on failure.
 */
struct vio_dev *vio_create_viodasd(u32 unit)
{
	struct device_node *vio_root;
	struct device_node *np;
	struct vio_dev *vdev = NULL;

	vio_root = of_find_node_by_path("/vdevice");
	if (!vio_root)
		return NULL;
	np = do_device_node(vio_root, "viodasd", FIRST_VIODASD + unit, unit,
			"block", "IBM,iSeries-viodasd", NULL);
	of_node_put(vio_root);
	if (np) {
		vdev = vio_register_device_node(np);
		/* Registration failed: tear the node back down. */
		if (!vdev)
			free_node(np);
	}
	return vdev;
}
EXPORT_SYMBOL_GPL(vio_create_viodasd);
210
/*
 * LP event handler for the block-io subtype: we only ever expect acks
 * to our own open/close requests; open acks wake the waiting prober.
 */
static void __init handle_block_event(struct HvLpEvent *event)
{
	struct vioblocklpevent *bevent = (struct vioblocklpevent *)event;
	struct vio_waitevent *pwe;

	if (event == NULL)
		/* Notification that a partition went away! */
		return;
	/* First, we should NEVER get an int here...only acks */
	if (hvlpevent_is_int(event)) {
		/* NOTE(review): log prefix says "handle_viod_request" but the
		 * function is handle_block_event -- looks like a copy/paste;
		 * left unchanged as it is a runtime string. */
		printk(KERN_WARNING "handle_viod_request: "
		       "Yikes! got an int in viodasd event handler!\n");
		if (hvlpevent_need_ack(event)) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
		return;
	}

	switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
	case vioblockopen:
		/*
		 * Handle a response to an open request.  We get all the
		 * disk information in the response, so update it.  The
		 * correlation token contains a pointer to a waitevent
		 * structure that has a completion in it.  update the
		 * return code in the waitevent structure and post the
		 * completion to wake up the guy who sent the request
		 */
		pwe = (struct vio_waitevent *)event->xCorrelationToken;
		pwe->rc = event->xRc;
		pwe->sub_result = bevent->sub_result;
		complete(&pwe->com);
		break;
	case vioblockclose:
		break;
	default:
		printk(KERN_WARNING "handle_viod_request: unexpected subtype!");
		if (hvlpevent_need_ack(event)) {
			event->xRc = HvLpEvent_Rc_InvalidSubtype;
			HvCallEvent_ackLpEvent(event);
		}
	}
}
255
/*
 * Probe virtual disk "unit" on the hosting OS/400 partition: try a
 * read-write open, fall back once to read-only, and on success create
 * a viodasd device-tree node.  The open is immediately closed again --
 * this is discovery only.
 */
static void __init probe_disk(struct device_node *vio_root, u32 unit)
{
	HvLpEvent_Rc hvrc;
	struct vio_waitevent we;
	u16 flags = 0;

retry:
	init_completion(&we.com);

	/* Send the open event to OS/400 */
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_blockio | vioblockopen,
			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			(u64)(unsigned long)&we, VIOVERSION << 16,
			((u64)unit << 48) | ((u64)flags<< 32),
			0, 0, 0);
	if (hvrc != 0) {
		printk(KERN_WARNING "probe_disk: bad rc on HV open %d\n",
			(int)hvrc);
		return;
	}

	/* handle_block_event() completes this when the ack arrives. */
	wait_for_completion(&we.com);

	if (we.rc != 0) {
		if (flags != 0)
			return;
		/* try again with read only flag set */
		flags = vioblockflags_ro;
		goto retry;
	}

	/* Send the close event to OS/400. We DON'T expect a response */
	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_blockio | vioblockclose,
			HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			0, VIOVERSION << 16,
			((u64)unit << 48) | ((u64)flags << 32),
			0, 0, 0);
	if (hvrc != 0) {
		printk(KERN_WARNING "probe_disk: "
		       "bad rc sending event to OS/400 %d\n", (int)hvrc);
		return;
	}

	do_device_node(vio_root, "viodasd", FIRST_VIODASD + unit, unit,
			"block", "IBM,iSeries-viodasd", NULL);
}
310
/*
 * Enumerate all virtual disks: open the block-io path to the host
 * partition, probe every architected unit, then close the path again.
 */
static void __init get_viodasd_info(struct device_node *vio_root)
{
	int rc;
	u32 unit;

	rc = viopath_open(viopath_hostLp, viomajorsubtype_blockio, 2);
	if (rc) {
		printk(KERN_WARNING "get_viodasd_info: "
		       "error opening path to host partition %d\n",
				viopath_hostLp);
		return;
	}

	/* Initialize our request handler */
	vio_setHandler(viomajorsubtype_blockio, handle_block_event);

	for (unit = 0; unit < HVMAXARCHITECTEDVIRTUALDISKS; unit++)
		probe_disk(vio_root, unit);

	vio_clearHandler(viomajorsubtype_blockio);
	viopath_close(viopath_hostLp, viomajorsubtype_blockio, 2);
}
333
334static void __init handle_cd_event(struct HvLpEvent *event)
335{
336 struct viocdlpevent *bevent;
337 struct vio_waitevent *pwe;
338
339 if (!event)
340 /* Notification that a partition went away! */
341 return;
342
343 /* First, we should NEVER get an int here...only acks */
344 if (hvlpevent_is_int(event)) {
345 printk(KERN_WARNING "handle_cd_event: got an unexpected int\n");
346 if (hvlpevent_need_ack(event)) {
347 event->xRc = HvLpEvent_Rc_InvalidSubtype;
348 HvCallEvent_ackLpEvent(event);
349 }
350 return;
351 }
352
353 bevent = (struct viocdlpevent *)event;
354
355 switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
356 case viocdgetinfo:
357 pwe = (struct vio_waitevent *)event->xCorrelationToken;
358 pwe->rc = event->xRc;
359 pwe->sub_result = bevent->sub_result;
360 complete(&pwe->com);
361 break;
362
363 default:
364 printk(KERN_WARNING "handle_cd_event: "
365 "message with unexpected subtype %0x04X!\n",
366 event->xSubtype & VIOMINOR_SUBTYPE_MASK);
367 if (hvlpevent_need_ack(event)) {
368 event->xRc = HvLpEvent_Rc_InvalidSubtype;
369 HvCallEvent_ackLpEvent(event);
370 }
371 }
372}
373
374static void __init get_viocd_info(struct device_node *vio_root)
375{
376 HvLpEvent_Rc hvrc;
377 u32 unit;
378 struct vio_waitevent we;
379 struct vio_resource *unitinfo;
380 dma_addr_t unitinfo_dmaaddr;
381 int ret;
382
383 ret = viopath_open(viopath_hostLp, viomajorsubtype_cdio, 2);
384 if (ret) {
385 printk(KERN_WARNING
386 "get_viocd_info: error opening path to host partition %d\n",
387 viopath_hostLp);
388 return;
389 }
390
391 /* Initialize our request handler */
392 vio_setHandler(viomajorsubtype_cdio, handle_cd_event);
393
394 unitinfo = iseries_hv_alloc(
395 sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALCDROMS,
396 &unitinfo_dmaaddr, GFP_ATOMIC);
397 if (!unitinfo) {
398 printk(KERN_WARNING
399 "get_viocd_info: error allocating unitinfo\n");
400 goto clear_handler;
401 }
402
403 memset(unitinfo, 0, sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALCDROMS);
404
405 init_completion(&we.com);
406
407 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
408 HvLpEvent_Type_VirtualIo,
409 viomajorsubtype_cdio | viocdgetinfo,
410 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
411 viopath_sourceinst(viopath_hostLp),
412 viopath_targetinst(viopath_hostLp),
413 (u64)&we, VIOVERSION << 16, unitinfo_dmaaddr, 0,
414 sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALCDROMS, 0);
415 if (hvrc != HvLpEvent_Rc_Good) {
416 printk(KERN_WARNING
417 "get_viocd_info: cdrom error sending event. rc %d\n",
418 (int)hvrc);
419 goto hv_free;
420 }
421
422 wait_for_completion(&we.com);
423
424 if (we.rc) {
425 printk(KERN_WARNING "get_viocd_info: bad rc %d:0x%04X\n",
426 we.rc, we.sub_result);
427 goto hv_free;
428 }
429
430 for (unit = 0; (unit < HVMAXARCHITECTEDVIRTUALCDROMS) &&
431 unitinfo[unit].rsrcname[0]; unit++) {
432 if (!do_device_node(vio_root, "viocd", FIRST_VIOCD + unit, unit,
433 "block", "IBM,iSeries-viocd", &unitinfo[unit]))
434 break;
435 }
436
437 hv_free:
438 iseries_hv_free(sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALCDROMS,
439 unitinfo, unitinfo_dmaaddr);
440 clear_handler:
441 vio_clearHandler(viomajorsubtype_cdio);
442 viopath_close(viopath_hostLp, viomajorsubtype_cdio, 2);
443}
444
/* Handle interrupt events for tape: only viotapegetinfo acks are
 * expected; they wake the waiter in get_viotape_info(). */
static void __init handle_tape_event(struct HvLpEvent *event)
{
	struct vio_waitevent *we;
	struct viotapelpevent *tevent = (struct viotapelpevent *)event;

	if (event == NULL)
		/* Notification that a partition went away! */
		return;

	we = (struct vio_waitevent *)event->xCorrelationToken;
	switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
	case viotapegetinfo:
		we->rc = tevent->sub_type_result;
		complete(&we->com);
		break;
	default:
		printk(KERN_WARNING "handle_tape_event: weird ack\n");
	}
}
465
/*
 * Enumerate virtual tape units: fetch the unit-info table from the
 * host partition via DMA and create a viotape device-tree node for
 * every populated entry.
 */
static void __init get_viotape_info(struct device_node *vio_root)
{
	HvLpEvent_Rc hvrc;
	u32 unit;
	struct vio_resource *unitinfo;
	dma_addr_t unitinfo_dmaaddr;
	size_t len = sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALTAPES;
	struct vio_waitevent we;
	int ret;

	init_completion(&we.com);

	ret = viopath_open(viopath_hostLp, viomajorsubtype_tape, 2);
	if (ret) {
		printk(KERN_WARNING "get_viotape_info: "
		       "error on viopath_open to hostlp %d\n", ret);
		return;
	}

	vio_setHandler(viomajorsubtype_tape, handle_tape_event);

	unitinfo = iseries_hv_alloc(len, &unitinfo_dmaaddr, GFP_ATOMIC);
	if (!unitinfo)
		goto clear_handler;

	memset(unitinfo, 0, len);

	hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
			HvLpEvent_Type_VirtualIo,
			viomajorsubtype_tape | viotapegetinfo,
			HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
			viopath_sourceinst(viopath_hostLp),
			viopath_targetinst(viopath_hostLp),
			(u64)(unsigned long)&we, VIOVERSION << 16,
			unitinfo_dmaaddr, len, 0, 0);
	if (hvrc != HvLpEvent_Rc_Good) {
		printk(KERN_WARNING "get_viotape_info: hv error on op %d\n",
				(int)hvrc);
		goto hv_free;
	}

	/* handle_tape_event() completes this when the ack arrives. */
	wait_for_completion(&we.com);

	/* Create a node per reported unit (stop at the first empty slot). */
	for (unit = 0; (unit < HVMAXARCHITECTEDVIRTUALTAPES) &&
			unitinfo[unit].rsrcname[0]; unit++) {
		if (!do_device_node(vio_root, "viotape", FIRST_VIOTAPE + unit,
					unit, "byte", "IBM,iSeries-viotape",
					&unitinfo[unit]))
			break;
	}

 hv_free:
	iseries_hv_free(len, unitinfo, unitinfo_dmaaddr);
 clear_handler:
	vio_clearHandler(viomajorsubtype_tape);
	viopath_close(viopath_hostLp, viomajorsubtype_tape, 2);
}
523
/*
 * Arch initcall: on iSeries firmware, initialise the vio iommu tables,
 * locate (or establish) the hosting partition, then enumerate virtual
 * disks, CD-ROMs and tapes under /vdevice.  Returns 0 on success or
 * -ENODEV when iSeries/vdevice/host is unavailable.
 */
static int __init iseries_vio_init(void)
{
	struct device_node *vio_root;
	int ret = -ENODEV;

	if (!firmware_has_feature(FW_FEATURE_ISERIES))
		goto out;

	iommu_vio_init();

	vio_root = of_find_node_by_path("/vdevice");
	if (!vio_root)
		goto out;

	if (viopath_hostLp == HvLpIndexInvalid) {
		vio_set_hostlp();
		/* If we don't have a host, bail out */
		if (viopath_hostLp == HvLpIndexInvalid)
			goto put_node;
	}

	get_viodasd_info(vio_root);
	get_viocd_info(vio_root);
	get_viotape_info(vio_root);

	ret = 0;

 put_node:
	of_node_put(vio_root);
 out:
	return ret;
}
arch_initcall(iseries_vio_init);
diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c
deleted file mode 100644
index 40dad0840eb3..000000000000
--- a/arch/powerpc/platforms/iseries/viopath.c
+++ /dev/null
@@ -1,677 +0,0 @@
1/* -*- linux-c -*-
2 *
3 * iSeries Virtual I/O Message Path code
4 *
5 * Authors: Dave Boutcher <boutcher@us.ibm.com>
6 * Ryan Arnold <ryanarn@us.ibm.com>
7 * Colin Devilbiss <devilbis@us.ibm.com>
8 *
9 * (C) Copyright 2000-2005 IBM Corporation
10 *
11 * This code is used by the iSeries virtual disk, cd,
12 * tape, and console to communicate with OS/400 in another
13 * partition.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of the
 18 * License, or (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful, but
21 * WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 * General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software Foundation,
27 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 *
29 */
30#include <linux/export.h>
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/errno.h>
34#include <linux/vmalloc.h>
35#include <linux/string.h>
36#include <linux/proc_fs.h>
37#include <linux/dma-mapping.h>
38#include <linux/wait.h>
39#include <linux/seq_file.h>
40#include <linux/interrupt.h>
41#include <linux/completion.h>
42
43#include <asm/system.h>
44#include <asm/uaccess.h>
45#include <asm/prom.h>
46#include <asm/firmware.h>
47#include <asm/iseries/hv_types.h>
48#include <asm/iseries/hv_lp_event.h>
49#include <asm/iseries/hv_lp_config.h>
50#include <asm/iseries/mf.h>
51#include <asm/iseries/vio.h>
52
53/* Status of the path to each other partition in the system.
54 * This is overkill, since we will only ever establish connections
55 * to our hosting partition and the primary partition on the system.
56 * But this allows for other support in the future.
57 */
58static struct viopathStatus {
59	int isOpen;		/* Did we open the path? */
60	int isActive;		/* Do we have a mon msg outstanding */
61	int users[VIO_MAX_SUBTYPES];	/* per-subtype open count (viopath_open/close) */
62	HvLpInstanceId mSourceInst;	/* cached source LP-event instance id */
63	HvLpInstanceId mTargetInst;	/* cached target LP-event instance id */
64	int numberAllocated;	/* LP events allocated for this path */
65} viopathStatus[HVMAXARCHITECTEDLPS];
66
67static DEFINE_SPINLOCK(statuslock);
68
69/*
70 * For each kind of event we allocate a buffer that is
71 * guaranteed not to cross a page boundary
72 */
73static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256]
74 __attribute__((__aligned__(4096)));
75static atomic_t event_buffer_available[VIO_MAX_SUBTYPES];
76static int event_buffer_initialised;
77
78static void handleMonitorEvent(struct HvLpEvent *event);
79
80/*
81 * We use this structure to handle asynchronous responses. The caller
82 * blocks on the semaphore and the handler posts the semaphore. However,
83 * if system_state is not SYSTEM_RUNNING, then wait_atomic is used ...
84 */
85struct alloc_parms {
86 struct completion done;
87 int number;
88 atomic_t wait_atomic;
89 int used_wait_atomic;
90};
91
92/* Put a sequence number in each mon msg. The value is not
93 * important. Start at something other than 0 just for
94 * readability. wrapping this is ok.
95 */
96static u8 viomonseq = 22;
97
98/* Our hosting logical partition. We get this at startup
99 * time, and different modules access this variable directly.
100 */
101HvLpIndex viopath_hostLp = HvLpIndexInvalid;
102EXPORT_SYMBOL(viopath_hostLp);
103HvLpIndex viopath_ourLp = HvLpIndexInvalid;
104EXPORT_SYMBOL(viopath_ourLp);
105
106/* For each kind of incoming event we set a pointer to a
107 * routine to call.
108 */
109static vio_event_handler_t *vio_handler[VIO_MAX_SUBTYPES];
110
111#define VIOPATH_KERN_WARN KERN_WARNING "viopath: "
112#define VIOPATH_KERN_INFO KERN_INFO "viopath: "
113
114static int proc_viopath_show(struct seq_file *m, void *v)
115{
116 char *buf;
117 u16 vlanMap;
118 dma_addr_t handle;
119 HvLpEvent_Rc hvrc;
120 DECLARE_COMPLETION_ONSTACK(done);
121 struct device_node *node;
122 const char *sysid;
123
124 buf = kzalloc(HW_PAGE_SIZE, GFP_KERNEL);
125 if (!buf)
126 return 0;
127
128 handle = iseries_hv_map(buf, HW_PAGE_SIZE, DMA_FROM_DEVICE);
129
130 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
131 HvLpEvent_Type_VirtualIo,
132 viomajorsubtype_config | vioconfigget,
133 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
134 viopath_sourceinst(viopath_hostLp),
135 viopath_targetinst(viopath_hostLp),
136 (u64)(unsigned long)&done, VIOVERSION << 16,
137 ((u64)handle) << 32, HW_PAGE_SIZE, 0, 0);
138
139 if (hvrc != HvLpEvent_Rc_Good)
140 printk(VIOPATH_KERN_WARN "hv error on op %d\n", (int)hvrc);
141
142 wait_for_completion(&done);
143
144 vlanMap = HvLpConfig_getVirtualLanIndexMap();
145
146 buf[HW_PAGE_SIZE-1] = '\0';
147 seq_printf(m, "%s", buf);
148
149 iseries_hv_unmap(handle, HW_PAGE_SIZE, DMA_FROM_DEVICE);
150 kfree(buf);
151
152 seq_printf(m, "AVAILABLE_VETH=%x\n", vlanMap);
153
154 node = of_find_node_by_path("/");
155 sysid = NULL;
156 if (node != NULL)
157 sysid = of_get_property(node, "system-id", NULL);
158
159 if (sysid == NULL)
160 seq_printf(m, "SRLNBR=<UNKNOWN>\n");
161 else
162 /* Skip "IBM," on front of serial number, see dt.c */
163 seq_printf(m, "SRLNBR=%s\n", sysid + 4);
164
165 of_node_put(node);
166
167 return 0;
168}
169
/* seq_file open hook for /proc/iSeries/config */
170static int proc_viopath_open(struct inode *inode, struct file *file)
171{
172	return single_open(file, proc_viopath_show, NULL);
173}
174
/* File operations for the single-record /proc/iSeries/config file */
175static const struct file_operations proc_viopath_operations = {
176	.open		= proc_viopath_open,
177	.read		= seq_read,
178	.llseek		= seq_lseek,
179	.release	= single_release,
180};
181
182static int __init vio_proc_init(void)
183{
184 if (!firmware_has_feature(FW_FEATURE_ISERIES))
185 return 0;
186
187 proc_create("iSeries/config", 0, NULL, &proc_viopath_operations);
188 return 0;
189}
190__initcall(vio_proc_init);
191
192/* See if a given LP is active. Allow for invalid lps to be passed in
193 * and just return invalid
194 */
195int viopath_isactive(HvLpIndex lp)
196{
197 if (lp == HvLpIndexInvalid)
198 return 0;
199 if (lp < HVMAXARCHITECTEDLPS)
200 return viopathStatus[lp].isActive;
201 else
202 return 0;
203}
204EXPORT_SYMBOL(viopath_isactive);
205
206/*
207 * We cache the source and target instance ids for each
208 * partition.
209 */
/* NOTE(review): no bounds check — callers must pass a valid lp index. */
210HvLpInstanceId viopath_sourceinst(HvLpIndex lp)
211{
212	return viopathStatus[lp].mSourceInst;
213}
214EXPORT_SYMBOL(viopath_sourceinst);
215
/* NOTE(review): no bounds check — callers must pass a valid lp index. */
216HvLpInstanceId viopath_targetinst(HvLpIndex lp)
217{
218	return viopathStatus[lp].mTargetInst;
219}
220EXPORT_SYMBOL(viopath_targetinst);
221
222/*
223 * Send a monitor message.  This is a message with the acknowledge
224 * bit on that the other side will NOT explicitly acknowledge.  When
225 * the other side goes down, the hypervisor will acknowledge any
226 * outstanding messages....so we will know when the other side dies.
227 */
228static void sendMonMsg(HvLpIndex remoteLp)
229{
230	HvLpEvent_Rc hvrc;
231
	/* Refresh the cached instance ids for this partition first. */
232	viopathStatus[remoteLp].mSourceInst =
233		HvCallEvent_getSourceLpInstanceId(remoteLp,
234				HvLpEvent_Type_VirtualIo);
235	viopathStatus[remoteLp].mTargetInst =
236		HvCallEvent_getTargetLpInstanceId(remoteLp,
237				HvLpEvent_Type_VirtualIo);
238
239	/*
240	 * Deliberately ignore the return code here.  if we call this
241	 * more than once, we don't care.
242	 */
243	vio_setHandler(viomajorsubtype_monitor, handleMonitorEvent);
244
	/* DeferredAck: the hypervisor acks this only when the peer dies. */
245	hvrc = HvCallEvent_signalLpEventFast(remoteLp, HvLpEvent_Type_VirtualIo,
246			viomajorsubtype_monitor, HvLpEvent_AckInd_DoAck,
247			HvLpEvent_AckType_DeferredAck,
248			viopathStatus[remoteLp].mSourceInst,
249			viopathStatus[remoteLp].mTargetInst,
250			viomonseq++, 0, 0, 0, 0, 0);
251
252	if (hvrc == HvLpEvent_Rc_Good)
253		viopathStatus[remoteLp].isActive = 1;
254	else {
255		printk(VIOPATH_KERN_WARN "could not connect to partition %d\n",
256				remoteLp);
257		viopathStatus[remoteLp].isActive = 0;
258	}
259}
260
/*
 * Handler for monitor-subtype events.  An interrupt means the peer is
 * (re)establishing the path; an ack means the hypervisor flushed our
 * deferred-ack monitor message, i.e. the peer partition died, in which
 * case every registered subtype handler is notified with NULL.
 */
261static void handleMonitorEvent(struct HvLpEvent *event)
262{
263	HvLpIndex remoteLp;
264	int i;
265
266	/*
267	 * This handler is _also_ called as part of the loop
268	 * at the end of this routine, so it must be able to
269	 * ignore NULL events...
270	 */
271	if (!event)
272		return;
273
274	/*
275	 * First see if this is just a normal monitor message from the
276	 * other partition
277	 */
278	if (hvlpevent_is_int(event)) {
279		remoteLp = event->xSourceLp;
280		if (!viopathStatus[remoteLp].isActive)
281			sendMonMsg(remoteLp);
282		return;
283	}
284
285	/*
286	 * This path is for an acknowledgement; the other partition
287	 * died
288	 */
289	remoteLp = event->xTargetLp;
290	if ((event->xSourceInstanceId != viopathStatus[remoteLp].mSourceInst) ||
291	    (event->xTargetInstanceId != viopathStatus[remoteLp].mTargetInst)) {
292		printk(VIOPATH_KERN_WARN "ignoring ack....mismatched instances\n");
293		return;
294	}
295
296	printk(VIOPATH_KERN_WARN "partition %d ended\n", remoteLp);
297
298	viopathStatus[remoteLp].isActive = 0;
299
300	/*
301	 * For each active handler, pass them a NULL
302	 * message to indicate that the other partition
303	 * died
304	 */
305	for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
306		if (vio_handler[i] != NULL)
307			(*vio_handler[i])(NULL);
308	}
309}
310
311int vio_setHandler(int subtype, vio_event_handler_t *beh)
312{
313 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
314 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
315 return -EINVAL;
316 if (vio_handler[subtype] != NULL)
317 return -EBUSY;
318 vio_handler[subtype] = beh;
319 return 0;
320}
321EXPORT_SYMBOL(vio_setHandler);
322
323int vio_clearHandler(int subtype)
324{
325 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
326 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
327 return -EINVAL;
328 if (vio_handler[subtype] == NULL)
329 return -EAGAIN;
330 vio_handler[subtype] = NULL;
331 return 0;
332}
333EXPORT_SYMBOL(vio_clearHandler);
334
/*
 * Handler for config-subtype events.  We only ever *send* config
 * requests, so an incoming interrupt is unexpected and gets nacked;
 * an ack wakes the requester blocked in proc_viopath_show() via the
 * completion stored in the event's correlation token.
 */
335static void handleConfig(struct HvLpEvent *event)
336{
337	if (!event)
338		return;
339	if (hvlpevent_is_int(event)) {
340		printk(VIOPATH_KERN_WARN
341		       "unexpected config request from partition %d",
342		       event->xSourceLp);
343
344		if (hvlpevent_need_ack(event)) {
345			event->xRc = HvLpEvent_Rc_InvalidSubtype;
346			HvCallEvent_ackLpEvent(event);
347		}
348		return;
349	}
350
351	complete((struct completion *)event->xCorrelationToken);
352}
353
354/*
355 * Initialization of the hosting partition
356 */
void vio_set_hostlp(void)
{
	/*
	 * If this has already been set then we DON'T want to either change
	 * it or re-register the proc file system
	 */
	if (viopath_hostLp != HvLpIndexInvalid)
		return;

	/*
	 * Figure out our hosting partition.  This isn't allowed to change
	 * while we're active
	 */
	viopath_ourLp = HvLpConfig_getLpIndex();
	viopath_hostLp = HvLpConfig_getHostingLpIndex(viopath_ourLp);

	/* Only hook up config handling once we actually have a host. */
	if (viopath_hostLp != HvLpIndexInvalid)
		vio_setHandler(viomajorsubtype_config, handleConfig);
}
EXPORT_SYMBOL(vio_set_hostlp);
377
378static void vio_handleEvent(struct HvLpEvent *event)
379{
380 HvLpIndex remoteLp;
381 int subtype = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK)
382 >> VIOMAJOR_SUBTYPE_SHIFT;
383
384 if (hvlpevent_is_int(event)) {
385 remoteLp = event->xSourceLp;
386 /*
387 * The isActive is checked because if the hosting partition
388 * went down and came back up it would not be active but it
389 * would have different source and target instances, in which
390 * case we'd want to reset them. This case really protects
391 * against an unauthorized active partition sending interrupts
392 * or acks to this linux partition.
393 */
394 if (viopathStatus[remoteLp].isActive
395 && (event->xSourceInstanceId !=
396 viopathStatus[remoteLp].mTargetInst)) {
397 printk(VIOPATH_KERN_WARN
398 "message from invalid partition. "
399 "int msg rcvd, source inst (%d) doesn't match (%d)\n",
400 viopathStatus[remoteLp].mTargetInst,
401 event->xSourceInstanceId);
402 return;
403 }
404
405 if (viopathStatus[remoteLp].isActive
406 && (event->xTargetInstanceId !=
407 viopathStatus[remoteLp].mSourceInst)) {
408 printk(VIOPATH_KERN_WARN
409 "message from invalid partition. "
410 "int msg rcvd, target inst (%d) doesn't match (%d)\n",
411 viopathStatus[remoteLp].mSourceInst,
412 event->xTargetInstanceId);
413 return;
414 }
415 } else {
416 remoteLp = event->xTargetLp;
417 if (event->xSourceInstanceId !=
418 viopathStatus[remoteLp].mSourceInst) {
419 printk(VIOPATH_KERN_WARN
420 "message from invalid partition. "
421 "ack msg rcvd, source inst (%d) doesn't match (%d)\n",
422 viopathStatus[remoteLp].mSourceInst,
423 event->xSourceInstanceId);
424 return;
425 }
426
427 if (event->xTargetInstanceId !=
428 viopathStatus[remoteLp].mTargetInst) {
429 printk(VIOPATH_KERN_WARN
430 "message from invalid partition. "
431 "viopath: ack msg rcvd, target inst (%d) doesn't match (%d)\n",
432 viopathStatus[remoteLp].mTargetInst,
433 event->xTargetInstanceId);
434 return;
435 }
436 }
437
438 if (vio_handler[subtype] == NULL) {
439 printk(VIOPATH_KERN_WARN
440 "unexpected virtual io event subtype %d from partition %d\n",
441 event->xSubtype, remoteLp);
442 /* No handler. Ack if necessary */
443 if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) {
444 event->xRc = HvLpEvent_Rc_InvalidSubtype;
445 HvCallEvent_ackLpEvent(event);
446 }
447 return;
448 }
449
450 /* This innocuous little line is where all the real work happens */
451 (*vio_handler[subtype])(event);
452}
453
454static void viopath_donealloc(void *parm, int number)
455{
456 struct alloc_parms *parmsp = parm;
457
458 parmsp->number = number;
459 if (parmsp->used_wait_atomic)
460 atomic_set(&parmsp->wait_atomic, 0);
461 else
462 complete(&parmsp->done);
463}
464
/*
 * Ask the hypervisor (via mf_allocate_lp_events) for numEvents LP
 * events on the path to remoteLp and block until the request is
 * answered.  Before the scheduler is running (system_state !=
 * SYSTEM_RUNNING) we cannot sleep on a completion, so we spin on an
 * atomic flag that viopath_donealloc() clears.  Returns the number of
 * events actually allocated.
 */
465static int allocateEvents(HvLpIndex remoteLp, int numEvents)
466{
467	struct alloc_parms parms;
468
469	if (system_state != SYSTEM_RUNNING) {
470		parms.used_wait_atomic = 1;
471		atomic_set(&parms.wait_atomic, 1);
472	} else {
473		parms.used_wait_atomic = 0;
474		init_completion(&parms.done);
475	}
476	mf_allocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo, 250,	/* It would be nice to put a real number here! */
477			numEvents, &viopath_donealloc, &parms);
478	if (system_state != SYSTEM_RUNNING) {
479		while (atomic_read(&parms.wait_atomic))
480			mb();
481	} else
482		wait_for_completion(&parms.done);
483	return parms.number;
484}
485
/*
 * Open (or add a user to) the virtual-IO path to remoteLp for the
 * given major subtype, allocating numReq LP events for it.  The first
 * opener of a path also opens the HV event path, allocates an initial
 * event, caches the instance ids, registers vio_handleEvent() and
 * starts the monitor message.  statuslock is dropped around
 * allocateEvents() because it can sleep.  Returns 0, -EINVAL on a bad
 * lp/subtype, or -ENOMEM if no events could be allocated.
 */
486int viopath_open(HvLpIndex remoteLp, int subtype, int numReq)
487{
488	int i;
489	unsigned long flags;
490	int tempNumAllocated;
491
492	if ((remoteLp >= HVMAXARCHITECTEDLPS) || (remoteLp == HvLpIndexInvalid))
493		return -EINVAL;
494
495	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
496	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
497		return -EINVAL;
498
499	spin_lock_irqsave(&statuslock, flags);
500
	/* Lazily mark all per-subtype static event buffers available. */
501	if (!event_buffer_initialised) {
502		for (i = 0; i < VIO_MAX_SUBTYPES; i++)
503			atomic_set(&event_buffer_available[i], 1);
504		event_buffer_initialised = 1;
505	}
506
507	viopathStatus[remoteLp].users[subtype]++;
508
509	if (!viopathStatus[remoteLp].isOpen) {
510		viopathStatus[remoteLp].isOpen = 1;
511		HvCallEvent_openLpEventPath(remoteLp, HvLpEvent_Type_VirtualIo);
512
513		/*
514		 * Don't hold the spinlock during an operation that
515		 * can sleep.
516		 */
517		spin_unlock_irqrestore(&statuslock, flags);
518		tempNumAllocated = allocateEvents(remoteLp, 1);
519		spin_lock_irqsave(&statuslock, flags);
520
521		viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
522
523		if (viopathStatus[remoteLp].numberAllocated == 0) {
524			HvCallEvent_closeLpEventPath(remoteLp,
525					HvLpEvent_Type_VirtualIo);
526
527			spin_unlock_irqrestore(&statuslock, flags);
528			return -ENOMEM;
529		}
530
531		viopathStatus[remoteLp].mSourceInst =
532			HvCallEvent_getSourceLpInstanceId(remoteLp,
533					HvLpEvent_Type_VirtualIo);
534		viopathStatus[remoteLp].mTargetInst =
535			HvCallEvent_getTargetLpInstanceId(remoteLp,
536					HvLpEvent_Type_VirtualIo);
537		HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
538					  &vio_handleEvent);
539		sendMonMsg(remoteLp);
540		printk(VIOPATH_KERN_INFO "opening connection to partition %d, "
541				"setting sinst %d, tinst %d\n",
542				remoteLp, viopathStatus[remoteLp].mSourceInst,
543				viopathStatus[remoteLp].mTargetInst);
544	}
545
546	spin_unlock_irqrestore(&statuslock, flags);
547	tempNumAllocated = allocateEvents(remoteLp, numReq);
548	spin_lock_irqsave(&statuslock, flags);
549	viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
550	spin_unlock_irqrestore(&statuslock, flags);
551
552	return 0;
553}
554EXPORT_SYMBOL(viopath_open);
555
/*
 * Drop one user of the path to remoteLp for the given subtype and
 * return numReq LP events to the hypervisor.  When the last user of
 * the path goes away, the HV event path is closed and the static
 * event buffers are marked unavailable again.  Returns 0 or -EINVAL
 * on a bad lp/subtype.
 */
556int viopath_close(HvLpIndex remoteLp, int subtype, int numReq)
557{
558	unsigned long flags;
559	int i;
560	int numOpen;
561	struct alloc_parms parms;
562
563	if ((remoteLp >= HVMAXARCHITECTEDLPS) || (remoteLp == HvLpIndexInvalid))
564		return -EINVAL;
565
566	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
567	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
568		return -EINVAL;
569
570	spin_lock_irqsave(&statuslock, flags);
571	/*
572	 * If the viopath_close somehow gets called before a
573	 * viopath_open it could decrement to -1 which is a non
574	 * recoverable state so we'll prevent this from
575	 * happening.
576	 */
577	if (viopathStatus[remoteLp].users[subtype] > 0)
578		viopathStatus[remoteLp].users[subtype]--;
579
580	spin_unlock_irqrestore(&statuslock, flags);
581
	/* Deallocation can sleep, so wait outside the lock. */
582	parms.used_wait_atomic = 0;
583	init_completion(&parms.done);
584	mf_deallocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo,
585			      numReq, &viopath_donealloc, &parms);
586	wait_for_completion(&parms.done);
587
588	spin_lock_irqsave(&statuslock, flags);
589	for (i = 0, numOpen = 0; i < VIO_MAX_SUBTYPES; i++)
590		numOpen += viopathStatus[remoteLp].users[i];
591
592	if ((viopathStatus[remoteLp].isOpen) && (numOpen == 0)) {
593		printk(VIOPATH_KERN_INFO "closing connection to partition %d\n",
594				remoteLp);
595
596		HvCallEvent_closeLpEventPath(remoteLp,
597				HvLpEvent_Type_VirtualIo);
598		viopathStatus[remoteLp].isOpen = 0;
599		viopathStatus[remoteLp].isActive = 0;
600
601		for (i = 0; i < VIO_MAX_SUBTYPES; i++)
602			atomic_set(&event_buffer_available[i], 0);
603		event_buffer_initialised = 0;
604	}
605	spin_unlock_irqrestore(&statuslock, flags);
606	return 0;
607}
608EXPORT_SYMBOL(viopath_close);
609
610void *vio_get_event_buffer(int subtype)
611{
612 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
613 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
614 return NULL;
615
616 if (atomic_dec_if_positive(&event_buffer_available[subtype]) == 0)
617 return &event_buffer[subtype * 256];
618 else
619 return NULL;
620}
621EXPORT_SYMBOL(vio_get_event_buffer);
622
623void vio_free_event_buffer(int subtype, void *buffer)
624{
625 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
626 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) {
627 printk(VIOPATH_KERN_WARN
628 "unexpected subtype %d freeing event buffer\n", subtype);
629 return;
630 }
631
632 if (atomic_read(&event_buffer_available[subtype]) != 0) {
633 printk(VIOPATH_KERN_WARN
634 "freeing unallocated event buffer, subtype %d\n",
635 subtype);
636 return;
637 }
638
639 if (buffer != &event_buffer[subtype * 256]) {
640 printk(VIOPATH_KERN_WARN
641 "freeing invalid event buffer, subtype %d\n", subtype);
642 }
643
644 atomic_set(&event_buffer_available[subtype], 1);
645}
646EXPORT_SYMBOL(vio_free_event_buffer);
647
/* Canned entries returned by vio_lookup_rc() for rc==0 and unknown rcs. */
648static const struct vio_error_entry vio_no_error =
649	{ 0, 0, "Non-VIO Error" };
650static const struct vio_error_entry vio_unknown_error =
651	{ 0, EIO, "Unknown Error" };
652
/* Fallback rc -> errno/message table; terminated by a zero rc entry. */
653static const struct vio_error_entry vio_default_errors[] = {
654	{0x0001, EIO, "No Connection"},
655	{0x0002, EIO, "No Receiver"},
656	{0x0003, EIO, "No Buffer Available"},
657	{0x0004, EBADRQC, "Invalid Message Type"},
658	{0x0000, 0, NULL},
659};
660
661const struct vio_error_entry *vio_lookup_rc(
662 const struct vio_error_entry *local_table, u16 rc)
663{
664 const struct vio_error_entry *cur;
665
666 if (!rc)
667 return &vio_no_error;
668 if (local_table)
669 for (cur = local_table; cur->rc; ++cur)
670 if (cur->rc == rc)
671 return cur;
672 for (cur = vio_default_errors; cur->rc; ++cur)
673 if (cur->rc == rc)
674 return cur;
675 return &vio_unknown_error;
676}
677EXPORT_SYMBOL(vio_lookup_rc);
diff --git a/arch/powerpc/platforms/iseries/vpd_areas.h b/arch/powerpc/platforms/iseries/vpd_areas.h
deleted file mode 100644
index feb001f3a5fe..000000000000
--- a/arch/powerpc/platforms/iseries/vpd_areas.h
+++ /dev/null
@@ -1,88 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ISERIES_VPD_AREAS_H
19#define _ISERIES_VPD_AREAS_H
20
21/*
22 * This file defines the address and length of all of the VPD area passed to
23 * the OS from PLIC (most of which start from the SP).
24 */
25
26#include <asm/types.h>
27
28/* VPD Entry index is carved in stone - cannot be changed (easily). */
29#define ItVpdCecVpd 0
30#define ItVpdDynamicSpace 1
31#define ItVpdExtVpd 2
32#define ItVpdExtVpdOnPanel 3
33#define ItVpdFirstPaca 4
34#define ItVpdIoVpd 5
35#define ItVpdIplParms 6
36#define ItVpdMsVpd 7
37#define ItVpdPanelVpd 8
38#define ItVpdLpNaca 9
39#define ItVpdBackplaneAndMaybeClockCardVpd 10
40#define ItVpdRecoveryLogBuffer 11
41#define ItVpdSpCommArea 12
42#define ItVpdSpLogBuffer 13
43#define ItVpdSpLogBufferSave 14
44#define ItVpdSpCardVpd 15
45#define ItVpdFirstProcVpd 16
46#define ItVpdApModelVpd 17
47#define ItVpdClockCardVpd 18
48#define ItVpdBusExtCardVpd 19
49#define ItVpdProcCapacityVpd 20
50#define ItVpdInteractiveCapacityVpd 21
51#define ItVpdFirstSlotLabel 22
52#define ItVpdFirstLpQueue 23
53#define ItVpdFirstL3CacheVpd 24
54#define ItVpdFirstProcFruVpd 25
55
56#define ItVpdMaxEntries 26
57
58#define ItDmaMaxEntries 10
59
60#define ItVpdAreasMaxSlotLabels 192
61
62
63struct ItVpdAreas {
64 u32 xSlicDesc; // Descriptor 000-003
65 u16 xSlicSize; // Size of this control block 004-005
66 u16 xPlicAdjustVpdLens:1; // Flag to indicate new interface006-007
67 u16 xRsvd1:15; // Reserved bits ...
68 u16 xSlicVpdEntries; // Number of VPD entries 008-009
69 u16 xSlicDmaEntries; // Number of DMA entries 00A-00B
70 u16 xSlicMaxLogicalProcs; // Maximum logical processors 00C-00D
71 u16 xSlicMaxPhysicalProcs; // Maximum physical processors 00E-00F
72 u16 xSlicDmaToksOffset; // Offset into this of array 010-011
73 u16 xSlicVpdAdrsOffset; // Offset into this of array 012-013
74 u16 xSlicDmaLensOffset; // Offset into this of array 014-015
75 u16 xSlicVpdLensOffset; // Offset into this of array 016-017
76 u16 xSlicMaxSlotLabels; // Maximum number of slot labels018-019
77 u16 xSlicMaxLpQueues; // Maximum number of LP Queues 01A-01B
78 u8 xRsvd2[4]; // Reserved 01C-01F
79 u64 xRsvd3[12]; // Reserved 020-07F
80 u32 xPlicDmaLens[ItDmaMaxEntries];// Array of DMA lengths 080-0A7
81 u32 xPlicDmaToks[ItDmaMaxEntries];// Array of DMA tokens 0A8-0CF
82 u32 xSlicVpdLens[ItVpdMaxEntries];// Array of VPD lengths 0D0-12F
83 const void *xSlicVpdAdrs[ItVpdMaxEntries];// Array of VPD buffers 130-1EF
84};
85
86extern const struct ItVpdAreas itVpdAreas;
87
88#endif /* _ISERIES_VPD_AREAS_H */
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index 401e3f3f74c8..465ee8f5c086 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -620,7 +620,7 @@ void __init maple_pci_init(void)
620 } 620 }
621 621
622 /* Tell pci.c to not change any resource allocations. */ 622 /* Tell pci.c to not change any resource allocations. */
623 pci_probe_only = 1; 623 pci_add_flags(PCI_PROBE_ONLY);
624} 624}
625 625
626int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel) 626int maple_pci_get_legacy_ide_irq(struct pci_dev *pdev, int channel)
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index 0bcbfe7b2c55..3b7545a51aa9 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -262,7 +262,7 @@ static void __init maple_init_IRQ(void)
262 flags |= MPIC_BIG_ENDIAN; 262 flags |= MPIC_BIG_ENDIAN;
263 263
264 /* XXX Maple specific bits */ 264 /* XXX Maple specific bits */
265 flags |= MPIC_U3_HT_IRQS | MPIC_WANTS_RESET; 265 flags |= MPIC_U3_HT_IRQS;
266 /* All U3/U4 are big-endian, older SLOF firmware doesn't encode this */ 266 /* All U3/U4 are big-endian, older SLOF firmware doesn't encode this */
267 flags |= MPIC_BIG_ENDIAN; 267 flags |= MPIC_BIG_ENDIAN;
268 268
diff --git a/arch/powerpc/platforms/pasemi/pci.c b/arch/powerpc/platforms/pasemi/pci.c
index b6a0ec45c695..aa862713258c 100644
--- a/arch/powerpc/platforms/pasemi/pci.c
+++ b/arch/powerpc/platforms/pasemi/pci.c
@@ -229,9 +229,6 @@ void __init pas_pci_init(void)
229 229
230 /* Setup the linkage between OF nodes and PHBs */ 230 /* Setup the linkage between OF nodes and PHBs */
231 pci_devs_phb_init(); 231 pci_devs_phb_init();
232
233 /* Use the common resource allocation mechanism */
234 pci_probe_only = 1;
235} 232}
236 233
237void __iomem *pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset) 234void __iomem *pasemi_pci_getcfgaddr(struct pci_dev *dev, int offset)
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c
index 98b7a7c13176..e777ad471a48 100644
--- a/arch/powerpc/platforms/pasemi/setup.c
+++ b/arch/powerpc/platforms/pasemi/setup.c
@@ -224,7 +224,7 @@ static __init void pas_init_IRQ(void)
224 openpic_addr = of_read_number(opprop, naddr); 224 openpic_addr = of_read_number(opprop, naddr);
225 printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr); 225 printk(KERN_DEBUG "OpenPIC addr: %lx\n", openpic_addr);
226 226
227 mpic_flags = MPIC_LARGE_VECTORS | MPIC_NO_BIAS; 227 mpic_flags = MPIC_LARGE_VECTORS | MPIC_NO_BIAS | MPIC_NO_RESET;
228 228
229 nmiprop = of_get_property(mpic_node, "nmi-source", NULL); 229 nmiprop = of_get_property(mpic_node, "nmi-source", NULL);
230 if (nmiprop) 230 if (nmiprop)
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index 54d227127c9f..da18b26dcc6f 100644
--- a/arch/powerpc/platforms/powermac/nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -279,7 +279,7 @@ static u32 core99_check(u8* datas)
279 279
280static int sm_erase_bank(int bank) 280static int sm_erase_bank(int bank)
281{ 281{
282 int stat, i; 282 int stat;
283 unsigned long timeout; 283 unsigned long timeout;
284 284
285 u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE; 285 u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE;
@@ -301,11 +301,10 @@ static int sm_erase_bank(int bank)
301 out_8(base, SM_FLASH_CMD_CLEAR_STATUS); 301 out_8(base, SM_FLASH_CMD_CLEAR_STATUS);
302 out_8(base, SM_FLASH_CMD_RESET); 302 out_8(base, SM_FLASH_CMD_RESET);
303 303
304 for (i=0; i<NVRAM_SIZE; i++) 304 if (memchr_inv(base, 0xff, NVRAM_SIZE)) {
305 if (base[i] != 0xff) { 305 printk(KERN_ERR "nvram: Sharp/Micron flash erase failed !\n");
306 printk(KERN_ERR "nvram: Sharp/Micron flash erase failed !\n"); 306 return -ENXIO;
307 return -ENXIO; 307 }
308 }
309 return 0; 308 return 0;
310} 309}
311 310
@@ -336,17 +335,16 @@ static int sm_write_bank(int bank, u8* datas)
336 } 335 }
337 out_8(base, SM_FLASH_CMD_CLEAR_STATUS); 336 out_8(base, SM_FLASH_CMD_CLEAR_STATUS);
338 out_8(base, SM_FLASH_CMD_RESET); 337 out_8(base, SM_FLASH_CMD_RESET);
339 for (i=0; i<NVRAM_SIZE; i++) 338 if (memcmp(base, datas, NVRAM_SIZE)) {
340 if (base[i] != datas[i]) { 339 printk(KERN_ERR "nvram: Sharp/Micron flash write failed !\n");
341 printk(KERN_ERR "nvram: Sharp/Micron flash write failed !\n"); 340 return -ENXIO;
342 return -ENXIO; 341 }
343 }
344 return 0; 342 return 0;
345} 343}
346 344
347static int amd_erase_bank(int bank) 345static int amd_erase_bank(int bank)
348{ 346{
349 int i, stat = 0; 347 int stat = 0;
350 unsigned long timeout; 348 unsigned long timeout;
351 349
352 u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE; 350 u8 __iomem *base = (u8 __iomem *)nvram_data + core99_bank*NVRAM_SIZE;
@@ -382,12 +380,11 @@ static int amd_erase_bank(int bank)
382 /* Reset */ 380 /* Reset */
383 out_8(base, 0xf0); 381 out_8(base, 0xf0);
384 udelay(1); 382 udelay(1);
385 383
386 for (i=0; i<NVRAM_SIZE; i++) 384 if (memchr_inv(base, 0xff, NVRAM_SIZE)) {
387 if (base[i] != 0xff) { 385 printk(KERN_ERR "nvram: AMD flash erase failed !\n");
388 printk(KERN_ERR "nvram: AMD flash erase failed !\n"); 386 return -ENXIO;
389 return -ENXIO; 387 }
390 }
391 return 0; 388 return 0;
392} 389}
393 390
@@ -429,11 +426,10 @@ static int amd_write_bank(int bank, u8* datas)
429 out_8(base, 0xf0); 426 out_8(base, 0xf0);
430 udelay(1); 427 udelay(1);
431 428
432 for (i=0; i<NVRAM_SIZE; i++) 429 if (memcmp(base, datas, NVRAM_SIZE)) {
433 if (base[i] != datas[i]) { 430 printk(KERN_ERR "nvram: AMD flash write failed !\n");
434 printk(KERN_ERR "nvram: AMD flash write failed !\n"); 431 return -ENXIO;
435 return -ENXIO; 432 }
436 }
437 return 0; 433 return 0;
438} 434}
439 435
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 31a7d3a7ce25..43bbe1bda939 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -1059,9 +1059,6 @@ void __init pmac_pci_init(void)
1059 } 1059 }
1060 /* pmac_check_ht_link(); */ 1060 /* pmac_check_ht_link(); */
1061 1061
1062 /* We can allocate missing resources if any */
1063 pci_probe_only = 0;
1064
1065#else /* CONFIG_PPC64 */ 1062#else /* CONFIG_PPC64 */
1066 init_p2pbridge(); 1063 init_p2pbridge();
1067 init_second_ohare(); 1064 init_second_ohare();
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 7761aabfc293..66ad93de1d55 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -61,7 +61,7 @@ static DEFINE_RAW_SPINLOCK(pmac_pic_lock);
61static unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; 61static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
62static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; 62static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
63static int pmac_irq_cascade = -1; 63static int pmac_irq_cascade = -1;
64static struct irq_host *pmac_pic_host; 64static struct irq_domain *pmac_pic_host;
65 65
66static void __pmac_retrigger(unsigned int irq_nr) 66static void __pmac_retrigger(unsigned int irq_nr)
67{ 67{
@@ -268,13 +268,13 @@ static struct irqaction gatwick_cascade_action = {
268 .name = "cascade", 268 .name = "cascade",
269}; 269};
270 270
271static int pmac_pic_host_match(struct irq_host *h, struct device_node *node) 271static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node)
272{ 272{
273 /* We match all, we don't always have a node anyway */ 273 /* We match all, we don't always have a node anyway */
274 return 1; 274 return 1;
275} 275}
276 276
277static int pmac_pic_host_map(struct irq_host *h, unsigned int virq, 277static int pmac_pic_host_map(struct irq_domain *h, unsigned int virq,
278 irq_hw_number_t hw) 278 irq_hw_number_t hw)
279{ 279{
280 if (hw >= max_irqs) 280 if (hw >= max_irqs)
@@ -288,21 +288,10 @@ static int pmac_pic_host_map(struct irq_host *h, unsigned int virq,
288 return 0; 288 return 0;
289} 289}
290 290
291static int pmac_pic_host_xlate(struct irq_host *h, struct device_node *ct, 291static const struct irq_domain_ops pmac_pic_host_ops = {
292 const u32 *intspec, unsigned int intsize,
293 irq_hw_number_t *out_hwirq,
294 unsigned int *out_flags)
295
296{
297 *out_flags = IRQ_TYPE_NONE;
298 *out_hwirq = *intspec;
299 return 0;
300}
301
302static struct irq_host_ops pmac_pic_host_ops = {
303 .match = pmac_pic_host_match, 292 .match = pmac_pic_host_match,
304 .map = pmac_pic_host_map, 293 .map = pmac_pic_host_map,
305 .xlate = pmac_pic_host_xlate, 294 .xlate = irq_domain_xlate_onecell,
306}; 295};
307 296
308static void __init pmac_pic_probe_oldstyle(void) 297static void __init pmac_pic_probe_oldstyle(void)
@@ -352,9 +341,8 @@ static void __init pmac_pic_probe_oldstyle(void)
352 /* 341 /*
353 * Allocate an irq host 342 * Allocate an irq host
354 */ 343 */
355 pmac_pic_host = irq_alloc_host(master, IRQ_HOST_MAP_LINEAR, max_irqs, 344 pmac_pic_host = irq_domain_add_linear(master, max_irqs,
356 &pmac_pic_host_ops, 345 &pmac_pic_host_ops, NULL);
357 max_irqs);
358 BUG_ON(pmac_pic_host == NULL); 346 BUG_ON(pmac_pic_host == NULL);
359 irq_set_default_host(pmac_pic_host); 347 irq_set_default_host(pmac_pic_host);
360 348
@@ -469,7 +457,6 @@ static struct mpic * __init pmac_setup_one_mpic(struct device_node *np,
469 457
470 pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0); 458 pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0);
471 459
472 flags |= MPIC_WANTS_RESET;
473 if (of_get_property(np, "big-endian", NULL)) 460 if (of_get_property(np, "big-endian", NULL))
474 flags |= MPIC_BIG_ENDIAN; 461 flags |= MPIC_BIG_ENDIAN;
475 462
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index 44d769258ebf..a81e5a88fbdf 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -125,7 +125,7 @@ static volatile u32 __iomem *psurge_start;
125static int psurge_type = PSURGE_NONE; 125static int psurge_type = PSURGE_NONE;
126 126
127/* irq for secondary cpus to report */ 127/* irq for secondary cpus to report */
128static struct irq_host *psurge_host; 128static struct irq_domain *psurge_host;
129int psurge_secondary_virq; 129int psurge_secondary_virq;
130 130
131/* 131/*
@@ -176,7 +176,7 @@ static void smp_psurge_cause_ipi(int cpu, unsigned long data)
176 psurge_set_ipi(cpu); 176 psurge_set_ipi(cpu);
177} 177}
178 178
179static int psurge_host_map(struct irq_host *h, unsigned int virq, 179static int psurge_host_map(struct irq_domain *h, unsigned int virq,
180 irq_hw_number_t hw) 180 irq_hw_number_t hw)
181{ 181{
182 irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_percpu_irq); 182 irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_percpu_irq);
@@ -184,7 +184,7 @@ static int psurge_host_map(struct irq_host *h, unsigned int virq,
184 return 0; 184 return 0;
185} 185}
186 186
187struct irq_host_ops psurge_host_ops = { 187static const struct irq_domain_ops psurge_host_ops = {
188 .map = psurge_host_map, 188 .map = psurge_host_map,
189}; 189};
190 190
@@ -192,8 +192,7 @@ static int psurge_secondary_ipi_init(void)
192{ 192{
193 int rc = -ENOMEM; 193 int rc = -ENOMEM;
194 194
195 psurge_host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, 195 psurge_host = irq_domain_add_nomap(NULL, &psurge_host_ops, NULL);
196 &psurge_host_ops, 0);
197 196
198 if (psurge_host) 197 if (psurge_host)
199 psurge_secondary_virq = irq_create_direct_mapping(psurge_host); 198 psurge_secondary_virq = irq_create_direct_mapping(psurge_host);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index f31162cfdaa9..fbdd74dac3ac 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -204,11 +204,10 @@ static void __devinit pnv_ioda_offset_bus(struct pci_bus *bus,
204 pr_devel(" -> OBR %s [%x] +%016llx\n", 204 pr_devel(" -> OBR %s [%x] +%016llx\n",
205 bus->self ? pci_name(bus->self) : "root", flags, offset); 205 bus->self ? pci_name(bus->self) : "root", flags, offset);
206 206
207 for (i = 0; i < 2; i++) { 207 pci_bus_for_each_resource(bus, r, i) {
208 r = bus->resource[i];
209 if (r && (r->flags & flags)) { 208 if (r && (r->flags & flags)) {
210 bus->resource[i]->start += offset; 209 r->start += offset;
211 bus->resource[i]->end += offset; 210 r->end += offset;
212 } 211 }
213 } 212 }
214 list_for_each_entry(dev, &bus->devices, bus_list) 213 list_for_each_entry(dev, &bus->devices, bus_list)
@@ -288,12 +287,17 @@ static void __devinit pnv_ioda_calc_bus(struct pci_bus *bus, unsigned int flags,
288 * assignment algorithm is going to be uber-trivial for now, we 287 * assignment algorithm is going to be uber-trivial for now, we
289 * can try to be smarter later at filling out holes. 288 * can try to be smarter later at filling out holes.
290 */ 289 */
291 start = bus->self ? 0 : bus->resource[bres]->start; 290 if (bus->self) {
292 291 /* No offset for downstream bridges */
293 /* Don't hand out IO 0 */ 292 start = 0;
294 if ((flags & IORESOURCE_IO) && !bus->self) 293 } else {
295 start += 0x1000; 294 /* Offset from the root */
296 295 if (flags & IORESOURCE_IO)
296 /* Don't hand out IO 0 */
297 start = hose->io_resource.start + 0x1000;
298 else
299 start = hose->mem_resources[0].start;
300 }
297 while(!list_empty(&head)) { 301 while(!list_empty(&head)) {
298 w = list_first_entry(&head, struct resource_wrap, link); 302 w = list_first_entry(&head, struct resource_wrap, link);
299 list_del(&w->link); 303 list_del(&w->link);
@@ -321,13 +325,20 @@ static void __devinit pnv_ioda_calc_bus(struct pci_bus *bus, unsigned int flags,
321 empty: 325 empty:
322 /* Only setup P2P's, not the PHB itself */ 326 /* Only setup P2P's, not the PHB itself */
323 if (bus->self) { 327 if (bus->self) {
324 WARN_ON(bus->resource[bres] == NULL); 328 struct resource *res = bus->resource[bres];
325 bus->resource[bres]->start = 0; 329
326 bus->resource[bres]->flags = (*size) ? flags : 0; 330 if (WARN_ON(res == NULL))
327 bus->resource[bres]->end = (*size) ? (*size - 1) : 0; 331 return;
328 332
329 /* Clear prefetch bus resources for now */ 333 /*
330 bus->resource[2]->flags = 0; 334 * FIXME: We should probably export and call
335 * pci_bridge_check_ranges() to properly re-initialize
336 * the PCI portion of the flags here, and to detect
337 * what the bridge actually supports.
338 */
339 res->start = 0;
340 res->flags = (*size) ? flags : 0;
341 res->end = (*size) ? (*size - 1) : 0;
331 } 342 }
332 343
333 pr_devel("<- CBR %s [%x] *size=%016llx *align=%016llx\n", 344 pr_devel("<- CBR %s [%x] *size=%016llx *align=%016llx\n",
@@ -1288,15 +1299,14 @@ void __init pnv_pci_init_ioda1_phb(struct device_node *np)
1288 /* Setup MSI support */ 1299 /* Setup MSI support */
1289 pnv_pci_init_ioda_msis(phb); 1300 pnv_pci_init_ioda_msis(phb);
1290 1301
1291 /* We set both probe_only and PCI_REASSIGN_ALL_RSRC. This is an 1302 /* We set both PCI_PROBE_ONLY and PCI_REASSIGN_ALL_RSRC. This is an
1292 * odd combination which essentially means that we skip all resource 1303 * odd combination which essentially means that we skip all resource
1293 * fixups and assignments in the generic code, and do it all 1304 * fixups and assignments in the generic code, and do it all
1294 * ourselves here 1305 * ourselves here
1295 */ 1306 */
1296 pci_probe_only = 1;
1297 ppc_md.pcibios_fixup_phb = pnv_pci_ioda_fixup_phb; 1307 ppc_md.pcibios_fixup_phb = pnv_pci_ioda_fixup_phb;
1298 ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook; 1308 ppc_md.pcibios_enable_device_hook = pnv_pci_enable_device_hook;
1299 pci_add_flags(PCI_REASSIGN_ALL_RSRC); 1309 pci_add_flags(PCI_PROBE_ONLY | PCI_REASSIGN_ALL_RSRC);
1300 1310
1301 /* Reset IODA tables to a clean state */ 1311 /* Reset IODA tables to a clean state */
1302 rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET); 1312 rc = opal_pci_reset(phb_id, OPAL_PCI_IODA_TABLE_RESET, OPAL_ASSERT_RESET);
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index a70bc1e385eb..be3cfc5ceabb 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -31,6 +31,7 @@
31#include <asm/iommu.h> 31#include <asm/iommu.h>
32#include <asm/tce.h> 32#include <asm/tce.h>
33#include <asm/abs_addr.h> 33#include <asm/abs_addr.h>
34#include <asm/firmware.h>
34 35
35#include "powernv.h" 36#include "powernv.h"
36#include "pci.h" 37#include "pci.h"
@@ -52,32 +53,38 @@ static int pnv_msi_check_device(struct pci_dev* pdev, int nvec, int type)
52 53
53static unsigned int pnv_get_one_msi(struct pnv_phb *phb) 54static unsigned int pnv_get_one_msi(struct pnv_phb *phb)
54{ 55{
55 unsigned int id; 56 unsigned long flags;
57 unsigned int id, rc;
58
59 spin_lock_irqsave(&phb->lock, flags);
56 60
57 spin_lock(&phb->lock);
58 id = find_next_zero_bit(phb->msi_map, phb->msi_count, phb->msi_next); 61 id = find_next_zero_bit(phb->msi_map, phb->msi_count, phb->msi_next);
59 if (id >= phb->msi_count && phb->msi_next) 62 if (id >= phb->msi_count && phb->msi_next)
60 id = find_next_zero_bit(phb->msi_map, phb->msi_count, 0); 63 id = find_next_zero_bit(phb->msi_map, phb->msi_count, 0);
61 if (id >= phb->msi_count) { 64 if (id >= phb->msi_count) {
62 spin_unlock(&phb->lock); 65 rc = 0;
63 return 0; 66 goto out;
64 } 67 }
65 __set_bit(id, phb->msi_map); 68 __set_bit(id, phb->msi_map);
66 spin_unlock(&phb->lock); 69 rc = id + phb->msi_base;
67 return id + phb->msi_base; 70out:
71 spin_unlock_irqrestore(&phb->lock, flags);
72 return rc;
68} 73}
69 74
70static void pnv_put_msi(struct pnv_phb *phb, unsigned int hwirq) 75static void pnv_put_msi(struct pnv_phb *phb, unsigned int hwirq)
71{ 76{
77 unsigned long flags;
72 unsigned int id; 78 unsigned int id;
73 79
74 if (WARN_ON(hwirq < phb->msi_base || 80 if (WARN_ON(hwirq < phb->msi_base ||
75 hwirq >= (phb->msi_base + phb->msi_count))) 81 hwirq >= (phb->msi_base + phb->msi_count)))
76 return; 82 return;
77 id = hwirq - phb->msi_base; 83 id = hwirq - phb->msi_base;
78 spin_lock(&phb->lock); 84
85 spin_lock_irqsave(&phb->lock, flags);
79 __clear_bit(id, phb->msi_map); 86 __clear_bit(id, phb->msi_map);
80 spin_unlock(&phb->lock); 87 spin_unlock_irqrestore(&phb->lock, flags);
81} 88}
82 89
83static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) 90static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
@@ -555,10 +562,7 @@ void __init pnv_pci_init(void)
555{ 562{
556 struct device_node *np; 563 struct device_node *np;
557 564
558 pci_set_flags(PCI_CAN_SKIP_ISA_ALIGN); 565 pci_add_flags(PCI_CAN_SKIP_ISA_ALIGN);
559
560 /* We do not want to just probe */
561 pci_probe_only = 0;
562 566
563 /* OPAL absent, try POPAL first then RTAS detection of PHBs */ 567 /* OPAL absent, try POPAL first then RTAS detection of PHBs */
564 if (!firmware_has_feature(FW_FEATURE_OPAL)) { 568 if (!firmware_has_feature(FW_FEATURE_OPAL)) {
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 467bd4ac6824..db1ad1c8f68f 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -31,7 +31,6 @@
31#include <asm/xics.h> 31#include <asm/xics.h>
32#include <asm/rtas.h> 32#include <asm/rtas.h>
33#include <asm/opal.h> 33#include <asm/opal.h>
34#include <asm/xics.h>
35 34
36#include "powernv.h" 35#include "powernv.h"
37 36
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 617efa12a3a5..2a4ff86cc21f 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -667,7 +667,7 @@ static void __maybe_unused _dump_mask(struct ps3_private *pd,
667static void dump_bmp(struct ps3_private* pd) {}; 667static void dump_bmp(struct ps3_private* pd) {};
668#endif /* defined(DEBUG) */ 668#endif /* defined(DEBUG) */
669 669
670static int ps3_host_map(struct irq_host *h, unsigned int virq, 670static int ps3_host_map(struct irq_domain *h, unsigned int virq,
671 irq_hw_number_t hwirq) 671 irq_hw_number_t hwirq)
672{ 672{
673 DBG("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq, 673 DBG("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq,
@@ -678,13 +678,13 @@ static int ps3_host_map(struct irq_host *h, unsigned int virq,
678 return 0; 678 return 0;
679} 679}
680 680
681static int ps3_host_match(struct irq_host *h, struct device_node *np) 681static int ps3_host_match(struct irq_domain *h, struct device_node *np)
682{ 682{
683 /* Match all */ 683 /* Match all */
684 return 1; 684 return 1;
685} 685}
686 686
687static struct irq_host_ops ps3_host_ops = { 687static const struct irq_domain_ops ps3_host_ops = {
688 .map = ps3_host_map, 688 .map = ps3_host_map,
689 .match = ps3_host_match, 689 .match = ps3_host_match,
690}; 690};
@@ -751,10 +751,9 @@ void __init ps3_init_IRQ(void)
751{ 751{
752 int result; 752 int result;
753 unsigned cpu; 753 unsigned cpu;
754 struct irq_host *host; 754 struct irq_domain *host;
755 755
756 host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, &ps3_host_ops, 756 host = irq_domain_add_nomap(NULL, &ps3_host_ops, NULL);
757 PS3_INVALID_OUTLET);
758 irq_set_default_host(host); 757 irq_set_default_host(host);
759 irq_set_virq_count(PS3_PLUG_MAX + 1); 758 irq_set_virq_count(PS3_PLUG_MAX + 1);
760 759
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index ae7b6d41fed3..aadbe4f6d537 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -3,6 +3,7 @@ config PPC_PSERIES
3 bool "IBM pSeries & new (POWER5-based) iSeries" 3 bool "IBM pSeries & new (POWER5-based) iSeries"
4 select HAVE_PCSPKR_PLATFORM 4 select HAVE_PCSPKR_PLATFORM
5 select MPIC 5 select MPIC
6 select OF_DYNAMIC
6 select PCI_MSI 7 select PCI_MSI
7 select PPC_XICS 8 select PPC_XICS
8 select PPC_ICP_NATIVE 9 select PPC_ICP_NATIVE
@@ -72,7 +73,7 @@ config IO_EVENT_IRQ
72 73
73config LPARCFG 74config LPARCFG
74 bool "LPAR Configuration Data" 75 bool "LPAR Configuration Data"
75 depends on PPC_PSERIES || PPC_ISERIES 76 depends on PPC_PSERIES
76 help 77 help
77 Provide system capacity information via human readable 78 Provide system capacity information via human readable
78 <key word>=<value> pairs through a /proc/ppc64/lparcfg interface. 79 <key word>=<value> pairs through a /proc/ppc64/lparcfg interface.
@@ -122,7 +123,7 @@ config DTL
122 Say N if you are unsure. 123 Say N if you are unsure.
123 124
124config PSERIES_IDLE 125config PSERIES_IDLE
125 tristate "Cpuidle driver for pSeries platforms" 126 bool "Cpuidle driver for pSeries platforms"
126 depends on CPU_IDLE 127 depends on CPU_IDLE
127 depends on PPC_PSERIES 128 depends on PPC_PSERIES
128 default y 129 default y
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index 236db46b4078..c222189f5bb2 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -6,7 +6,8 @@ obj-y := lpar.o hvCall.o nvram.o reconfig.o \
6 firmware.o power.o dlpar.o mobility.o 6 firmware.o power.o dlpar.o mobility.o
7obj-$(CONFIG_SMP) += smp.o 7obj-$(CONFIG_SMP) += smp.o
8obj-$(CONFIG_SCANLOG) += scanlog.o 8obj-$(CONFIG_SCANLOG) += scanlog.o
9obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o eeh_sysfs.o 9obj-$(CONFIG_EEH) += eeh.o eeh_dev.o eeh_cache.o eeh_driver.o \
10 eeh_event.o eeh_sysfs.o eeh_pseries.o
10obj-$(CONFIG_KEXEC) += kexec.o 11obj-$(CONFIG_KEXEC) += kexec.o
11obj-$(CONFIG_PCI) += pci.o pci_dlpar.o 12obj-$(CONFIG_PCI) += pci.o pci_dlpar.o
12obj-$(CONFIG_PSERIES_MSI) += msi.o 13obj-$(CONFIG_PSERIES_MSI) += msi.o
@@ -18,7 +19,6 @@ obj-$(CONFIG_MEMORY_HOTPLUG) += hotplug-memory.o
18obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o 19obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
19obj-$(CONFIG_HVCS) += hvcserver.o 20obj-$(CONFIG_HVCS) += hvcserver.o
20obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o 21obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o
21obj-$(CONFIG_PHYP_DUMP) += phyp_dump.o
22obj-$(CONFIG_CMM) += cmm.o 22obj-$(CONFIG_CMM) += cmm.o
23obj-$(CONFIG_DTL) += dtl.o 23obj-$(CONFIG_DTL) += dtl.o
24obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o 24obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 565869022e3d..309d38ef7322 100644
--- a/arch/powerpc/platforms/pseries/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -1,8 +1,8 @@
1/* 1/*
2 * eeh.c
3 * Copyright IBM Corporation 2001, 2005, 2006 2 * Copyright IBM Corporation 2001, 2005, 2006
4 * Copyright Dave Engebretsen & Todd Inglett 2001 3 * Copyright Dave Engebretsen & Todd Inglett 2001
5 * Copyright Linas Vepstas 2005, 2006 4 * Copyright Linas Vepstas 2005, 2006
5 * Copyright 2001-2012 IBM Corporation.
6 * 6 *
7 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -22,7 +22,7 @@
22 */ 22 */
23 23
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/sched.h> /* for init_mm */ 25#include <linux/sched.h>
26#include <linux/init.h> 26#include <linux/init.h>
27#include <linux/list.h> 27#include <linux/list.h>
28#include <linux/pci.h> 28#include <linux/pci.h>
@@ -86,16 +86,8 @@
86/* Time to wait for a PCI slot to report status, in milliseconds */ 86/* Time to wait for a PCI slot to report status, in milliseconds */
87#define PCI_BUS_RESET_WAIT_MSEC (60*1000) 87#define PCI_BUS_RESET_WAIT_MSEC (60*1000)
88 88
89/* RTAS tokens */ 89/* Platform dependent EEH operations */
90static int ibm_set_eeh_option; 90struct eeh_ops *eeh_ops = NULL;
91static int ibm_set_slot_reset;
92static int ibm_read_slot_reset_state;
93static int ibm_read_slot_reset_state2;
94static int ibm_slot_error_detail;
95static int ibm_get_config_addr_info;
96static int ibm_get_config_addr_info2;
97static int ibm_configure_bridge;
98static int ibm_configure_pe;
99 91
100int eeh_subsystem_enabled; 92int eeh_subsystem_enabled;
101EXPORT_SYMBOL(eeh_subsystem_enabled); 93EXPORT_SYMBOL(eeh_subsystem_enabled);
@@ -103,14 +95,6 @@ EXPORT_SYMBOL(eeh_subsystem_enabled);
103/* Lock to avoid races due to multiple reports of an error */ 95/* Lock to avoid races due to multiple reports of an error */
104static DEFINE_RAW_SPINLOCK(confirm_error_lock); 96static DEFINE_RAW_SPINLOCK(confirm_error_lock);
105 97
106/* Buffer for reporting slot-error-detail rtas calls. Its here
107 * in BSS, and not dynamically alloced, so that it ends up in
108 * RMO where RTAS can access it.
109 */
110static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
111static DEFINE_SPINLOCK(slot_errbuf_lock);
112static int eeh_error_buf_size;
113
114/* Buffer for reporting pci register dumps. Its here in BSS, and 98/* Buffer for reporting pci register dumps. Its here in BSS, and
115 * not dynamically alloced, so that it ends up in RMO where RTAS 99 * not dynamically alloced, so that it ends up in RMO where RTAS
116 * can access it. 100 * can access it.
@@ -118,74 +102,50 @@ static int eeh_error_buf_size;
118#define EEH_PCI_REGS_LOG_LEN 4096 102#define EEH_PCI_REGS_LOG_LEN 4096
119static unsigned char pci_regs_buf[EEH_PCI_REGS_LOG_LEN]; 103static unsigned char pci_regs_buf[EEH_PCI_REGS_LOG_LEN];
120 104
121/* System monitoring statistics */ 105/*
122static unsigned long no_device; 106 * The struct is used to maintain the EEH global statistic
123static unsigned long no_dn; 107 * information. Besides, the EEH global statistics will be
124static unsigned long no_cfg_addr; 108 * exported to user space through procfs
125static unsigned long ignored_check; 109 */
126static unsigned long total_mmio_ffs; 110struct eeh_stats {
127static unsigned long false_positives; 111 u64 no_device; /* PCI device not found */
128static unsigned long slot_resets; 112 u64 no_dn; /* OF node not found */
129 113 u64 no_cfg_addr; /* Config address not found */
130#define IS_BRIDGE(class_code) (((class_code)<<16) == PCI_BASE_CLASS_BRIDGE) 114 u64 ignored_check; /* EEH check skipped */
115 u64 total_mmio_ffs; /* Total EEH checks */
116 u64 false_positives; /* Unnecessary EEH checks */
117 u64 slot_resets; /* PE reset */
118};
131 119
132/* --------------------------------------------------------------- */ 120static struct eeh_stats eeh_stats;
133/* Below lies the EEH event infrastructure */
134 121
135static void rtas_slot_error_detail(struct pci_dn *pdn, int severity, 122#define IS_BRIDGE(class_code) (((class_code)<<16) == PCI_BASE_CLASS_BRIDGE)
136 char *driver_log, size_t loglen)
137{
138 int config_addr;
139 unsigned long flags;
140 int rc;
141
142 /* Log the error with the rtas logger */
143 spin_lock_irqsave(&slot_errbuf_lock, flags);
144 memset(slot_errbuf, 0, eeh_error_buf_size);
145
146 /* Use PE configuration address, if present */
147 config_addr = pdn->eeh_config_addr;
148 if (pdn->eeh_pe_config_addr)
149 config_addr = pdn->eeh_pe_config_addr;
150
151 rc = rtas_call(ibm_slot_error_detail,
152 8, 1, NULL, config_addr,
153 BUID_HI(pdn->phb->buid),
154 BUID_LO(pdn->phb->buid),
155 virt_to_phys(driver_log), loglen,
156 virt_to_phys(slot_errbuf),
157 eeh_error_buf_size,
158 severity);
159
160 if (rc == 0)
161 log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
162 spin_unlock_irqrestore(&slot_errbuf_lock, flags);
163}
164 123
165/** 124/**
166 * gather_pci_data - copy assorted PCI config space registers to buff 125 * eeh_gather_pci_data - Copy assorted PCI config space registers to buff
167 * @pdn: device to report data for 126 * @edev: device to report data for
168 * @buf: point to buffer in which to log 127 * @buf: point to buffer in which to log
169 * @len: amount of room in buffer 128 * @len: amount of room in buffer
170 * 129 *
171 * This routine captures assorted PCI configuration space data, 130 * This routine captures assorted PCI configuration space data,
172 * and puts them into a buffer for RTAS error logging. 131 * and puts them into a buffer for RTAS error logging.
173 */ 132 */
174static size_t gather_pci_data(struct pci_dn *pdn, char * buf, size_t len) 133static size_t eeh_gather_pci_data(struct eeh_dev *edev, char * buf, size_t len)
175{ 134{
176 struct pci_dev *dev = pdn->pcidev; 135 struct device_node *dn = eeh_dev_to_of_node(edev);
136 struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
177 u32 cfg; 137 u32 cfg;
178 int cap, i; 138 int cap, i;
179 int n = 0; 139 int n = 0;
180 140
181 n += scnprintf(buf+n, len-n, "%s\n", pdn->node->full_name); 141 n += scnprintf(buf+n, len-n, "%s\n", dn->full_name);
182 printk(KERN_WARNING "EEH: of node=%s\n", pdn->node->full_name); 142 printk(KERN_WARNING "EEH: of node=%s\n", dn->full_name);
183 143
184 rtas_read_config(pdn, PCI_VENDOR_ID, 4, &cfg); 144 eeh_ops->read_config(dn, PCI_VENDOR_ID, 4, &cfg);
185 n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg); 145 n += scnprintf(buf+n, len-n, "dev/vend:%08x\n", cfg);
186 printk(KERN_WARNING "EEH: PCI device/vendor: %08x\n", cfg); 146 printk(KERN_WARNING "EEH: PCI device/vendor: %08x\n", cfg);
187 147
188 rtas_read_config(pdn, PCI_COMMAND, 4, &cfg); 148 eeh_ops->read_config(dn, PCI_COMMAND, 4, &cfg);
189 n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg); 149 n += scnprintf(buf+n, len-n, "cmd/stat:%x\n", cfg);
190 printk(KERN_WARNING "EEH: PCI cmd/status register: %08x\n", cfg); 150 printk(KERN_WARNING "EEH: PCI cmd/status register: %08x\n", cfg);
191 151
@@ -196,11 +156,11 @@ static size_t gather_pci_data(struct pci_dn *pdn, char * buf, size_t len)
196 156
197 /* Gather bridge-specific registers */ 157 /* Gather bridge-specific registers */
198 if (dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) { 158 if (dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) {
199 rtas_read_config(pdn, PCI_SEC_STATUS, 2, &cfg); 159 eeh_ops->read_config(dn, PCI_SEC_STATUS, 2, &cfg);
200 n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg); 160 n += scnprintf(buf+n, len-n, "sec stat:%x\n", cfg);
201 printk(KERN_WARNING "EEH: Bridge secondary status: %04x\n", cfg); 161 printk(KERN_WARNING "EEH: Bridge secondary status: %04x\n", cfg);
202 162
203 rtas_read_config(pdn, PCI_BRIDGE_CONTROL, 2, &cfg); 163 eeh_ops->read_config(dn, PCI_BRIDGE_CONTROL, 2, &cfg);
204 n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg); 164 n += scnprintf(buf+n, len-n, "brdg ctl:%x\n", cfg);
205 printk(KERN_WARNING "EEH: Bridge control: %04x\n", cfg); 165 printk(KERN_WARNING "EEH: Bridge control: %04x\n", cfg);
206 } 166 }
@@ -208,11 +168,11 @@ static size_t gather_pci_data(struct pci_dn *pdn, char * buf, size_t len)
208 /* Dump out the PCI-X command and status regs */ 168 /* Dump out the PCI-X command and status regs */
209 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX); 169 cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
210 if (cap) { 170 if (cap) {
211 rtas_read_config(pdn, cap, 4, &cfg); 171 eeh_ops->read_config(dn, cap, 4, &cfg);
212 n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg); 172 n += scnprintf(buf+n, len-n, "pcix-cmd:%x\n", cfg);
213 printk(KERN_WARNING "EEH: PCI-X cmd: %08x\n", cfg); 173 printk(KERN_WARNING "EEH: PCI-X cmd: %08x\n", cfg);
214 174
215 rtas_read_config(pdn, cap+4, 4, &cfg); 175 eeh_ops->read_config(dn, cap+4, 4, &cfg);
216 n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg); 176 n += scnprintf(buf+n, len-n, "pcix-stat:%x\n", cfg);
217 printk(KERN_WARNING "EEH: PCI-X status: %08x\n", cfg); 177 printk(KERN_WARNING "EEH: PCI-X status: %08x\n", cfg);
218 } 178 }
@@ -225,7 +185,7 @@ static size_t gather_pci_data(struct pci_dn *pdn, char * buf, size_t len)
225 "EEH: PCI-E capabilities and status follow:\n"); 185 "EEH: PCI-E capabilities and status follow:\n");
226 186
227 for (i=0; i<=8; i++) { 187 for (i=0; i<=8; i++) {
228 rtas_read_config(pdn, cap+4*i, 4, &cfg); 188 eeh_ops->read_config(dn, cap+4*i, 4, &cfg);
229 n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); 189 n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
230 printk(KERN_WARNING "EEH: PCI-E %02x: %08x\n", i, cfg); 190 printk(KERN_WARNING "EEH: PCI-E %02x: %08x\n", i, cfg);
231 } 191 }
@@ -237,7 +197,7 @@ static size_t gather_pci_data(struct pci_dn *pdn, char * buf, size_t len)
237 "EEH: PCI-E AER capability register set follows:\n"); 197 "EEH: PCI-E AER capability register set follows:\n");
238 198
239 for (i=0; i<14; i++) { 199 for (i=0; i<14; i++) {
240 rtas_read_config(pdn, cap+4*i, 4, &cfg); 200 eeh_ops->read_config(dn, cap+4*i, 4, &cfg);
241 n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg); 201 n += scnprintf(buf+n, len-n, "%02x:%x\n", 4*i, cfg);
242 printk(KERN_WARNING "EEH: PCI-E AER %02x: %08x\n", i, cfg); 202 printk(KERN_WARNING "EEH: PCI-E AER %02x: %08x\n", i, cfg);
243 } 203 }
@@ -246,111 +206,46 @@ static size_t gather_pci_data(struct pci_dn *pdn, char * buf, size_t len)
246 206
247 /* Gather status on devices under the bridge */ 207 /* Gather status on devices under the bridge */
248 if (dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) { 208 if (dev->class >> 16 == PCI_BASE_CLASS_BRIDGE) {
249 struct device_node *dn; 209 struct device_node *child;
250 210
251 for_each_child_of_node(pdn->node, dn) { 211 for_each_child_of_node(dn, child) {
252 pdn = PCI_DN(dn); 212 if (of_node_to_eeh_dev(child))
253 if (pdn) 213 n += eeh_gather_pci_data(of_node_to_eeh_dev(child), buf+n, len-n);
254 n += gather_pci_data(pdn, buf+n, len-n);
255 } 214 }
256 } 215 }
257 216
258 return n; 217 return n;
259} 218}
260 219
261void eeh_slot_error_detail(struct pci_dn *pdn, int severity)
262{
263 size_t loglen = 0;
264 pci_regs_buf[0] = 0;
265
266 rtas_pci_enable(pdn, EEH_THAW_MMIO);
267 rtas_configure_bridge(pdn);
268 eeh_restore_bars(pdn);
269 loglen = gather_pci_data(pdn, pci_regs_buf, EEH_PCI_REGS_LOG_LEN);
270
271 rtas_slot_error_detail(pdn, severity, pci_regs_buf, loglen);
272}
273
274/**
275 * read_slot_reset_state - Read the reset state of a device node's slot
276 * @dn: device node to read
277 * @rets: array to return results in
278 */
279static int read_slot_reset_state(struct pci_dn *pdn, int rets[])
280{
281 int token, outputs;
282 int config_addr;
283
284 if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
285 token = ibm_read_slot_reset_state2;
286 outputs = 4;
287 } else {
288 token = ibm_read_slot_reset_state;
289 rets[2] = 0; /* fake PE Unavailable info */
290 outputs = 3;
291 }
292
293 /* Use PE configuration address, if present */
294 config_addr = pdn->eeh_config_addr;
295 if (pdn->eeh_pe_config_addr)
296 config_addr = pdn->eeh_pe_config_addr;
297
298 return rtas_call(token, 3, outputs, rets, config_addr,
299 BUID_HI(pdn->phb->buid), BUID_LO(pdn->phb->buid));
300}
301
302/** 220/**
303 * eeh_wait_for_slot_status - returns error status of slot 221 * eeh_slot_error_detail - Generate combined log including driver log and error log
304 * @pdn pci device node 222 * @edev: device to report error log for
305 * @max_wait_msecs maximum number to millisecs to wait 223 * @severity: temporary or permanent error log
306 * 224 *
307 * Return negative value if a permanent error, else return 225 * This routine should be called to generate the combined log, which
308 * Partition Endpoint (PE) status value. 226 * is comprised of driver log and error log. The driver log is figured
309 * 227 * out from the config space of the corresponding PCI device, while
310 * If @max_wait_msecs is positive, then this routine will 228 * the error log is fetched through platform dependent function call.
311 * sleep until a valid status can be obtained, or until
312 * the max allowed wait time is exceeded, in which case
313 * a -2 is returned.
314 */ 229 */
315int 230void eeh_slot_error_detail(struct eeh_dev *edev, int severity)
316eeh_wait_for_slot_status(struct pci_dn *pdn, int max_wait_msecs)
317{ 231{
318 int rc; 232 size_t loglen = 0;
319 int rets[3]; 233 pci_regs_buf[0] = 0;
320 int mwait;
321
322 while (1) {
323 rc = read_slot_reset_state(pdn, rets);
324 if (rc) return rc;
325 if (rets[1] == 0) return -1; /* EEH is not supported */
326
327 if (rets[0] != 5) return rets[0]; /* return actual status */
328
329 if (rets[2] == 0) return -1; /* permanently unavailable */
330 234
331 if (max_wait_msecs <= 0) break; 235 eeh_pci_enable(edev, EEH_OPT_THAW_MMIO);
236 eeh_ops->configure_bridge(eeh_dev_to_of_node(edev));
237 eeh_restore_bars(edev);
238 loglen = eeh_gather_pci_data(edev, pci_regs_buf, EEH_PCI_REGS_LOG_LEN);
332 239
333 mwait = rets[2]; 240 eeh_ops->get_log(eeh_dev_to_of_node(edev), severity, pci_regs_buf, loglen);
334 if (mwait <= 0) {
335 printk (KERN_WARNING
336 "EEH: Firmware returned bad wait value=%d\n", mwait);
337 mwait = 1000;
338 } else if (mwait > 300*1000) {
339 printk (KERN_WARNING
340 "EEH: Firmware is taking too long, time=%d\n", mwait);
341 mwait = 300*1000;
342 }
343 max_wait_msecs -= mwait;
344 msleep (mwait);
345 }
346
347 printk(KERN_WARNING "EEH: Timed out waiting for slot status\n");
348 return -2;
349} 241}
350 242
351/** 243/**
352 * eeh_token_to_phys - convert EEH address token to phys address 244 * eeh_token_to_phys - Convert EEH address token to phys address
353 * @token i/o token, should be address in the form 0xA.... 245 * @token: I/O token, should be address in the form 0xA....
246 *
247 * This routine should be called to convert virtual I/O address
248 * to physical one.
354 */ 249 */
355static inline unsigned long eeh_token_to_phys(unsigned long token) 250static inline unsigned long eeh_token_to_phys(unsigned long token)
356{ 251{
@@ -365,36 +260,43 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
365 return pa | (token & (PAGE_SIZE-1)); 260 return pa | (token & (PAGE_SIZE-1));
366} 261}
367 262
368/** 263/**
369 * Return the "partitionable endpoint" (pe) under which this device lies 264 * eeh_find_device_pe - Retrieve the PE for the given device
265 * @dn: device node
266 *
267 * Return the PE under which this device lies
370 */ 268 */
371struct device_node * find_device_pe(struct device_node *dn) 269struct device_node *eeh_find_device_pe(struct device_node *dn)
372{ 270{
373 while ((dn->parent) && PCI_DN(dn->parent) && 271 while (dn->parent && of_node_to_eeh_dev(dn->parent) &&
374 (PCI_DN(dn->parent)->eeh_mode & EEH_MODE_SUPPORTED)) { 272 (of_node_to_eeh_dev(dn->parent)->mode & EEH_MODE_SUPPORTED)) {
375 dn = dn->parent; 273 dn = dn->parent;
376 } 274 }
377 return dn; 275 return dn;
378} 276}
379 277
380/** Mark all devices that are children of this device as failed. 278/**
381 * Mark the device driver too, so that it can see the failure 279 * __eeh_mark_slot - Mark all child devices as failed
382 * immediately; this is critical, since some drivers poll 280 * @parent: parent device
383 * status registers in interrupts ... If a driver is polling, 281 * @mode_flag: failure flag
384 * and the slot is frozen, then the driver can deadlock in 282 *
385 * an interrupt context, which is bad. 283 * Mark all devices that are children of this device as failed.
284 * Mark the device driver too, so that it can see the failure
285 * immediately; this is critical, since some drivers poll
286 * status registers in interrupts ... If a driver is polling,
287 * and the slot is frozen, then the driver can deadlock in
288 * an interrupt context, which is bad.
386 */ 289 */
387
388static void __eeh_mark_slot(struct device_node *parent, int mode_flag) 290static void __eeh_mark_slot(struct device_node *parent, int mode_flag)
389{ 291{
390 struct device_node *dn; 292 struct device_node *dn;
391 293
392 for_each_child_of_node(parent, dn) { 294 for_each_child_of_node(parent, dn) {
393 if (PCI_DN(dn)) { 295 if (of_node_to_eeh_dev(dn)) {
394 /* Mark the pci device driver too */ 296 /* Mark the pci device driver too */
395 struct pci_dev *dev = PCI_DN(dn)->pcidev; 297 struct pci_dev *dev = of_node_to_eeh_dev(dn)->pdev;
396 298
397 PCI_DN(dn)->eeh_mode |= mode_flag; 299 of_node_to_eeh_dev(dn)->mode |= mode_flag;
398 300
399 if (dev && dev->driver) 301 if (dev && dev->driver)
400 dev->error_state = pci_channel_io_frozen; 302 dev->error_state = pci_channel_io_frozen;
@@ -404,92 +306,81 @@ static void __eeh_mark_slot(struct device_node *parent, int mode_flag)
404 } 306 }
405} 307}
406 308
407void eeh_mark_slot (struct device_node *dn, int mode_flag) 309/**
310 * eeh_mark_slot - Mark the indicated device and its children as failed
311 * @dn: parent device
312 * @mode_flag: failure flag
313 *
314 * Mark the indicated device and its child devices as failed.
315 * The device drivers are marked as failed as well.
316 */
317void eeh_mark_slot(struct device_node *dn, int mode_flag)
408{ 318{
409 struct pci_dev *dev; 319 struct pci_dev *dev;
410 dn = find_device_pe (dn); 320 dn = eeh_find_device_pe(dn);
411 321
412 /* Back up one, since config addrs might be shared */ 322 /* Back up one, since config addrs might be shared */
413 if (!pcibios_find_pci_bus(dn) && PCI_DN(dn->parent)) 323 if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent))
414 dn = dn->parent; 324 dn = dn->parent;
415 325
416 PCI_DN(dn)->eeh_mode |= mode_flag; 326 of_node_to_eeh_dev(dn)->mode |= mode_flag;
417 327
418 /* Mark the pci device too */ 328 /* Mark the pci device too */
419 dev = PCI_DN(dn)->pcidev; 329 dev = of_node_to_eeh_dev(dn)->pdev;
420 if (dev) 330 if (dev)
421 dev->error_state = pci_channel_io_frozen; 331 dev->error_state = pci_channel_io_frozen;
422 332
423 __eeh_mark_slot(dn, mode_flag); 333 __eeh_mark_slot(dn, mode_flag);
424} 334}
425 335
336/**
337 * __eeh_clear_slot - Clear failure flag for the child devices
338 * @parent: parent device
339 * @mode_flag: flag to be cleared
340 *
341 * Clear failure flag for the child devices.
342 */
426static void __eeh_clear_slot(struct device_node *parent, int mode_flag) 343static void __eeh_clear_slot(struct device_node *parent, int mode_flag)
427{ 344{
428 struct device_node *dn; 345 struct device_node *dn;
429 346
430 for_each_child_of_node(parent, dn) { 347 for_each_child_of_node(parent, dn) {
431 if (PCI_DN(dn)) { 348 if (of_node_to_eeh_dev(dn)) {
432 PCI_DN(dn)->eeh_mode &= ~mode_flag; 349 of_node_to_eeh_dev(dn)->mode &= ~mode_flag;
433 PCI_DN(dn)->eeh_check_count = 0; 350 of_node_to_eeh_dev(dn)->check_count = 0;
434 __eeh_clear_slot(dn, mode_flag); 351 __eeh_clear_slot(dn, mode_flag);
435 } 352 }
436 } 353 }
437} 354}
438 355
439void eeh_clear_slot (struct device_node *dn, int mode_flag) 356/**
357 * eeh_clear_slot - Clear failure flag for the indicated device and its children
358 * @dn: parent device
359 * @mode_flag: flag to be cleared
360 *
361 * Clear failure flag for the indicated device and its children.
362 */
363void eeh_clear_slot(struct device_node *dn, int mode_flag)
440{ 364{
441 unsigned long flags; 365 unsigned long flags;
442 raw_spin_lock_irqsave(&confirm_error_lock, flags); 366 raw_spin_lock_irqsave(&confirm_error_lock, flags);
443 367
444 dn = find_device_pe (dn); 368 dn = eeh_find_device_pe(dn);
445 369
446 /* Back up one, since config addrs might be shared */ 370 /* Back up one, since config addrs might be shared */
447 if (!pcibios_find_pci_bus(dn) && PCI_DN(dn->parent)) 371 if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent))
448 dn = dn->parent; 372 dn = dn->parent;
449 373
450 PCI_DN(dn)->eeh_mode &= ~mode_flag; 374 of_node_to_eeh_dev(dn)->mode &= ~mode_flag;
451 PCI_DN(dn)->eeh_check_count = 0; 375 of_node_to_eeh_dev(dn)->check_count = 0;
452 __eeh_clear_slot(dn, mode_flag); 376 __eeh_clear_slot(dn, mode_flag);
453 raw_spin_unlock_irqrestore(&confirm_error_lock, flags); 377 raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
454} 378}
455 379
456void __eeh_set_pe_freset(struct device_node *parent, unsigned int *freset)
457{
458 struct device_node *dn;
459
460 for_each_child_of_node(parent, dn) {
461 if (PCI_DN(dn)) {
462
463 struct pci_dev *dev = PCI_DN(dn)->pcidev;
464
465 if (dev && dev->driver)
466 *freset |= dev->needs_freset;
467
468 __eeh_set_pe_freset(dn, freset);
469 }
470 }
471}
472
473void eeh_set_pe_freset(struct device_node *dn, unsigned int *freset)
474{
475 struct pci_dev *dev;
476 dn = find_device_pe(dn);
477
478 /* Back up one, since config addrs might be shared */
479 if (!pcibios_find_pci_bus(dn) && PCI_DN(dn->parent))
480 dn = dn->parent;
481
482 dev = PCI_DN(dn)->pcidev;
483 if (dev)
484 *freset |= dev->needs_freset;
485
486 __eeh_set_pe_freset(dn, freset);
487}
488
489/** 380/**
490 * eeh_dn_check_failure - check if all 1's data is due to EEH slot freeze 381 * eeh_dn_check_failure - Check if all 1's data is due to EEH slot freeze
491 * @dn device node 382 * @dn: device node
492 * @dev pci device, if known 383 * @dev: pci device, if known
493 * 384 *
494 * Check for an EEH failure for the given device node. Call this 385 * Check for an EEH failure for the given device node. Call this
495 * routine if the result of a read was all 0xff's and you want to 386 * routine if the result of a read was all 0xff's and you want to
@@ -504,35 +395,34 @@ void eeh_set_pe_freset(struct device_node *dn, unsigned int *freset)
504int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev) 395int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
505{ 396{
506 int ret; 397 int ret;
507 int rets[3];
508 unsigned long flags; 398 unsigned long flags;
509 struct pci_dn *pdn; 399 struct eeh_dev *edev;
510 int rc = 0; 400 int rc = 0;
511 const char *location; 401 const char *location;
512 402
513 total_mmio_ffs++; 403 eeh_stats.total_mmio_ffs++;
514 404
515 if (!eeh_subsystem_enabled) 405 if (!eeh_subsystem_enabled)
516 return 0; 406 return 0;
517 407
518 if (!dn) { 408 if (!dn) {
519 no_dn++; 409 eeh_stats.no_dn++;
520 return 0; 410 return 0;
521 } 411 }
522 dn = find_device_pe(dn); 412 dn = eeh_find_device_pe(dn);
523 pdn = PCI_DN(dn); 413 edev = of_node_to_eeh_dev(dn);
524 414
525 /* Access to IO BARs might get this far and still not want checking. */ 415 /* Access to IO BARs might get this far and still not want checking. */
526 if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) || 416 if (!(edev->mode & EEH_MODE_SUPPORTED) ||
527 pdn->eeh_mode & EEH_MODE_NOCHECK) { 417 edev->mode & EEH_MODE_NOCHECK) {
528 ignored_check++; 418 eeh_stats.ignored_check++;
529 pr_debug("EEH: Ignored check (%x) for %s %s\n", 419 pr_debug("EEH: Ignored check (%x) for %s %s\n",
530 pdn->eeh_mode, eeh_pci_name(dev), dn->full_name); 420 edev->mode, eeh_pci_name(dev), dn->full_name);
531 return 0; 421 return 0;
532 } 422 }
533 423
534 if (!pdn->eeh_config_addr && !pdn->eeh_pe_config_addr) { 424 if (!edev->config_addr && !edev->pe_config_addr) {
535 no_cfg_addr++; 425 eeh_stats.no_cfg_addr++;
536 return 0; 426 return 0;
537 } 427 }
538 428
@@ -544,16 +434,16 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
544 */ 434 */
545 raw_spin_lock_irqsave(&confirm_error_lock, flags); 435 raw_spin_lock_irqsave(&confirm_error_lock, flags);
546 rc = 1; 436 rc = 1;
547 if (pdn->eeh_mode & EEH_MODE_ISOLATED) { 437 if (edev->mode & EEH_MODE_ISOLATED) {
548 pdn->eeh_check_count ++; 438 edev->check_count++;
549 if (pdn->eeh_check_count % EEH_MAX_FAILS == 0) { 439 if (edev->check_count % EEH_MAX_FAILS == 0) {
550 location = of_get_property(dn, "ibm,loc-code", NULL); 440 location = of_get_property(dn, "ibm,loc-code", NULL);
551 printk (KERN_ERR "EEH: %d reads ignored for recovering device at " 441 printk(KERN_ERR "EEH: %d reads ignored for recovering device at "
552 "location=%s driver=%s pci addr=%s\n", 442 "location=%s driver=%s pci addr=%s\n",
553 pdn->eeh_check_count, location, 443 edev->check_count, location,
554 dev->driver->name, eeh_pci_name(dev)); 444 eeh_driver_name(dev), eeh_pci_name(dev));
555 printk (KERN_ERR "EEH: Might be infinite loop in %s driver\n", 445 printk(KERN_ERR "EEH: Might be infinite loop in %s driver\n",
556 dev->driver->name); 446 eeh_driver_name(dev));
557 dump_stack(); 447 dump_stack();
558 } 448 }
559 goto dn_unlock; 449 goto dn_unlock;
@@ -566,58 +456,39 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
566 * function zero of a multi-function device. 456 * function zero of a multi-function device.
567 * In any case they must share a common PHB. 457 * In any case they must share a common PHB.
568 */ 458 */
569 ret = read_slot_reset_state(pdn, rets); 459 ret = eeh_ops->get_state(dn, NULL);
570
571 /* If the call to firmware failed, punt */
572 if (ret != 0) {
573 printk(KERN_WARNING "EEH: read_slot_reset_state() failed; rc=%d dn=%s\n",
574 ret, dn->full_name);
575 false_positives++;
576 pdn->eeh_false_positives ++;
577 rc = 0;
578 goto dn_unlock;
579 }
580 460
581 /* Note that config-io to empty slots may fail; 461 /* Note that config-io to empty slots may fail;
582 * they are empty when they don't have children. */ 462 * they are empty when they don't have children.
583 if ((rets[0] == 5) && (rets[2] == 0) && (dn->child == NULL)) { 463 * We will punt with the following conditions: Failure to get
584 false_positives++; 464 * PE's state, EEH not support and Permanently unavailable
585 pdn->eeh_false_positives ++; 465 * state, PE is in good state.
586 rc = 0; 466 */
587 goto dn_unlock; 467 if ((ret < 0) ||
588 } 468 (ret == EEH_STATE_NOT_SUPPORT) ||
589 469 (ret & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) ==
590 /* If EEH is not supported on this device, punt. */ 470 (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) {
591 if (rets[1] != 1) { 471 eeh_stats.false_positives++;
592 printk(KERN_WARNING "EEH: event on unsupported device, rc=%d dn=%s\n", 472 edev->false_positives ++;
593 ret, dn->full_name);
594 false_positives++;
595 pdn->eeh_false_positives ++;
596 rc = 0;
597 goto dn_unlock;
598 }
599
600 /* If not the kind of error we know about, punt. */
601 if (rets[0] != 1 && rets[0] != 2 && rets[0] != 4 && rets[0] != 5) {
602 false_positives++;
603 pdn->eeh_false_positives ++;
604 rc = 0; 473 rc = 0;
605 goto dn_unlock; 474 goto dn_unlock;
606 } 475 }
607 476
608 slot_resets++; 477 eeh_stats.slot_resets++;
609 478
610 /* Avoid repeated reports of this failure, including problems 479 /* Avoid repeated reports of this failure, including problems
611 * with other functions on this device, and functions under 480 * with other functions on this device, and functions under
612 * bridges. */ 481 * bridges.
613 eeh_mark_slot (dn, EEH_MODE_ISOLATED); 482 */
483 eeh_mark_slot(dn, EEH_MODE_ISOLATED);
614 raw_spin_unlock_irqrestore(&confirm_error_lock, flags); 484 raw_spin_unlock_irqrestore(&confirm_error_lock, flags);
615 485
616 eeh_send_failure_event (dn, dev); 486 eeh_send_failure_event(edev);
617 487
618 /* Most EEH events are due to device driver bugs. Having 488 /* Most EEH events are due to device driver bugs. Having
619 * a stack trace will help the device-driver authors figure 489 * a stack trace will help the device-driver authors figure
620 * out what happened. So print that out. */ 490 * out what happened. So print that out.
491 */
621 dump_stack(); 492 dump_stack();
622 return 1; 493 return 1;
623 494
@@ -629,9 +500,9 @@ dn_unlock:
629EXPORT_SYMBOL_GPL(eeh_dn_check_failure); 500EXPORT_SYMBOL_GPL(eeh_dn_check_failure);
630 501
631/** 502/**
632 * eeh_check_failure - check if all 1's data is due to EEH slot freeze 503 * eeh_check_failure - Check if all 1's data is due to EEH slot freeze
633 * @token i/o token, should be address in the form 0xA.... 504 * @token: I/O token, should be address in the form 0xA....
634 * @val value, should be all 1's (XXX why do we need this arg??) 505 * @val: value, should be all 1's (XXX why do we need this arg??)
635 * 506 *
636 * Check for an EEH failure at the given token address. Call this 507 * Check for an EEH failure at the given token address. Call this
637 * routine if the result of a read was all 0xff's and you want to 508 * routine if the result of a read was all 0xff's and you want to
@@ -648,14 +519,14 @@ unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned lon
648 519
649 /* Finding the phys addr + pci device; this is pretty quick. */ 520 /* Finding the phys addr + pci device; this is pretty quick. */
650 addr = eeh_token_to_phys((unsigned long __force) token); 521 addr = eeh_token_to_phys((unsigned long __force) token);
651 dev = pci_get_device_by_addr(addr); 522 dev = pci_addr_cache_get_device(addr);
652 if (!dev) { 523 if (!dev) {
653 no_device++; 524 eeh_stats.no_device++;
654 return val; 525 return val;
655 } 526 }
656 527
657 dn = pci_device_to_OF_node(dev); 528 dn = pci_device_to_OF_node(dev);
658 eeh_dn_check_failure (dn, dev); 529 eeh_dn_check_failure(dn, dev);
659 530
660 pci_dev_put(dev); 531 pci_dev_put(dev);
661 return val; 532 return val;
@@ -663,115 +534,54 @@ unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned lon
663 534
664EXPORT_SYMBOL(eeh_check_failure); 535EXPORT_SYMBOL(eeh_check_failure);
665 536
666/* ------------------------------------------------------------- */
667/* The code below deals with error recovery */
668 537
669/** 538/**
670 * rtas_pci_enable - enable MMIO or DMA transfers for this slot 539 * eeh_pci_enable - Enable MMIO or DMA transfers for this slot
671 * @pdn pci device node 540 * @edev: pci device node
541 *
542 * This routine should be called to reenable frozen MMIO or DMA
543 * so that it would work correctly again. It's useful while doing
544 * recovery or log collection on the indicated device.
672 */ 545 */
673 546int eeh_pci_enable(struct eeh_dev *edev, int function)
674int
675rtas_pci_enable(struct pci_dn *pdn, int function)
676{ 547{
677 int config_addr;
678 int rc; 548 int rc;
549 struct device_node *dn = eeh_dev_to_of_node(edev);
679 550
680 /* Use PE configuration address, if present */ 551 rc = eeh_ops->set_option(dn, function);
681 config_addr = pdn->eeh_config_addr;
682 if (pdn->eeh_pe_config_addr)
683 config_addr = pdn->eeh_pe_config_addr;
684
685 rc = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
686 config_addr,
687 BUID_HI(pdn->phb->buid),
688 BUID_LO(pdn->phb->buid),
689 function);
690
691 if (rc) 552 if (rc)
692 printk(KERN_WARNING "EEH: Unexpected state change %d, err=%d dn=%s\n", 553 printk(KERN_WARNING "EEH: Unexpected state change %d, err=%d dn=%s\n",
693 function, rc, pdn->node->full_name); 554 function, rc, dn->full_name);
694 555
695 rc = eeh_wait_for_slot_status (pdn, PCI_BUS_RESET_WAIT_MSEC); 556 rc = eeh_ops->wait_state(dn, PCI_BUS_RESET_WAIT_MSEC);
696 if ((rc == 4) && (function == EEH_THAW_MMIO)) 557 if (rc > 0 && (rc & EEH_STATE_MMIO_ENABLED) &&
558 (function == EEH_OPT_THAW_MMIO))
697 return 0; 559 return 0;
698 560
699 return rc; 561 return rc;
700} 562}
701 563
702/** 564/**
703 * rtas_pci_slot_reset - raises/lowers the pci #RST line
704 * @pdn pci device node
705 * @state: 1/0 to raise/lower the #RST
706 *
707 * Clear the EEH-frozen condition on a slot. This routine
708 * asserts the PCI #RST line if the 'state' argument is '1',
709 * and drops the #RST line if 'state is '0'. This routine is
710 * safe to call in an interrupt context.
711 *
712 */
713
714static void
715rtas_pci_slot_reset(struct pci_dn *pdn, int state)
716{
717 int config_addr;
718 int rc;
719
720 BUG_ON (pdn==NULL);
721
722 if (!pdn->phb) {
723 printk (KERN_WARNING "EEH: in slot reset, device node %s has no phb\n",
724 pdn->node->full_name);
725 return;
726 }
727
728 /* Use PE configuration address, if present */
729 config_addr = pdn->eeh_config_addr;
730 if (pdn->eeh_pe_config_addr)
731 config_addr = pdn->eeh_pe_config_addr;
732
733 rc = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
734 config_addr,
735 BUID_HI(pdn->phb->buid),
736 BUID_LO(pdn->phb->buid),
737 state);
738
739 /* Fundamental-reset not supported on this PE, try hot-reset */
740 if (rc == -8 && state == 3) {
741 rc = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
742 config_addr,
743 BUID_HI(pdn->phb->buid),
744 BUID_LO(pdn->phb->buid), 1);
745 if (rc)
746 printk(KERN_WARNING
747 "EEH: Unable to reset the failed slot,"
748 " #RST=%d dn=%s\n",
749 rc, pdn->node->full_name);
750 }
751}
752
753/**
754 * pcibios_set_pcie_slot_reset - Set PCI-E reset state 565 * pcibios_set_pcie_slot_reset - Set PCI-E reset state
755 * @dev: pci device struct 566 * @dev: pci device struct
756 * @state: reset state to enter 567 * @state: reset state to enter
757 * 568 *
758 * Return value: 569 * Return value:
759 * 0 if success 570 * 0 if success
760 **/ 571 */
761int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state) 572int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state state)
762{ 573{
763 struct device_node *dn = pci_device_to_OF_node(dev); 574 struct device_node *dn = pci_device_to_OF_node(dev);
764 struct pci_dn *pdn = PCI_DN(dn);
765 575
766 switch (state) { 576 switch (state) {
767 case pcie_deassert_reset: 577 case pcie_deassert_reset:
768 rtas_pci_slot_reset(pdn, 0); 578 eeh_ops->reset(dn, EEH_RESET_DEACTIVATE);
769 break; 579 break;
770 case pcie_hot_reset: 580 case pcie_hot_reset:
771 rtas_pci_slot_reset(pdn, 1); 581 eeh_ops->reset(dn, EEH_RESET_HOT);
772 break; 582 break;
773 case pcie_warm_reset: 583 case pcie_warm_reset:
774 rtas_pci_slot_reset(pdn, 3); 584 eeh_ops->reset(dn, EEH_RESET_FUNDAMENTAL);
775 break; 585 break;
776 default: 586 default:
777 return -EINVAL; 587 return -EINVAL;
@@ -781,13 +591,66 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
781} 591}
782 592
783/** 593/**
784 * rtas_set_slot_reset -- assert the pci #RST line for 1/4 second 594 * __eeh_set_pe_freset - Check the required reset for child devices
785 * @pdn: pci device node to be reset. 595 * @parent: parent device
596 * @freset: return value
597 *
598 * Each device might have its preferred reset type: fundamental or
599 * hot reset. The routine is used to collect the information from
600 * the child devices so that they could be reset accordingly.
601 */
602void __eeh_set_pe_freset(struct device_node *parent, unsigned int *freset)
603{
604 struct device_node *dn;
605
606 for_each_child_of_node(parent, dn) {
607 if (of_node_to_eeh_dev(dn)) {
608 struct pci_dev *dev = of_node_to_eeh_dev(dn)->pdev;
609
610 if (dev && dev->driver)
611 *freset |= dev->needs_freset;
612
613 __eeh_set_pe_freset(dn, freset);
614 }
615 }
616}
617
618/**
619 * eeh_set_pe_freset - Check the required reset for the indicated device and its children
620 * @dn: parent device
621 * @freset: return value
622 *
623 * Each device might have its preferred reset type: fundamental or
624 * hot reset. The routine is used to collected the information for
625 * the indicated device and its children so that the bunch of the
626 * devices could be reset properly.
786 */ 627 */
628void eeh_set_pe_freset(struct device_node *dn, unsigned int *freset)
629{
630 struct pci_dev *dev;
631 dn = eeh_find_device_pe(dn);
787 632
788static void __rtas_set_slot_reset(struct pci_dn *pdn) 633 /* Back up one, since config addrs might be shared */
634 if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent))
635 dn = dn->parent;
636
637 dev = of_node_to_eeh_dev(dn)->pdev;
638 if (dev)
639 *freset |= dev->needs_freset;
640
641 __eeh_set_pe_freset(dn, freset);
642}
643
644/**
645 * eeh_reset_pe_once - Assert the pci #RST line for 1/4 second
646 * @edev: pci device node to be reset.
647 *
648 * Assert the PCI #RST line for 1/4 second.
649 */
650static void eeh_reset_pe_once(struct eeh_dev *edev)
789{ 651{
790 unsigned int freset = 0; 652 unsigned int freset = 0;
653 struct device_node *dn = eeh_dev_to_of_node(edev);
791 654
792 /* Determine type of EEH reset required for 655 /* Determine type of EEH reset required for
793 * Partitionable Endpoint, a hot-reset (1) 656 * Partitionable Endpoint, a hot-reset (1)
@@ -795,58 +658,68 @@ static void __rtas_set_slot_reset(struct pci_dn *pdn)
795 * A fundamental reset required by any device under 658 * A fundamental reset required by any device under
796 * Partitionable Endpoint trumps hot-reset. 659 * Partitionable Endpoint trumps hot-reset.
797 */ 660 */
798 eeh_set_pe_freset(pdn->node, &freset); 661 eeh_set_pe_freset(dn, &freset);
799 662
800 if (freset) 663 if (freset)
801 rtas_pci_slot_reset(pdn, 3); 664 eeh_ops->reset(dn, EEH_RESET_FUNDAMENTAL);
802 else 665 else
803 rtas_pci_slot_reset(pdn, 1); 666 eeh_ops->reset(dn, EEH_RESET_HOT);
804 667
805 /* The PCI bus requires that the reset be held high for at least 668 /* The PCI bus requires that the reset be held high for at least
806 * a 100 milliseconds. We wait a bit longer 'just in case'. */ 669 * a 100 milliseconds. We wait a bit longer 'just in case'.
807 670 */
808#define PCI_BUS_RST_HOLD_TIME_MSEC 250 671#define PCI_BUS_RST_HOLD_TIME_MSEC 250
809 msleep (PCI_BUS_RST_HOLD_TIME_MSEC); 672 msleep(PCI_BUS_RST_HOLD_TIME_MSEC);
810 673
811 /* We might get hit with another EEH freeze as soon as the 674 /* We might get hit with another EEH freeze as soon as the
812 * pci slot reset line is dropped. Make sure we don't miss 675 * pci slot reset line is dropped. Make sure we don't miss
813 * these, and clear the flag now. */ 676 * these, and clear the flag now.
814 eeh_clear_slot (pdn->node, EEH_MODE_ISOLATED); 677 */
678 eeh_clear_slot(dn, EEH_MODE_ISOLATED);
815 679
816 rtas_pci_slot_reset (pdn, 0); 680 eeh_ops->reset(dn, EEH_RESET_DEACTIVATE);
817 681
818 /* After a PCI slot has been reset, the PCI Express spec requires 682 /* After a PCI slot has been reset, the PCI Express spec requires
819 * a 1.5 second idle time for the bus to stabilize, before starting 683 * a 1.5 second idle time for the bus to stabilize, before starting
820 * up traffic. */ 684 * up traffic.
685 */
821#define PCI_BUS_SETTLE_TIME_MSEC 1800 686#define PCI_BUS_SETTLE_TIME_MSEC 1800
822 msleep (PCI_BUS_SETTLE_TIME_MSEC); 687 msleep(PCI_BUS_SETTLE_TIME_MSEC);
823} 688}
824 689
825int rtas_set_slot_reset(struct pci_dn *pdn) 690/**
691 * eeh_reset_pe - Reset the indicated PE
692 * @edev: PCI device associated EEH device
693 *
694 * This routine should be called to reset indicated device, including
695 * PE. A PE might include multiple PCI devices and sometimes PCI bridges
696 * might be involved as well.
697 */
698int eeh_reset_pe(struct eeh_dev *edev)
826{ 699{
827 int i, rc; 700 int i, rc;
701 struct device_node *dn = eeh_dev_to_of_node(edev);
828 702
829 /* Take three shots at resetting the bus */ 703 /* Take three shots at resetting the bus */
830 for (i=0; i<3; i++) { 704 for (i=0; i<3; i++) {
831 __rtas_set_slot_reset(pdn); 705 eeh_reset_pe_once(edev);
832 706
833 rc = eeh_wait_for_slot_status(pdn, PCI_BUS_RESET_WAIT_MSEC); 707 rc = eeh_ops->wait_state(dn, PCI_BUS_RESET_WAIT_MSEC);
834 if (rc == 0) 708 if (rc == (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE))
835 return 0; 709 return 0;
836 710
837 if (rc < 0) { 711 if (rc < 0) {
838 printk(KERN_ERR "EEH: unrecoverable slot failure %s\n", 712 printk(KERN_ERR "EEH: unrecoverable slot failure %s\n",
839 pdn->node->full_name); 713 dn->full_name);
840 return -1; 714 return -1;
841 } 715 }
842 printk(KERN_ERR "EEH: bus reset %d failed on slot %s, rc=%d\n", 716 printk(KERN_ERR "EEH: bus reset %d failed on slot %s, rc=%d\n",
843 i+1, pdn->node->full_name, rc); 717 i+1, dn->full_name, rc);
844 } 718 }
845 719
846 return -1; 720 return -1;
847} 721}
848 722
849/* ------------------------------------------------------- */
850/** Save and restore of PCI BARs 723/** Save and restore of PCI BARs
851 * 724 *
852 * Although firmware will set up BARs during boot, it doesn't 725 * Although firmware will set up BARs during boot, it doesn't
@@ -856,181 +729,122 @@ int rtas_set_slot_reset(struct pci_dn *pdn)
856 */ 729 */
857 730
858/** 731/**
859 * __restore_bars - Restore the Base Address Registers 732 * eeh_restore_one_device_bars - Restore the Base Address Registers for one device
860 * @pdn: pci device node 733 * @edev: PCI device associated EEH device
861 * 734 *
862 * Loads the PCI configuration space base address registers, 735 * Loads the PCI configuration space base address registers,
863 * the expansion ROM base address, the latency timer, and etc. 736 * the expansion ROM base address, the latency timer, and etc.
864 * from the saved values in the device node. 737 * from the saved values in the device node.
865 */ 738 */
866static inline void __restore_bars (struct pci_dn *pdn) 739static inline void eeh_restore_one_device_bars(struct eeh_dev *edev)
867{ 740{
868 int i; 741 int i;
869 u32 cmd; 742 u32 cmd;
743 struct device_node *dn = eeh_dev_to_of_node(edev);
744
745 if (!edev->phb)
746 return;
870 747
871 if (NULL==pdn->phb) return;
872 for (i=4; i<10; i++) { 748 for (i=4; i<10; i++) {
873 rtas_write_config(pdn, i*4, 4, pdn->config_space[i]); 749 eeh_ops->write_config(dn, i*4, 4, edev->config_space[i]);
874 } 750 }
875 751
876 /* 12 == Expansion ROM Address */ 752 /* 12 == Expansion ROM Address */
877 rtas_write_config(pdn, 12*4, 4, pdn->config_space[12]); 753 eeh_ops->write_config(dn, 12*4, 4, edev->config_space[12]);
878 754
879#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF)) 755#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
880#define SAVED_BYTE(OFF) (((u8 *)(pdn->config_space))[BYTE_SWAP(OFF)]) 756#define SAVED_BYTE(OFF) (((u8 *)(edev->config_space))[BYTE_SWAP(OFF)])
881 757
882 rtas_write_config (pdn, PCI_CACHE_LINE_SIZE, 1, 758 eeh_ops->write_config(dn, PCI_CACHE_LINE_SIZE, 1,
883 SAVED_BYTE(PCI_CACHE_LINE_SIZE)); 759 SAVED_BYTE(PCI_CACHE_LINE_SIZE));
884 760
885 rtas_write_config (pdn, PCI_LATENCY_TIMER, 1, 761 eeh_ops->write_config(dn, PCI_LATENCY_TIMER, 1,
886 SAVED_BYTE(PCI_LATENCY_TIMER)); 762 SAVED_BYTE(PCI_LATENCY_TIMER));
887 763
888 /* max latency, min grant, interrupt pin and line */ 764 /* max latency, min grant, interrupt pin and line */
889 rtas_write_config(pdn, 15*4, 4, pdn->config_space[15]); 765 eeh_ops->write_config(dn, 15*4, 4, edev->config_space[15]);
890 766
891 /* Restore PERR & SERR bits, some devices require it, 767 /* Restore PERR & SERR bits, some devices require it,
892 don't touch the other command bits */ 768 * don't touch the other command bits
893 rtas_read_config(pdn, PCI_COMMAND, 4, &cmd); 769 */
894 if (pdn->config_space[1] & PCI_COMMAND_PARITY) 770 eeh_ops->read_config(dn, PCI_COMMAND, 4, &cmd);
771 if (edev->config_space[1] & PCI_COMMAND_PARITY)
895 cmd |= PCI_COMMAND_PARITY; 772 cmd |= PCI_COMMAND_PARITY;
896 else 773 else
897 cmd &= ~PCI_COMMAND_PARITY; 774 cmd &= ~PCI_COMMAND_PARITY;
898 if (pdn->config_space[1] & PCI_COMMAND_SERR) 775 if (edev->config_space[1] & PCI_COMMAND_SERR)
899 cmd |= PCI_COMMAND_SERR; 776 cmd |= PCI_COMMAND_SERR;
900 else 777 else
901 cmd &= ~PCI_COMMAND_SERR; 778 cmd &= ~PCI_COMMAND_SERR;
902 rtas_write_config(pdn, PCI_COMMAND, 4, cmd); 779 eeh_ops->write_config(dn, PCI_COMMAND, 4, cmd);
903} 780}
904 781
905/** 782/**
906 * eeh_restore_bars - restore the PCI config space info 783 * eeh_restore_bars - Restore the PCI config space info
784 * @edev: EEH device
907 * 785 *
908 * This routine performs a recursive walk to the children 786 * This routine performs a recursive walk to the children
909 * of this device as well. 787 * of this device as well.
910 */ 788 */
911void eeh_restore_bars(struct pci_dn *pdn) 789void eeh_restore_bars(struct eeh_dev *edev)
912{ 790{
913 struct device_node *dn; 791 struct device_node *dn;
914 if (!pdn) 792 if (!edev)
915 return; 793 return;
916 794
917 if ((pdn->eeh_mode & EEH_MODE_SUPPORTED) && !IS_BRIDGE(pdn->class_code)) 795 if ((edev->mode & EEH_MODE_SUPPORTED) && !IS_BRIDGE(edev->class_code))
918 __restore_bars (pdn); 796 eeh_restore_one_device_bars(edev);
919 797
920 for_each_child_of_node(pdn->node, dn) 798 for_each_child_of_node(eeh_dev_to_of_node(edev), dn)
921 eeh_restore_bars (PCI_DN(dn)); 799 eeh_restore_bars(of_node_to_eeh_dev(dn));
922} 800}
923 801
924/** 802/**
925 * eeh_save_bars - save device bars 803 * eeh_save_bars - Save device bars
804 * @edev: PCI device associated EEH device
926 * 805 *
927 * Save the values of the device bars. Unlike the restore 806 * Save the values of the device bars. Unlike the restore
928 * routine, this routine is *not* recursive. This is because 807 * routine, this routine is *not* recursive. This is because
929 * PCI devices are added individually; but, for the restore, 808 * PCI devices are added individually; but, for the restore,
930 * an entire slot is reset at a time. 809 * an entire slot is reset at a time.
931 */ 810 */
932static void eeh_save_bars(struct pci_dn *pdn) 811static void eeh_save_bars(struct eeh_dev *edev)
933{ 812{
934 int i; 813 int i;
814 struct device_node *dn;
935 815
936 if (!pdn ) 816 if (!edev)
937 return; 817 return;
818 dn = eeh_dev_to_of_node(edev);
938 819
939 for (i = 0; i < 16; i++) 820 for (i = 0; i < 16; i++)
940 rtas_read_config(pdn, i * 4, 4, &pdn->config_space[i]); 821 eeh_ops->read_config(dn, i * 4, 4, &edev->config_space[i]);
941}
942
943void
944rtas_configure_bridge(struct pci_dn *pdn)
945{
946 int config_addr;
947 int rc;
948 int token;
949
950 /* Use PE configuration address, if present */
951 config_addr = pdn->eeh_config_addr;
952 if (pdn->eeh_pe_config_addr)
953 config_addr = pdn->eeh_pe_config_addr;
954
955 /* Use new configure-pe function, if supported */
956 if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE)
957 token = ibm_configure_pe;
958 else
959 token = ibm_configure_bridge;
960
961 rc = rtas_call(token, 3, 1, NULL,
962 config_addr,
963 BUID_HI(pdn->phb->buid),
964 BUID_LO(pdn->phb->buid));
965 if (rc) {
966 printk (KERN_WARNING "EEH: Unable to configure device bridge (%d) for %s\n",
967 rc, pdn->node->full_name);
968 }
969} 822}
970 823
971/* ------------------------------------------------------------- */ 824/**
972/* The code below deals with enabling EEH for devices during the 825 * eeh_early_enable - Early enable EEH on the indicated device
973 * early boot sequence. EEH must be enabled before any PCI probing 826 * @dn: device node
974 * can be done. 827 * @data: BUID
828 *
829 * Enable EEH functionality on the specified PCI device. The function
830 * is expected to be called before real PCI probing is done. However,
831 * the PHBs have been initialized at this point.
975 */ 832 */
976 833static void *eeh_early_enable(struct device_node *dn, void *data)
977#define EEH_ENABLE 1
978
979struct eeh_early_enable_info {
980 unsigned int buid_hi;
981 unsigned int buid_lo;
982};
983
984static int get_pe_addr (int config_addr,
985 struct eeh_early_enable_info *info)
986{
987 unsigned int rets[3];
988 int ret;
989
990 /* Use latest config-addr token on power6 */
991 if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
992 /* Make sure we have a PE in hand */
993 ret = rtas_call (ibm_get_config_addr_info2, 4, 2, rets,
994 config_addr, info->buid_hi, info->buid_lo, 1);
995 if (ret || (rets[0]==0))
996 return 0;
997
998 ret = rtas_call (ibm_get_config_addr_info2, 4, 2, rets,
999 config_addr, info->buid_hi, info->buid_lo, 0);
1000 if (ret)
1001 return 0;
1002 return rets[0];
1003 }
1004
1005 /* Use older config-addr token on power5 */
1006 if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
1007 ret = rtas_call (ibm_get_config_addr_info, 4, 2, rets,
1008 config_addr, info->buid_hi, info->buid_lo, 0);
1009 if (ret)
1010 return 0;
1011 return rets[0];
1012 }
1013 return 0;
1014}
1015
1016/* Enable eeh for the given device node. */
1017static void *early_enable_eeh(struct device_node *dn, void *data)
1018{ 834{
1019 unsigned int rets[3];
1020 struct eeh_early_enable_info *info = data;
1021 int ret; 835 int ret;
1022 const u32 *class_code = of_get_property(dn, "class-code", NULL); 836 const u32 *class_code = of_get_property(dn, "class-code", NULL);
1023 const u32 *vendor_id = of_get_property(dn, "vendor-id", NULL); 837 const u32 *vendor_id = of_get_property(dn, "vendor-id", NULL);
1024 const u32 *device_id = of_get_property(dn, "device-id", NULL); 838 const u32 *device_id = of_get_property(dn, "device-id", NULL);
1025 const u32 *regs; 839 const u32 *regs;
1026 int enable; 840 int enable;
1027 struct pci_dn *pdn = PCI_DN(dn); 841 struct eeh_dev *edev = of_node_to_eeh_dev(dn);
1028 842
1029 pdn->class_code = 0; 843 edev->class_code = 0;
1030 pdn->eeh_mode = 0; 844 edev->mode = 0;
1031 pdn->eeh_check_count = 0; 845 edev->check_count = 0;
1032 pdn->eeh_freeze_count = 0; 846 edev->freeze_count = 0;
1033 pdn->eeh_false_positives = 0; 847 edev->false_positives = 0;
1034 848
1035 if (!of_device_is_available(dn)) 849 if (!of_device_is_available(dn))
1036 return NULL; 850 return NULL;
@@ -1041,54 +855,56 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
1041 855
1042 /* There is nothing to check on PCI to ISA bridges */ 856 /* There is nothing to check on PCI to ISA bridges */
1043 if (dn->type && !strcmp(dn->type, "isa")) { 857 if (dn->type && !strcmp(dn->type, "isa")) {
1044 pdn->eeh_mode |= EEH_MODE_NOCHECK; 858 edev->mode |= EEH_MODE_NOCHECK;
1045 return NULL; 859 return NULL;
1046 } 860 }
1047 pdn->class_code = *class_code; 861 edev->class_code = *class_code;
1048 862
1049 /* Ok... see if this device supports EEH. Some do, some don't, 863 /* Ok... see if this device supports EEH. Some do, some don't,
1050 * and the only way to find out is to check each and every one. */ 864 * and the only way to find out is to check each and every one.
865 */
1051 regs = of_get_property(dn, "reg", NULL); 866 regs = of_get_property(dn, "reg", NULL);
1052 if (regs) { 867 if (regs) {
1053 /* First register entry is addr (00BBSS00) */ 868 /* First register entry is addr (00BBSS00) */
1054 /* Try to enable eeh */ 869 /* Try to enable eeh */
1055 ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL, 870 ret = eeh_ops->set_option(dn, EEH_OPT_ENABLE);
1056 regs[0], info->buid_hi, info->buid_lo,
1057 EEH_ENABLE);
1058 871
1059 enable = 0; 872 enable = 0;
1060 if (ret == 0) { 873 if (ret == 0) {
1061 pdn->eeh_config_addr = regs[0]; 874 edev->config_addr = regs[0];
1062 875
1063 /* If the newer, better, ibm,get-config-addr-info is supported, 876 /* If the newer, better, ibm,get-config-addr-info is supported,
1064 * then use that instead. */ 877 * then use that instead.
1065 pdn->eeh_pe_config_addr = get_pe_addr(pdn->eeh_config_addr, info); 878 */
879 edev->pe_config_addr = eeh_ops->get_pe_addr(dn);
1066 880
1067 /* Some older systems (Power4) allow the 881 /* Some older systems (Power4) allow the
1068 * ibm,set-eeh-option call to succeed even on nodes 882 * ibm,set-eeh-option call to succeed even on nodes
1069 * where EEH is not supported. Verify support 883 * where EEH is not supported. Verify support
1070 * explicitly. */ 884 * explicitly.
1071 ret = read_slot_reset_state(pdn, rets); 885 */
1072 if ((ret == 0) && (rets[1] == 1)) 886 ret = eeh_ops->get_state(dn, NULL);
887 if (ret > 0 && ret != EEH_STATE_NOT_SUPPORT)
1073 enable = 1; 888 enable = 1;
1074 } 889 }
1075 890
1076 if (enable) { 891 if (enable) {
1077 eeh_subsystem_enabled = 1; 892 eeh_subsystem_enabled = 1;
1078 pdn->eeh_mode |= EEH_MODE_SUPPORTED; 893 edev->mode |= EEH_MODE_SUPPORTED;
1079 894
1080 pr_debug("EEH: %s: eeh enabled, config=%x pe_config=%x\n", 895 pr_debug("EEH: %s: eeh enabled, config=%x pe_config=%x\n",
1081 dn->full_name, pdn->eeh_config_addr, 896 dn->full_name, edev->config_addr,
1082 pdn->eeh_pe_config_addr); 897 edev->pe_config_addr);
1083 } else { 898 } else {
1084 899
1085 /* This device doesn't support EEH, but it may have an 900 /* This device doesn't support EEH, but it may have an
1086 * EEH parent, in which case we mark it as supported. */ 901 * EEH parent, in which case we mark it as supported.
1087 if (dn->parent && PCI_DN(dn->parent) 902 */
1088 && (PCI_DN(dn->parent)->eeh_mode & EEH_MODE_SUPPORTED)) { 903 if (dn->parent && of_node_to_eeh_dev(dn->parent) &&
904 (of_node_to_eeh_dev(dn->parent)->mode & EEH_MODE_SUPPORTED)) {
1089 /* Parent supports EEH. */ 905 /* Parent supports EEH. */
1090 pdn->eeh_mode |= EEH_MODE_SUPPORTED; 906 edev->mode |= EEH_MODE_SUPPORTED;
1091 pdn->eeh_config_addr = PCI_DN(dn->parent)->eeh_config_addr; 907 edev->config_addr = of_node_to_eeh_dev(dn->parent)->config_addr;
1092 return NULL; 908 return NULL;
1093 } 909 }
1094 } 910 }
@@ -1097,11 +913,63 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
1097 dn->full_name); 913 dn->full_name);
1098 } 914 }
1099 915
1100 eeh_save_bars(pdn); 916 eeh_save_bars(edev);
1101 return NULL; 917 return NULL;
1102} 918}
1103 919
1104/* 920/**
921 * eeh_ops_register - Register platform dependent EEH operations
922 * @ops: platform dependent EEH operations
923 *
924 * Register the platform dependent EEH operation callback
925 * functions. The platform should call this function before
926 * any other EEH operations.
927 */
928int __init eeh_ops_register(struct eeh_ops *ops)
929{
930 if (!ops->name) {
931 pr_warning("%s: Invalid EEH ops name for %p\n",
932 __func__, ops);
933 return -EINVAL;
934 }
935
936 if (eeh_ops && eeh_ops != ops) {
937 pr_warning("%s: EEH ops of platform %s already existing (%s)\n",
938 __func__, eeh_ops->name, ops->name);
939 return -EEXIST;
940 }
941
942 eeh_ops = ops;
943
944 return 0;
945}
946
947/**
948 * eeh_ops_unregister - Unreigster platform dependent EEH operations
949 * @name: name of EEH platform operations
950 *
951 * Unregister the platform dependent EEH operation callback
952 * functions.
953 */
954int __exit eeh_ops_unregister(const char *name)
955{
956 if (!name || !strlen(name)) {
957 pr_warning("%s: Invalid EEH ops name\n",
958 __func__);
959 return -EINVAL;
960 }
961
962 if (eeh_ops && !strcmp(eeh_ops->name, name)) {
963 eeh_ops = NULL;
964 return 0;
965 }
966
967 return -EEXIST;
968}
969
970/**
971 * eeh_init - EEH initialization
972 *
1105 * Initialize EEH by trying to enable it for all of the adapters in the system. 973 * Initialize EEH by trying to enable it for all of the adapters in the system.
1106 * As a side effect we can determine here if eeh is supported at all. 974 * As a side effect we can determine here if eeh is supported at all.
1107 * Note that we leave EEH on so failed config cycles won't cause a machine 975 * Note that we leave EEH on so failed config cycles won't cause a machine
@@ -1116,51 +984,27 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
1116 */ 984 */
1117void __init eeh_init(void) 985void __init eeh_init(void)
1118{ 986{
1119 struct device_node *phb, *np; 987 struct pci_controller *hose, *tmp;
1120 struct eeh_early_enable_info info; 988 struct device_node *phb;
1121 989 int ret;
1122 raw_spin_lock_init(&confirm_error_lock);
1123 spin_lock_init(&slot_errbuf_lock);
1124 990
1125 np = of_find_node_by_path("/rtas"); 991 /* call platform initialization function */
1126 if (np == NULL) 992 if (!eeh_ops) {
993 pr_warning("%s: Platform EEH operation not found\n",
994 __func__);
1127 return; 995 return;
1128 996 } else if ((ret = eeh_ops->init())) {
1129 ibm_set_eeh_option = rtas_token("ibm,set-eeh-option"); 997 pr_warning("%s: Failed to call platform init function (%d)\n",
1130 ibm_set_slot_reset = rtas_token("ibm,set-slot-reset"); 998 __func__, ret);
1131 ibm_read_slot_reset_state2 = rtas_token("ibm,read-slot-reset-state2");
1132 ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state");
1133 ibm_slot_error_detail = rtas_token("ibm,slot-error-detail");
1134 ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
1135 ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2");
1136 ibm_configure_bridge = rtas_token ("ibm,configure-bridge");
1137 ibm_configure_pe = rtas_token("ibm,configure-pe");
1138
1139 if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE)
1140 return; 999 return;
1141
1142 eeh_error_buf_size = rtas_token("rtas-error-log-max");
1143 if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
1144 eeh_error_buf_size = 1024;
1145 }
1146 if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
1147 printk(KERN_WARNING "EEH: rtas-error-log-max is bigger than allocated "
1148 "buffer ! (%d vs %d)", eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
1149 eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
1150 } 1000 }
1151 1001
1152 /* Enable EEH for all adapters. Note that eeh requires buid's */ 1002 raw_spin_lock_init(&confirm_error_lock);
1153 for (phb = of_find_node_by_name(NULL, "pci"); phb;
1154 phb = of_find_node_by_name(phb, "pci")) {
1155 unsigned long buid;
1156
1157 buid = get_phb_buid(phb);
1158 if (buid == 0 || PCI_DN(phb) == NULL)
1159 continue;
1160 1003
1161 info.buid_lo = BUID_LO(buid); 1004 /* Enable EEH for all adapters */
1162 info.buid_hi = BUID_HI(buid); 1005 list_for_each_entry_safe(hose, tmp, &hose_list, list_node) {
1163 traverse_pci_devices(phb, early_enable_eeh, &info); 1006 phb = hose->dn;
1007 traverse_pci_devices(phb, eeh_early_enable, NULL);
1164 } 1008 }
1165 1009
1166 if (eeh_subsystem_enabled) 1010 if (eeh_subsystem_enabled)
@@ -1170,7 +1014,7 @@ void __init eeh_init(void)
1170} 1014}
1171 1015
1172/** 1016/**
1173 * eeh_add_device_early - enable EEH for the indicated device_node 1017 * eeh_add_device_early - Enable EEH for the indicated device_node
1174 * @dn: device node for which to set up EEH 1018 * @dn: device node for which to set up EEH
1175 * 1019 *
1176 * This routine must be used to perform EEH initialization for PCI 1020 * This routine must be used to perform EEH initialization for PCI
@@ -1184,21 +1028,26 @@ void __init eeh_init(void)
1184static void eeh_add_device_early(struct device_node *dn) 1028static void eeh_add_device_early(struct device_node *dn)
1185{ 1029{
1186 struct pci_controller *phb; 1030 struct pci_controller *phb;
1187 struct eeh_early_enable_info info;
1188 1031
1189 if (!dn || !PCI_DN(dn)) 1032 if (!dn || !of_node_to_eeh_dev(dn))
1190 return; 1033 return;
1191 phb = PCI_DN(dn)->phb; 1034 phb = of_node_to_eeh_dev(dn)->phb;
1192 1035
1193 /* USB Bus children of PCI devices will not have BUID's */ 1036 /* USB Bus children of PCI devices will not have BUID's */
1194 if (NULL == phb || 0 == phb->buid) 1037 if (NULL == phb || 0 == phb->buid)
1195 return; 1038 return;
1196 1039
1197 info.buid_hi = BUID_HI(phb->buid); 1040 eeh_early_enable(dn, NULL);
1198 info.buid_lo = BUID_LO(phb->buid);
1199 early_enable_eeh(dn, &info);
1200} 1041}
1201 1042
1043/**
1044 * eeh_add_device_tree_early - Enable EEH for the indicated device
1045 * @dn: device node
1046 *
1047 * This routine must be used to perform EEH initialization for the
1048 * indicated PCI device that was added after system boot (e.g.
1049 * hotplug, dlpar).
1050 */
1202void eeh_add_device_tree_early(struct device_node *dn) 1051void eeh_add_device_tree_early(struct device_node *dn)
1203{ 1052{
1204 struct device_node *sib; 1053 struct device_node *sib;
@@ -1210,7 +1059,7 @@ void eeh_add_device_tree_early(struct device_node *dn)
1210EXPORT_SYMBOL_GPL(eeh_add_device_tree_early); 1059EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
1211 1060
1212/** 1061/**
1213 * eeh_add_device_late - perform EEH initialization for the indicated pci device 1062 * eeh_add_device_late - Perform EEH initialization for the indicated pci device
1214 * @dev: pci device for which to set up EEH 1063 * @dev: pci device for which to set up EEH
1215 * 1064 *
1216 * This routine must be used to complete EEH initialization for PCI 1065 * This routine must be used to complete EEH initialization for PCI
@@ -1219,7 +1068,7 @@ EXPORT_SYMBOL_GPL(eeh_add_device_tree_early);
1219static void eeh_add_device_late(struct pci_dev *dev) 1068static void eeh_add_device_late(struct pci_dev *dev)
1220{ 1069{
1221 struct device_node *dn; 1070 struct device_node *dn;
1222 struct pci_dn *pdn; 1071 struct eeh_dev *edev;
1223 1072
1224 if (!dev || !eeh_subsystem_enabled) 1073 if (!dev || !eeh_subsystem_enabled)
1225 return; 1074 return;
@@ -1227,20 +1076,29 @@ static void eeh_add_device_late(struct pci_dev *dev)
1227 pr_debug("EEH: Adding device %s\n", pci_name(dev)); 1076 pr_debug("EEH: Adding device %s\n", pci_name(dev));
1228 1077
1229 dn = pci_device_to_OF_node(dev); 1078 dn = pci_device_to_OF_node(dev);
1230 pdn = PCI_DN(dn); 1079 edev = pci_dev_to_eeh_dev(dev);
1231 if (pdn->pcidev == dev) { 1080 if (edev->pdev == dev) {
1232 pr_debug("EEH: Already referenced !\n"); 1081 pr_debug("EEH: Already referenced !\n");
1233 return; 1082 return;
1234 } 1083 }
1235 WARN_ON(pdn->pcidev); 1084 WARN_ON(edev->pdev);
1236 1085
1237 pci_dev_get (dev); 1086 pci_dev_get(dev);
1238 pdn->pcidev = dev; 1087 edev->pdev = dev;
1088 dev->dev.archdata.edev = edev;
1239 1089
1240 pci_addr_cache_insert_device(dev); 1090 pci_addr_cache_insert_device(dev);
1241 eeh_sysfs_add_device(dev); 1091 eeh_sysfs_add_device(dev);
1242} 1092}
1243 1093
1094/**
1095 * eeh_add_device_tree_late - Perform EEH initialization for the indicated PCI bus
1096 * @bus: PCI bus
1097 *
1098 * This routine must be used to perform EEH initialization for PCI
1099 * devices which are attached to the indicated PCI bus. The PCI bus
1100 * is added after system boot through hotplug or dlpar.
1101 */
1244void eeh_add_device_tree_late(struct pci_bus *bus) 1102void eeh_add_device_tree_late(struct pci_bus *bus)
1245{ 1103{
1246 struct pci_dev *dev; 1104 struct pci_dev *dev;
@@ -1257,7 +1115,7 @@ void eeh_add_device_tree_late(struct pci_bus *bus)
1257EXPORT_SYMBOL_GPL(eeh_add_device_tree_late); 1115EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
1258 1116
1259/** 1117/**
1260 * eeh_remove_device - undo EEH setup for the indicated pci device 1118 * eeh_remove_device - Undo EEH setup for the indicated pci device
1261 * @dev: pci device to be removed 1119 * @dev: pci device to be removed
1262 * 1120 *
1263 * This routine should be called when a device is removed from 1121 * This routine should be called when a device is removed from
@@ -1268,25 +1126,35 @@ EXPORT_SYMBOL_GPL(eeh_add_device_tree_late);
1268 */ 1126 */
1269static void eeh_remove_device(struct pci_dev *dev) 1127static void eeh_remove_device(struct pci_dev *dev)
1270{ 1128{
1271 struct device_node *dn; 1129 struct eeh_dev *edev;
1130
1272 if (!dev || !eeh_subsystem_enabled) 1131 if (!dev || !eeh_subsystem_enabled)
1273 return; 1132 return;
1133 edev = pci_dev_to_eeh_dev(dev);
1274 1134
1275 /* Unregister the device with the EEH/PCI address search system */ 1135 /* Unregister the device with the EEH/PCI address search system */
1276 pr_debug("EEH: Removing device %s\n", pci_name(dev)); 1136 pr_debug("EEH: Removing device %s\n", pci_name(dev));
1277 1137
1278 dn = pci_device_to_OF_node(dev); 1138 if (!edev || !edev->pdev) {
1279 if (PCI_DN(dn)->pcidev == NULL) {
1280 pr_debug("EEH: Not referenced !\n"); 1139 pr_debug("EEH: Not referenced !\n");
1281 return; 1140 return;
1282 } 1141 }
1283 PCI_DN(dn)->pcidev = NULL; 1142 edev->pdev = NULL;
1284 pci_dev_put (dev); 1143 dev->dev.archdata.edev = NULL;
1144 pci_dev_put(dev);
1285 1145
1286 pci_addr_cache_remove_device(dev); 1146 pci_addr_cache_remove_device(dev);
1287 eeh_sysfs_remove_device(dev); 1147 eeh_sysfs_remove_device(dev);
1288} 1148}
1289 1149
1150/**
1151 * eeh_remove_bus_device - Undo EEH setup for the indicated PCI device
1152 * @dev: PCI device
1153 *
1154 * This routine must be called when a device is removed from the
1155 * running system through hotplug or dlpar. The corresponding
1156 * PCI address cache will be removed.
1157 */
1290void eeh_remove_bus_device(struct pci_dev *dev) 1158void eeh_remove_bus_device(struct pci_dev *dev)
1291{ 1159{
1292 struct pci_bus *bus = dev->subordinate; 1160 struct pci_bus *bus = dev->subordinate;
@@ -1305,21 +1173,24 @@ static int proc_eeh_show(struct seq_file *m, void *v)
1305{ 1173{
1306 if (0 == eeh_subsystem_enabled) { 1174 if (0 == eeh_subsystem_enabled) {
1307 seq_printf(m, "EEH Subsystem is globally disabled\n"); 1175 seq_printf(m, "EEH Subsystem is globally disabled\n");
1308 seq_printf(m, "eeh_total_mmio_ffs=%ld\n", total_mmio_ffs); 1176 seq_printf(m, "eeh_total_mmio_ffs=%llu\n", eeh_stats.total_mmio_ffs);
1309 } else { 1177 } else {
1310 seq_printf(m, "EEH Subsystem is enabled\n"); 1178 seq_printf(m, "EEH Subsystem is enabled\n");
1311 seq_printf(m, 1179 seq_printf(m,
1312 "no device=%ld\n" 1180 "no device=%llu\n"
1313 "no device node=%ld\n" 1181 "no device node=%llu\n"
1314 "no config address=%ld\n" 1182 "no config address=%llu\n"
1315 "check not wanted=%ld\n" 1183 "check not wanted=%llu\n"
1316 "eeh_total_mmio_ffs=%ld\n" 1184 "eeh_total_mmio_ffs=%llu\n"
1317 "eeh_false_positives=%ld\n" 1185 "eeh_false_positives=%llu\n"
1318 "eeh_slot_resets=%ld\n", 1186 "eeh_slot_resets=%llu\n",
1319 no_device, no_dn, no_cfg_addr, 1187 eeh_stats.no_device,
1320 ignored_check, total_mmio_ffs, 1188 eeh_stats.no_dn,
1321 false_positives, 1189 eeh_stats.no_cfg_addr,
1322 slot_resets); 1190 eeh_stats.ignored_check,
1191 eeh_stats.total_mmio_ffs,
1192 eeh_stats.false_positives,
1193 eeh_stats.slot_resets);
1323 } 1194 }
1324 1195
1325 return 0; 1196 return 0;
diff --git a/arch/powerpc/platforms/pseries/eeh_cache.c b/arch/powerpc/platforms/pseries/eeh_cache.c
index fc5ae767989e..e5ae1c687c66 100644
--- a/arch/powerpc/platforms/pseries/eeh_cache.c
+++ b/arch/powerpc/platforms/pseries/eeh_cache.c
@@ -1,5 +1,4 @@
1/* 1/*
2 * eeh_cache.c
3 * PCI address cache; allows the lookup of PCI devices based on I/O address 2 * PCI address cache; allows the lookup of PCI devices based on I/O address
4 * 3 *
5 * Copyright IBM Corporation 2004 4 * Copyright IBM Corporation 2004
@@ -47,8 +46,7 @@
47 * than any hash algo I could think of for this problem, even 46 * than any hash algo I could think of for this problem, even
48 * with the penalty of slow pointer chases for d-cache misses). 47 * with the penalty of slow pointer chases for d-cache misses).
49 */ 48 */
50struct pci_io_addr_range 49struct pci_io_addr_range {
51{
52 struct rb_node rb_node; 50 struct rb_node rb_node;
53 unsigned long addr_lo; 51 unsigned long addr_lo;
54 unsigned long addr_hi; 52 unsigned long addr_hi;
@@ -56,13 +54,12 @@ struct pci_io_addr_range
56 unsigned int flags; 54 unsigned int flags;
57}; 55};
58 56
59static struct pci_io_addr_cache 57static struct pci_io_addr_cache {
60{
61 struct rb_root rb_root; 58 struct rb_root rb_root;
62 spinlock_t piar_lock; 59 spinlock_t piar_lock;
63} pci_io_addr_cache_root; 60} pci_io_addr_cache_root;
64 61
65static inline struct pci_dev *__pci_get_device_by_addr(unsigned long addr) 62static inline struct pci_dev *__pci_addr_cache_get_device(unsigned long addr)
66{ 63{
67 struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node; 64 struct rb_node *n = pci_io_addr_cache_root.rb_root.rb_node;
68 65
@@ -86,7 +83,7 @@ static inline struct pci_dev *__pci_get_device_by_addr(unsigned long addr)
86} 83}
87 84
88/** 85/**
89 * pci_get_device_by_addr - Get device, given only address 86 * pci_addr_cache_get_device - Get device, given only address
90 * @addr: mmio (PIO) phys address or i/o port number 87 * @addr: mmio (PIO) phys address or i/o port number
91 * 88 *
92 * Given an mmio phys address, or a port number, find a pci device 89 * Given an mmio phys address, or a port number, find a pci device
@@ -95,13 +92,13 @@ static inline struct pci_dev *__pci_get_device_by_addr(unsigned long addr)
95 * from zero (that is, they do *not* have pci_io_addr added in). 92 * from zero (that is, they do *not* have pci_io_addr added in).
96 * It is safe to call this function within an interrupt. 93 * It is safe to call this function within an interrupt.
97 */ 94 */
98struct pci_dev *pci_get_device_by_addr(unsigned long addr) 95struct pci_dev *pci_addr_cache_get_device(unsigned long addr)
99{ 96{
100 struct pci_dev *dev; 97 struct pci_dev *dev;
101 unsigned long flags; 98 unsigned long flags;
102 99
103 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags); 100 spin_lock_irqsave(&pci_io_addr_cache_root.piar_lock, flags);
104 dev = __pci_get_device_by_addr(addr); 101 dev = __pci_addr_cache_get_device(addr);
105 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags); 102 spin_unlock_irqrestore(&pci_io_addr_cache_root.piar_lock, flags);
106 return dev; 103 return dev;
107} 104}
@@ -166,7 +163,7 @@ pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
166 163
167#ifdef DEBUG 164#ifdef DEBUG
168 printk(KERN_DEBUG "PIAR: insert range=[%lx:%lx] dev=%s\n", 165 printk(KERN_DEBUG "PIAR: insert range=[%lx:%lx] dev=%s\n",
169 alo, ahi, pci_name (dev)); 166 alo, ahi, pci_name(dev));
170#endif 167#endif
171 168
172 rb_link_node(&piar->rb_node, parent, p); 169 rb_link_node(&piar->rb_node, parent, p);
@@ -178,7 +175,7 @@ pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
178static void __pci_addr_cache_insert_device(struct pci_dev *dev) 175static void __pci_addr_cache_insert_device(struct pci_dev *dev)
179{ 176{
180 struct device_node *dn; 177 struct device_node *dn;
181 struct pci_dn *pdn; 178 struct eeh_dev *edev;
182 int i; 179 int i;
183 180
184 dn = pci_device_to_OF_node(dev); 181 dn = pci_device_to_OF_node(dev);
@@ -187,13 +184,19 @@ static void __pci_addr_cache_insert_device(struct pci_dev *dev)
187 return; 184 return;
188 } 185 }
189 186
187 edev = of_node_to_eeh_dev(dn);
188 if (!edev) {
189 pr_warning("PCI: no EEH dev found for dn=%s\n",
190 dn->full_name);
191 return;
192 }
193
190 /* Skip any devices for which EEH is not enabled. */ 194 /* Skip any devices for which EEH is not enabled. */
191 pdn = PCI_DN(dn); 195 if (!(edev->mode & EEH_MODE_SUPPORTED) ||
192 if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) || 196 edev->mode & EEH_MODE_NOCHECK) {
193 pdn->eeh_mode & EEH_MODE_NOCHECK) {
194#ifdef DEBUG 197#ifdef DEBUG
195 printk(KERN_INFO "PCI: skip building address cache for=%s - %s\n", 198 pr_info("PCI: skip building address cache for=%s - %s\n",
196 pci_name(dev), pdn->node->full_name); 199 pci_name(dev), dn->full_name);
197#endif 200#endif
198 return; 201 return;
199 } 202 }
@@ -284,6 +287,7 @@ void pci_addr_cache_remove_device(struct pci_dev *dev)
284void __init pci_addr_cache_build(void) 287void __init pci_addr_cache_build(void)
285{ 288{
286 struct device_node *dn; 289 struct device_node *dn;
290 struct eeh_dev *edev;
287 struct pci_dev *dev = NULL; 291 struct pci_dev *dev = NULL;
288 292
289 spin_lock_init(&pci_io_addr_cache_root.piar_lock); 293 spin_lock_init(&pci_io_addr_cache_root.piar_lock);
@@ -294,8 +298,14 @@ void __init pci_addr_cache_build(void)
294 dn = pci_device_to_OF_node(dev); 298 dn = pci_device_to_OF_node(dev);
295 if (!dn) 299 if (!dn)
296 continue; 300 continue;
301
302 edev = of_node_to_eeh_dev(dn);
303 if (!edev)
304 continue;
305
297 pci_dev_get(dev); /* matching put is in eeh_remove_device() */ 306 pci_dev_get(dev); /* matching put is in eeh_remove_device() */
298 PCI_DN(dn)->pcidev = dev; 307 dev->dev.archdata.edev = edev;
308 edev->pdev = dev;
299 309
300 eeh_sysfs_add_device(dev); 310 eeh_sysfs_add_device(dev);
301 } 311 }
diff --git a/arch/powerpc/platforms/pseries/eeh_dev.c b/arch/powerpc/platforms/pseries/eeh_dev.c
new file mode 100644
index 000000000000..c4507d095900
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/eeh_dev.c
@@ -0,0 +1,102 @@
1/*
2 * The file intends to implement dynamic creation of EEH device, which will
3 * be bound with OF node and PCI device simutaneously. The EEH devices would
4 * be foundamental information for EEH core components to work proerly. Besides,
5 * We have to support multiple situations where dynamic creation of EEH device
6 * is required:
7 *
8 * 1) Before PCI emunation starts, we need create EEH devices according to the
9 * PCI sensitive OF nodes.
10 * 2) When PCI emunation is done, we need do the binding between PCI device and
11 * the associated EEH device.
12 * 3) DR (Dynamic Reconfiguration) would create PCI sensitive OF node. EEH device
13 * will be created while PCI sensitive OF node is detected from DR.
14 * 4) PCI hotplug needs redoing the binding between PCI device and EEH device. If
15 * PHB is newly inserted, we also need create EEH devices accordingly.
16 *
17 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2012.
18 *
19 * This program is free software; you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License as published by
21 * the Free Software Foundation; either version 2 of the License, or
22 * (at your option) any later version.
23 *
24 * This program is distributed in the hope that it will be useful,
25 * but WITHOUT ANY WARRANTY; without even the implied warranty of
26 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
27 * GNU General Public License for more details.
28 *
29 * You should have received a copy of the GNU General Public License
30 * along with this program; if not, write to the Free Software
31 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
32 */
33
34#include <linux/export.h>
35#include <linux/gfp.h>
36#include <linux/init.h>
37#include <linux/kernel.h>
38#include <linux/pci.h>
39#include <linux/string.h>
40
41#include <asm/pci-bridge.h>
42#include <asm/ppc-pci.h>
43
44/**
45 * eeh_dev_init - Create EEH device according to OF node
46 * @dn: device node
47 * @data: PHB
48 *
49 * It will create EEH device according to the given OF node. The function
50 * might be called by PCI emunation, DR, PHB hotplug.
51 */
52void * __devinit eeh_dev_init(struct device_node *dn, void *data)
53{
54 struct pci_controller *phb = data;
55 struct eeh_dev *edev;
56
57 /* Allocate EEH device */
58 edev = zalloc_maybe_bootmem(sizeof(*edev), GFP_KERNEL);
59 if (!edev) {
60 pr_warning("%s: out of memory\n", __func__);
61 return NULL;
62 }
63
64 /* Associate EEH device with OF node */
65 PCI_DN(dn)->edev = edev;
66 edev->dn = dn;
67 edev->phb = phb;
68
69 return NULL;
70}
71
72/**
73 * eeh_dev_phb_init_dynamic - Create EEH devices for devices included in PHB
74 * @phb: PHB
75 *
76 * Scan the PHB OF node and its child association, then create the
77 * EEH devices accordingly
78 */
79void __devinit eeh_dev_phb_init_dynamic(struct pci_controller *phb)
80{
81 struct device_node *dn = phb->dn;
82
83 /* EEH device for PHB */
84 eeh_dev_init(dn, phb);
85
86 /* EEH devices for children OF nodes */
87 traverse_pci_devices(dn, eeh_dev_init, phb);
88}
89
90/**
91 * eeh_dev_phb_init - Create EEH devices for devices included in existing PHBs
92 *
93 * Scan all the existing PHBs and create EEH devices for their OF
94 * nodes and their children OF nodes
95 */
96void __init eeh_dev_phb_init(void)
97{
98 struct pci_controller *phb, *tmp;
99
100 list_for_each_entry_safe(phb, tmp, &hose_list, list_node)
101 eeh_dev_phb_init_dynamic(phb);
102}
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c
index 1b6cb10589e0..baf92cd9dfab 100644
--- a/arch/powerpc/platforms/pseries/eeh_driver.c
+++ b/arch/powerpc/platforms/pseries/eeh_driver.c
@@ -33,8 +33,14 @@
33#include <asm/prom.h> 33#include <asm/prom.h>
34#include <asm/rtas.h> 34#include <asm/rtas.h>
35 35
36 36/**
37static inline const char * pcid_name (struct pci_dev *pdev) 37 * eeh_pcid_name - Retrieve name of PCI device driver
38 * @pdev: PCI device
39 *
40 * This routine is used to retrieve the name of PCI device driver
41 * if that's valid.
42 */
43static inline const char *eeh_pcid_name(struct pci_dev *pdev)
38{ 44{
39 if (pdev && pdev->dev.driver) 45 if (pdev && pdev->dev.driver)
40 return pdev->dev.driver->name; 46 return pdev->dev.driver->name;
@@ -64,48 +70,59 @@ static void print_device_node_tree(struct pci_dn *pdn, int dent)
64#endif 70#endif
65 71
66/** 72/**
67 * eeh_disable_irq - disable interrupt for the recovering device 73 * eeh_disable_irq - Disable interrupt for the recovering device
74 * @dev: PCI device
75 *
76 * This routine must be called when reporting temporary or permanent
77 * error to the particular PCI device to disable interrupt of that
78 * device. If the device has enabled MSI or MSI-X interrupt, we needn't
79 * do real work because EEH should freeze DMA transfers for those PCI
80 * devices encountering EEH errors, which includes MSI or MSI-X.
68 */ 81 */
69static void eeh_disable_irq(struct pci_dev *dev) 82static void eeh_disable_irq(struct pci_dev *dev)
70{ 83{
71 struct device_node *dn = pci_device_to_OF_node(dev); 84 struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
72 85
73 /* Don't disable MSI and MSI-X interrupts. They are 86 /* Don't disable MSI and MSI-X interrupts. They are
74 * effectively disabled by the DMA Stopped state 87 * effectively disabled by the DMA Stopped state
75 * when an EEH error occurs. 88 * when an EEH error occurs.
76 */ 89 */
77 if (dev->msi_enabled || dev->msix_enabled) 90 if (dev->msi_enabled || dev->msix_enabled)
78 return; 91 return;
79 92
80 if (!irq_has_action(dev->irq)) 93 if (!irq_has_action(dev->irq))
81 return; 94 return;
82 95
83 PCI_DN(dn)->eeh_mode |= EEH_MODE_IRQ_DISABLED; 96 edev->mode |= EEH_MODE_IRQ_DISABLED;
84 disable_irq_nosync(dev->irq); 97 disable_irq_nosync(dev->irq);
85} 98}
86 99
87/** 100/**
88 * eeh_enable_irq - enable interrupt for the recovering device 101 * eeh_enable_irq - Enable interrupt for the recovering device
102 * @dev: PCI device
103 *
104 * This routine must be called to enable interrupt while failed
105 * device could be resumed.
89 */ 106 */
90static void eeh_enable_irq(struct pci_dev *dev) 107static void eeh_enable_irq(struct pci_dev *dev)
91{ 108{
92 struct device_node *dn = pci_device_to_OF_node(dev); 109 struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);
93 110
94 if ((PCI_DN(dn)->eeh_mode) & EEH_MODE_IRQ_DISABLED) { 111 if ((edev->mode) & EEH_MODE_IRQ_DISABLED) {
95 PCI_DN(dn)->eeh_mode &= ~EEH_MODE_IRQ_DISABLED; 112 edev->mode &= ~EEH_MODE_IRQ_DISABLED;
96 enable_irq(dev->irq); 113 enable_irq(dev->irq);
97 } 114 }
98} 115}
99 116
100/* ------------------------------------------------------- */
101/** 117/**
102 * eeh_report_error - report pci error to each device driver 118 * eeh_report_error - Report pci error to each device driver
119 * @dev: PCI device
120 * @userdata: return value
103 * 121 *
104 * Report an EEH error to each device driver, collect up and 122 * Report an EEH error to each device driver, collect up and
105 * merge the device driver responses. Cumulative response 123 * merge the device driver responses. Cumulative response
106 * passed back in "userdata". 124 * passed back in "userdata".
107 */ 125 */
108
109static int eeh_report_error(struct pci_dev *dev, void *userdata) 126static int eeh_report_error(struct pci_dev *dev, void *userdata)
110{ 127{
111 enum pci_ers_result rc, *res = userdata; 128 enum pci_ers_result rc, *res = userdata;
@@ -122,7 +139,7 @@ static int eeh_report_error(struct pci_dev *dev, void *userdata)
122 !driver->err_handler->error_detected) 139 !driver->err_handler->error_detected)
123 return 0; 140 return 0;
124 141
125 rc = driver->err_handler->error_detected (dev, pci_channel_io_frozen); 142 rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);
126 143
127 /* A driver that needs a reset trumps all others */ 144 /* A driver that needs a reset trumps all others */
128 if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; 145 if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
@@ -132,13 +149,14 @@ static int eeh_report_error(struct pci_dev *dev, void *userdata)
132} 149}
133 150
134/** 151/**
135 * eeh_report_mmio_enabled - tell drivers that MMIO has been enabled 152 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
153 * @dev: PCI device
154 * @userdata: return value
136 * 155 *
137 * Tells each device driver that IO ports, MMIO and config space I/O 156 * Tells each device driver that IO ports, MMIO and config space I/O
138 * are now enabled. Collects up and merges the device driver responses. 157 * are now enabled. Collects up and merges the device driver responses.
139 * Cumulative response passed back in "userdata". 158 * Cumulative response passed back in "userdata".
140 */ 159 */
141
142static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata) 160static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
143{ 161{
144 enum pci_ers_result rc, *res = userdata; 162 enum pci_ers_result rc, *res = userdata;
@@ -149,7 +167,7 @@ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
149 !driver->err_handler->mmio_enabled) 167 !driver->err_handler->mmio_enabled)
150 return 0; 168 return 0;
151 169
152 rc = driver->err_handler->mmio_enabled (dev); 170 rc = driver->err_handler->mmio_enabled(dev);
153 171
154 /* A driver that needs a reset trumps all others */ 172 /* A driver that needs a reset trumps all others */
155 if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc; 173 if (rc == PCI_ERS_RESULT_NEED_RESET) *res = rc;
@@ -159,9 +177,15 @@ static int eeh_report_mmio_enabled(struct pci_dev *dev, void *userdata)
159} 177}
160 178
161/** 179/**
162 * eeh_report_reset - tell device that slot has been reset 180 * eeh_report_reset - Tell device that slot has been reset
181 * @dev: PCI device
182 * @userdata: return value
183 *
184 * This routine must be called while EEH tries to reset particular
185 * PCI device so that the associated PCI device driver could take
186 * some actions, usually to save data the driver needs so that the
187 * driver can work again while the device is recovered.
163 */ 188 */
164
165static int eeh_report_reset(struct pci_dev *dev, void *userdata) 189static int eeh_report_reset(struct pci_dev *dev, void *userdata)
166{ 190{
167 enum pci_ers_result rc, *res = userdata; 191 enum pci_ers_result rc, *res = userdata;
@@ -188,9 +212,14 @@ static int eeh_report_reset(struct pci_dev *dev, void *userdata)
188} 212}
189 213
190/** 214/**
191 * eeh_report_resume - tell device to resume normal operations 215 * eeh_report_resume - Tell device to resume normal operations
216 * @dev: PCI device
217 * @userdata: return value
218 *
219 * This routine must be called to notify the device driver that it
220 * could resume so that the device driver can do some initialization
221 * to make the recovered device work again.
192 */ 222 */
193
194static int eeh_report_resume(struct pci_dev *dev, void *userdata) 223static int eeh_report_resume(struct pci_dev *dev, void *userdata)
195{ 224{
196 struct pci_driver *driver = dev->driver; 225 struct pci_driver *driver = dev->driver;
@@ -212,12 +241,13 @@ static int eeh_report_resume(struct pci_dev *dev, void *userdata)
212} 241}
213 242
214/** 243/**
215 * eeh_report_failure - tell device driver that device is dead. 244 * eeh_report_failure - Tell device driver that device is dead.
245 * @dev: PCI device
246 * @userdata: return value
216 * 247 *
217 * This informs the device driver that the device is permanently 248 * This informs the device driver that the device is permanently
218 * dead, and that no further recovery attempts will be made on it. 249 * dead, and that no further recovery attempts will be made on it.
219 */ 250 */
220
221static int eeh_report_failure(struct pci_dev *dev, void *userdata) 251static int eeh_report_failure(struct pci_dev *dev, void *userdata)
222{ 252{
223 struct pci_driver *driver = dev->driver; 253 struct pci_driver *driver = dev->driver;
@@ -238,65 +268,46 @@ static int eeh_report_failure(struct pci_dev *dev, void *userdata)
238 return 0; 268 return 0;
239} 269}
240 270
241/* ------------------------------------------------------- */
242/** 271/**
243 * handle_eeh_events -- reset a PCI device after hard lockup. 272 * eeh_reset_device - Perform actual reset of a pci slot
244 * 273 * @edev: PE associated EEH device
245 * pSeries systems will isolate a PCI slot if the PCI-Host 274 * @bus: PCI bus corresponding to the isolcated slot
246 * bridge detects address or data parity errors, DMA's
247 * occurring to wild addresses (which usually happen due to
248 * bugs in device drivers or in PCI adapter firmware).
249 * Slot isolations also occur if #SERR, #PERR or other misc
250 * PCI-related errors are detected.
251 * 275 *
252 * Recovery process consists of unplugging the device driver 276 * This routine must be called to do reset on the indicated PE.
253 * (which generated hotplug events to userspace), then issuing 277 * During the reset, udev might be invoked because those affected
254 * a PCI #RST to the device, then reconfiguring the PCI config 278 * PCI devices will be removed and then added.
255 * space for all bridges & devices under this slot, and then
256 * finally restarting the device drivers (which cause a second
257 * set of hotplug events to go out to userspace).
258 */ 279 */
259 280static int eeh_reset_device(struct eeh_dev *edev, struct pci_bus *bus)
260/**
261 * eeh_reset_device() -- perform actual reset of a pci slot
262 * @bus: pointer to the pci bus structure corresponding
263 * to the isolated slot. A non-null value will
264 * cause all devices under the bus to be removed
265 * and then re-added.
266 * @pe_dn: pointer to a "Partionable Endpoint" device node.
267 * This is the top-level structure on which pci
268 * bus resets can be performed.
269 */
270
271static int eeh_reset_device (struct pci_dn *pe_dn, struct pci_bus *bus)
272{ 281{
273 struct device_node *dn; 282 struct device_node *dn;
274 int cnt, rc; 283 int cnt, rc;
275 284
276 /* pcibios will clear the counter; save the value */ 285 /* pcibios will clear the counter; save the value */
277 cnt = pe_dn->eeh_freeze_count; 286 cnt = edev->freeze_count;
278 287
279 if (bus) 288 if (bus)
280 pcibios_remove_pci_devices(bus); 289 pcibios_remove_pci_devices(bus);
281 290
282 /* Reset the pci controller. (Asserts RST#; resets config space). 291 /* Reset the pci controller. (Asserts RST#; resets config space).
283 * Reconfigure bridges and devices. Don't try to bring the system 292 * Reconfigure bridges and devices. Don't try to bring the system
284 * up if the reset failed for some reason. */ 293 * up if the reset failed for some reason.
285 rc = rtas_set_slot_reset(pe_dn); 294 */
295 rc = eeh_reset_pe(edev);
286 if (rc) 296 if (rc)
287 return rc; 297 return rc;
288 298
289 /* Walk over all functions on this device. */ 299 /* Walk over all functions on this device. */
290 dn = pe_dn->node; 300 dn = eeh_dev_to_of_node(edev);
291 if (!pcibios_find_pci_bus(dn) && PCI_DN(dn->parent)) 301 if (!pcibios_find_pci_bus(dn) && of_node_to_eeh_dev(dn->parent))
292 dn = dn->parent->child; 302 dn = dn->parent->child;
293 303
294 while (dn) { 304 while (dn) {
295 struct pci_dn *ppe = PCI_DN(dn); 305 struct eeh_dev *pedev = of_node_to_eeh_dev(dn);
306
296 /* On Power4, always true because eeh_pe_config_addr=0 */ 307 /* On Power4, always true because eeh_pe_config_addr=0 */
297 if (pe_dn->eeh_pe_config_addr == ppe->eeh_pe_config_addr) { 308 if (edev->pe_config_addr == pedev->pe_config_addr) {
298 rtas_configure_bridge(ppe); 309 eeh_ops->configure_bridge(dn);
299 eeh_restore_bars(ppe); 310 eeh_restore_bars(pedev);
300 } 311 }
301 dn = dn->sibling; 312 dn = dn->sibling;
302 } 313 }
@@ -308,10 +319,10 @@ static int eeh_reset_device (struct pci_dn *pe_dn, struct pci_bus *bus)
308 * potentially weird things happen. 319 * potentially weird things happen.
309 */ 320 */
310 if (bus) { 321 if (bus) {
311 ssleep (5); 322 ssleep(5);
312 pcibios_add_pci_devices(bus); 323 pcibios_add_pci_devices(bus);
313 } 324 }
314 pe_dn->eeh_freeze_count = cnt; 325 edev->freeze_count = cnt;
315 326
316 return 0; 327 return 0;
317} 328}
@@ -321,23 +332,39 @@ static int eeh_reset_device (struct pci_dn *pe_dn, struct pci_bus *bus)
321 */ 332 */
322#define MAX_WAIT_FOR_RECOVERY 150 333#define MAX_WAIT_FOR_RECOVERY 150
323 334
324struct pci_dn * handle_eeh_events (struct eeh_event *event) 335/**
336 * eeh_handle_event - Reset a PCI device after hard lockup.
337 * @event: EEH event
338 *
339 * While PHB detects address or data parity errors on particular PCI
340 * slot, the associated PE will be frozen. Besides, DMA's occurring
341 * to wild addresses (which usually happen due to bugs in device
342 * drivers or in PCI adapter firmware) can cause EEH error. #SERR,
343 * #PERR or other misc PCI-related errors also can trigger EEH errors.
344 *
345 * Recovery process consists of unplugging the device driver (which
346 * generated hotplug events to userspace), then issuing a PCI #RST to
347 * the device, then reconfiguring the PCI config space for all bridges
348 * & devices under this slot, and then finally restarting the device
349 * drivers (which cause a second set of hotplug events to go out to
350 * userspace).
351 */
352struct eeh_dev *handle_eeh_events(struct eeh_event *event)
325{ 353{
326 struct device_node *frozen_dn; 354 struct device_node *frozen_dn;
327 struct pci_dn *frozen_pdn; 355 struct eeh_dev *frozen_edev;
328 struct pci_bus *frozen_bus; 356 struct pci_bus *frozen_bus;
329 int rc = 0; 357 int rc = 0;
330 enum pci_ers_result result = PCI_ERS_RESULT_NONE; 358 enum pci_ers_result result = PCI_ERS_RESULT_NONE;
331 const char *location, *pci_str, *drv_str, *bus_pci_str, *bus_drv_str; 359 const char *location, *pci_str, *drv_str, *bus_pci_str, *bus_drv_str;
332 360
333 frozen_dn = find_device_pe(event->dn); 361 frozen_dn = eeh_find_device_pe(eeh_dev_to_of_node(event->edev));
334 if (!frozen_dn) { 362 if (!frozen_dn) {
335 363 location = of_get_property(eeh_dev_to_of_node(event->edev), "ibm,loc-code", NULL);
336 location = of_get_property(event->dn, "ibm,loc-code", NULL);
337 location = location ? location : "unknown"; 364 location = location ? location : "unknown";
338 printk(KERN_ERR "EEH: Error: Cannot find partition endpoint " 365 printk(KERN_ERR "EEH: Error: Cannot find partition endpoint "
339 "for location=%s pci addr=%s\n", 366 "for location=%s pci addr=%s\n",
340 location, eeh_pci_name(event->dev)); 367 location, eeh_pci_name(eeh_dev_to_pci_dev(event->edev)));
341 return NULL; 368 return NULL;
342 } 369 }
343 370
@@ -350,9 +377,10 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
350 * which was always an EADS pci bridge. In the new style, 377 * which was always an EADS pci bridge. In the new style,
351 * there might not be any EADS bridges, and even when there are, 378 * there might not be any EADS bridges, and even when there are,
352 * the firmware marks them as "EEH incapable". So another 379 * the firmware marks them as "EEH incapable". So another
353 * two-step is needed to find the pci bus.. */ 380 * two-step is needed to find the pci bus..
381 */
354 if (!frozen_bus) 382 if (!frozen_bus)
355 frozen_bus = pcibios_find_pci_bus (frozen_dn->parent); 383 frozen_bus = pcibios_find_pci_bus(frozen_dn->parent);
356 384
357 if (!frozen_bus) { 385 if (!frozen_bus) {
358 printk(KERN_ERR "EEH: Cannot find PCI bus " 386 printk(KERN_ERR "EEH: Cannot find PCI bus "
@@ -361,22 +389,21 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
361 return NULL; 389 return NULL;
362 } 390 }
363 391
364 frozen_pdn = PCI_DN(frozen_dn); 392 frozen_edev = of_node_to_eeh_dev(frozen_dn);
365 frozen_pdn->eeh_freeze_count++; 393 frozen_edev->freeze_count++;
394 pci_str = eeh_pci_name(eeh_dev_to_pci_dev(event->edev));
395 drv_str = eeh_pcid_name(eeh_dev_to_pci_dev(event->edev));
366 396
367 pci_str = eeh_pci_name(event->dev); 397 if (frozen_edev->freeze_count > EEH_MAX_ALLOWED_FREEZES)
368 drv_str = pcid_name(event->dev);
369
370 if (frozen_pdn->eeh_freeze_count > EEH_MAX_ALLOWED_FREEZES)
371 goto excess_failures; 398 goto excess_failures;
372 399
373 printk(KERN_WARNING 400 printk(KERN_WARNING
374 "EEH: This PCI device has failed %d times in the last hour:\n", 401 "EEH: This PCI device has failed %d times in the last hour:\n",
375 frozen_pdn->eeh_freeze_count); 402 frozen_edev->freeze_count);
376 403
377 if (frozen_pdn->pcidev) { 404 if (frozen_edev->pdev) {
378 bus_pci_str = pci_name(frozen_pdn->pcidev); 405 bus_pci_str = pci_name(frozen_edev->pdev);
379 bus_drv_str = pcid_name(frozen_pdn->pcidev); 406 bus_drv_str = eeh_pcid_name(frozen_edev->pdev);
380 printk(KERN_WARNING 407 printk(KERN_WARNING
381 "EEH: Bus location=%s driver=%s pci addr=%s\n", 408 "EEH: Bus location=%s driver=%s pci addr=%s\n",
382 location, bus_drv_str, bus_pci_str); 409 location, bus_drv_str, bus_pci_str);
@@ -395,9 +422,10 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
395 pci_walk_bus(frozen_bus, eeh_report_error, &result); 422 pci_walk_bus(frozen_bus, eeh_report_error, &result);
396 423
397 /* Get the current PCI slot state. This can take a long time, 424 /* Get the current PCI slot state. This can take a long time,
398 * sometimes over 3 seconds for certain systems. */ 425 * sometimes over 3 seconds for certain systems.
399 rc = eeh_wait_for_slot_status (frozen_pdn, MAX_WAIT_FOR_RECOVERY*1000); 426 */
400 if (rc < 0) { 427 rc = eeh_ops->wait_state(eeh_dev_to_of_node(frozen_edev), MAX_WAIT_FOR_RECOVERY*1000);
428 if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
401 printk(KERN_WARNING "EEH: Permanent failure\n"); 429 printk(KERN_WARNING "EEH: Permanent failure\n");
402 goto hard_fail; 430 goto hard_fail;
403 } 431 }
@@ -406,14 +434,14 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
406 * don't post the error log until after all dev drivers 434 * don't post the error log until after all dev drivers
407 * have been informed. 435 * have been informed.
408 */ 436 */
409 eeh_slot_error_detail(frozen_pdn, EEH_LOG_TEMP_FAILURE); 437 eeh_slot_error_detail(frozen_edev, EEH_LOG_TEMP);
410 438
411 /* If all device drivers were EEH-unaware, then shut 439 /* If all device drivers were EEH-unaware, then shut
412 * down all of the device drivers, and hope they 440 * down all of the device drivers, and hope they
413 * go down willingly, without panicing the system. 441 * go down willingly, without panicing the system.
414 */ 442 */
415 if (result == PCI_ERS_RESULT_NONE) { 443 if (result == PCI_ERS_RESULT_NONE) {
416 rc = eeh_reset_device(frozen_pdn, frozen_bus); 444 rc = eeh_reset_device(frozen_edev, frozen_bus);
417 if (rc) { 445 if (rc) {
418 printk(KERN_WARNING "EEH: Unable to reset, rc=%d\n", rc); 446 printk(KERN_WARNING "EEH: Unable to reset, rc=%d\n", rc);
419 goto hard_fail; 447 goto hard_fail;
@@ -422,7 +450,7 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
422 450
423 /* If all devices reported they can proceed, then re-enable MMIO */ 451 /* If all devices reported they can proceed, then re-enable MMIO */
424 if (result == PCI_ERS_RESULT_CAN_RECOVER) { 452 if (result == PCI_ERS_RESULT_CAN_RECOVER) {
425 rc = rtas_pci_enable(frozen_pdn, EEH_THAW_MMIO); 453 rc = eeh_pci_enable(frozen_edev, EEH_OPT_THAW_MMIO);
426 454
427 if (rc < 0) 455 if (rc < 0)
428 goto hard_fail; 456 goto hard_fail;
@@ -436,7 +464,7 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
436 464
437 /* If all devices reported they can proceed, then re-enable DMA */ 465 /* If all devices reported they can proceed, then re-enable DMA */
438 if (result == PCI_ERS_RESULT_CAN_RECOVER) { 466 if (result == PCI_ERS_RESULT_CAN_RECOVER) {
439 rc = rtas_pci_enable(frozen_pdn, EEH_THAW_DMA); 467 rc = eeh_pci_enable(frozen_edev, EEH_OPT_THAW_DMA);
440 468
441 if (rc < 0) 469 if (rc < 0)
442 goto hard_fail; 470 goto hard_fail;
@@ -454,7 +482,7 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
454 482
455 /* If any device called out for a reset, then reset the slot */ 483 /* If any device called out for a reset, then reset the slot */
456 if (result == PCI_ERS_RESULT_NEED_RESET) { 484 if (result == PCI_ERS_RESULT_NEED_RESET) {
457 rc = eeh_reset_device(frozen_pdn, NULL); 485 rc = eeh_reset_device(frozen_edev, NULL);
458 if (rc) { 486 if (rc) {
459 printk(KERN_WARNING "EEH: Cannot reset, rc=%d\n", rc); 487 printk(KERN_WARNING "EEH: Cannot reset, rc=%d\n", rc);
460 goto hard_fail; 488 goto hard_fail;
@@ -473,7 +501,7 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event)
473 /* Tell all device drivers that they can resume operations */ 501 /* Tell all device drivers that they can resume operations */
474 pci_walk_bus(frozen_bus, eeh_report_resume, NULL); 502 pci_walk_bus(frozen_bus, eeh_report_resume, NULL);
475 503
476 return frozen_pdn; 504 return frozen_edev;
477 505
478excess_failures: 506excess_failures:
479 /* 507 /*
@@ -486,7 +514,7 @@ excess_failures:
486 "has failed %d times in the last hour " 514 "has failed %d times in the last hour "
487 "and has been permanently disabled.\n" 515 "and has been permanently disabled.\n"
488 "Please try reseating this device or replacing it.\n", 516 "Please try reseating this device or replacing it.\n",
489 location, drv_str, pci_str, frozen_pdn->eeh_freeze_count); 517 location, drv_str, pci_str, frozen_edev->freeze_count);
490 goto perm_error; 518 goto perm_error;
491 519
492hard_fail: 520hard_fail:
@@ -497,7 +525,7 @@ hard_fail:
497 location, drv_str, pci_str); 525 location, drv_str, pci_str);
498 526
499perm_error: 527perm_error:
500 eeh_slot_error_detail(frozen_pdn, EEH_LOG_PERM_FAILURE); 528 eeh_slot_error_detail(frozen_edev, EEH_LOG_PERM);
501 529
502 /* Notify all devices that they're about to go down. */ 530 /* Notify all devices that they're about to go down. */
503 pci_walk_bus(frozen_bus, eeh_report_failure, NULL); 531 pci_walk_bus(frozen_bus, eeh_report_failure, NULL);
@@ -508,4 +536,3 @@ perm_error:
508 return NULL; 536 return NULL;
509} 537}
510 538
511/* ---------- end of file ---------- */
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
index d2383cfb6dfd..4a4752565856 100644
--- a/arch/powerpc/platforms/pseries/eeh_event.c
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -1,6 +1,4 @@
1/* 1/*
2 * eeh_event.c
3 *
4 * This program is free software; you can redistribute it and/or modify 2 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by 3 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or 4 * the Free Software Foundation; either version 2 of the License, or
@@ -46,7 +44,7 @@ DECLARE_WORK(eeh_event_wq, eeh_thread_launcher);
46DEFINE_MUTEX(eeh_event_mutex); 44DEFINE_MUTEX(eeh_event_mutex);
47 45
48/** 46/**
49 * eeh_event_handler - dispatch EEH events. 47 * eeh_event_handler - Dispatch EEH events.
50 * @dummy - unused 48 * @dummy - unused
51 * 49 *
52 * The detection of a frozen slot can occur inside an interrupt, 50 * The detection of a frozen slot can occur inside an interrupt,
@@ -58,10 +56,10 @@ DEFINE_MUTEX(eeh_event_mutex);
58static int eeh_event_handler(void * dummy) 56static int eeh_event_handler(void * dummy)
59{ 57{
60 unsigned long flags; 58 unsigned long flags;
61 struct eeh_event *event; 59 struct eeh_event *event;
62 struct pci_dn *pdn; 60 struct eeh_dev *edev;
63 61
64 daemonize ("eehd"); 62 daemonize("eehd");
65 set_current_state(TASK_INTERRUPTIBLE); 63 set_current_state(TASK_INTERRUPTIBLE);
66 64
67 spin_lock_irqsave(&eeh_eventlist_lock, flags); 65 spin_lock_irqsave(&eeh_eventlist_lock, flags);
@@ -79,31 +77,37 @@ static int eeh_event_handler(void * dummy)
79 77
80 /* Serialize processing of EEH events */ 78 /* Serialize processing of EEH events */
81 mutex_lock(&eeh_event_mutex); 79 mutex_lock(&eeh_event_mutex);
82 eeh_mark_slot(event->dn, EEH_MODE_RECOVERING); 80 edev = event->edev;
81 eeh_mark_slot(eeh_dev_to_of_node(edev), EEH_MODE_RECOVERING);
83 82
84 printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n", 83 printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n",
85 eeh_pci_name(event->dev)); 84 eeh_pci_name(edev->pdev));
85
86 edev = handle_eeh_events(event);
86 87
87 pdn = handle_eeh_events(event); 88 eeh_clear_slot(eeh_dev_to_of_node(edev), EEH_MODE_RECOVERING);
89 pci_dev_put(edev->pdev);
88 90
89 eeh_clear_slot(event->dn, EEH_MODE_RECOVERING);
90 pci_dev_put(event->dev);
91 kfree(event); 91 kfree(event);
92 mutex_unlock(&eeh_event_mutex); 92 mutex_unlock(&eeh_event_mutex);
93 93
94 /* If there are no new errors after an hour, clear the counter. */ 94 /* If there are no new errors after an hour, clear the counter. */
95 if (pdn && pdn->eeh_freeze_count>0) { 95 if (edev && edev->freeze_count>0) {
96 msleep_interruptible (3600*1000); 96 msleep_interruptible(3600*1000);
97 if (pdn->eeh_freeze_count>0) 97 if (edev->freeze_count>0)
98 pdn->eeh_freeze_count--; 98 edev->freeze_count--;
99
99 } 100 }
100 101
101 return 0; 102 return 0;
102} 103}
103 104
104/** 105/**
105 * eeh_thread_launcher 106 * eeh_thread_launcher - Start kernel thread to handle EEH events
106 * @dummy - unused 107 * @dummy - unused
108 *
109 * This routine is called to start the kernel thread for processing
110 * EEH event.
107 */ 111 */
108static void eeh_thread_launcher(struct work_struct *dummy) 112static void eeh_thread_launcher(struct work_struct *dummy)
109{ 113{
@@ -112,18 +116,18 @@ static void eeh_thread_launcher(struct work_struct *dummy)
112} 116}
113 117
114/** 118/**
115 * eeh_send_failure_event - generate a PCI error event 119 * eeh_send_failure_event - Generate a PCI error event
116 * @dev pci device 120 * @edev: EEH device
117 * 121 *
118 * This routine can be called within an interrupt context; 122 * This routine can be called within an interrupt context;
119 * the actual event will be delivered in a normal context 123 * the actual event will be delivered in a normal context
120 * (from a workqueue). 124 * (from a workqueue).
121 */ 125 */
122int eeh_send_failure_event (struct device_node *dn, 126int eeh_send_failure_event(struct eeh_dev *edev)
123 struct pci_dev *dev)
124{ 127{
125 unsigned long flags; 128 unsigned long flags;
126 struct eeh_event *event; 129 struct eeh_event *event;
130 struct device_node *dn = eeh_dev_to_of_node(edev);
127 const char *location; 131 const char *location;
128 132
129 if (!mem_init_done) { 133 if (!mem_init_done) {
@@ -135,15 +139,14 @@ int eeh_send_failure_event (struct device_node *dn,
135 } 139 }
136 event = kmalloc(sizeof(*event), GFP_ATOMIC); 140 event = kmalloc(sizeof(*event), GFP_ATOMIC);
137 if (event == NULL) { 141 if (event == NULL) {
138 printk (KERN_ERR "EEH: out of memory, event not handled\n"); 142 printk(KERN_ERR "EEH: out of memory, event not handled\n");
139 return 1; 143 return 1;
140 } 144 }
141 145
142 if (dev) 146 if (edev->pdev)
143 pci_dev_get(dev); 147 pci_dev_get(edev->pdev);
144 148
145 event->dn = dn; 149 event->edev = edev;
146 event->dev = dev;
147 150
148 /* We may or may not be called in an interrupt context */ 151 /* We may or may not be called in an interrupt context */
149 spin_lock_irqsave(&eeh_eventlist_lock, flags); 152 spin_lock_irqsave(&eeh_eventlist_lock, flags);
@@ -154,5 +157,3 @@ int eeh_send_failure_event (struct device_node *dn,
154 157
155 return 0; 158 return 0;
156} 159}
157
158/********************** END OF FILE ******************************/
diff --git a/arch/powerpc/platforms/pseries/eeh_pseries.c b/arch/powerpc/platforms/pseries/eeh_pseries.c
new file mode 100644
index 000000000000..8752f79a6af8
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/eeh_pseries.c
@@ -0,0 +1,565 @@
1/*
2 * The file intends to implement the platform dependent EEH operations on pseries.
3 * Actually, the pseries platform is built based on RTAS heavily. That means the
4 * pseries platform dependent EEH operations will be built on RTAS calls. The functions
5 * are devired from arch/powerpc/platforms/pseries/eeh.c and necessary cleanup has
6 * been done.
7 *
8 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2011.
9 * Copyright IBM Corporation 2001, 2005, 2006
10 * Copyright Dave Engebretsen & Todd Inglett 2001
11 * Copyright Linas Vepstas 2005, 2006
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include <linux/atomic.h>
29#include <linux/delay.h>
30#include <linux/export.h>
31#include <linux/init.h>
32#include <linux/list.h>
33#include <linux/of.h>
34#include <linux/pci.h>
35#include <linux/proc_fs.h>
36#include <linux/rbtree.h>
37#include <linux/sched.h>
38#include <linux/seq_file.h>
39#include <linux/spinlock.h>
40
41#include <asm/eeh.h>
42#include <asm/eeh_event.h>
43#include <asm/io.h>
44#include <asm/machdep.h>
45#include <asm/ppc-pci.h>
46#include <asm/rtas.h>
47
48/* RTAS tokens */
49static int ibm_set_eeh_option;
50static int ibm_set_slot_reset;
51static int ibm_read_slot_reset_state;
52static int ibm_read_slot_reset_state2;
53static int ibm_slot_error_detail;
54static int ibm_get_config_addr_info;
55static int ibm_get_config_addr_info2;
56static int ibm_configure_bridge;
57static int ibm_configure_pe;
58
59/*
60 * Buffer for reporting slot-error-detail rtas calls. Its here
61 * in BSS, and not dynamically alloced, so that it ends up in
62 * RMO where RTAS can access it.
63 */
64static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
65static DEFINE_SPINLOCK(slot_errbuf_lock);
66static int eeh_error_buf_size;
67
68/**
69 * pseries_eeh_init - EEH platform dependent initialization
70 *
71 * EEH platform dependent initialization on pseries.
72 */
73static int pseries_eeh_init(void)
74{
75 /* figure out EEH RTAS function call tokens */
76 ibm_set_eeh_option = rtas_token("ibm,set-eeh-option");
77 ibm_set_slot_reset = rtas_token("ibm,set-slot-reset");
78 ibm_read_slot_reset_state2 = rtas_token("ibm,read-slot-reset-state2");
79 ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state");
80 ibm_slot_error_detail = rtas_token("ibm,slot-error-detail");
81 ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2");
82 ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
83 ibm_configure_pe = rtas_token("ibm,configure-pe");
84 ibm_configure_bridge = rtas_token ("ibm,configure-bridge");
85
86 /* necessary sanity check */
87 if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) {
88 pr_warning("%s: RTAS service <ibm,set-eeh-option> invalid\n",
89 __func__);
90 return -EINVAL;
91 } else if (ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE) {
92 pr_warning("%s: RTAS service <ibm, set-slot-reset> invalid\n",
93 __func__);
94 return -EINVAL;
95 } else if (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
96 ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) {
97 pr_warning("%s: RTAS service <ibm,read-slot-reset-state2> and "
98 "<ibm,read-slot-reset-state> invalid\n",
99 __func__);
100 return -EINVAL;
101 } else if (ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE) {
102 pr_warning("%s: RTAS service <ibm,slot-error-detail> invalid\n",
103 __func__);
104 return -EINVAL;
105 } else if (ibm_get_config_addr_info2 == RTAS_UNKNOWN_SERVICE &&
106 ibm_get_config_addr_info == RTAS_UNKNOWN_SERVICE) {
107 pr_warning("%s: RTAS service <ibm,get-config-addr-info2> and "
108 "<ibm,get-config-addr-info> invalid\n",
109 __func__);
110 return -EINVAL;
111 } else if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE &&
112 ibm_configure_bridge == RTAS_UNKNOWN_SERVICE) {
113 pr_warning("%s: RTAS service <ibm,configure-pe> and "
114 "<ibm,configure-bridge> invalid\n",
115 __func__);
116 return -EINVAL;
117 }
118
119 /* Initialize error log lock and size */
120 spin_lock_init(&slot_errbuf_lock);
121 eeh_error_buf_size = rtas_token("rtas-error-log-max");
122 if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
123 pr_warning("%s: unknown EEH error log size\n",
124 __func__);
125 eeh_error_buf_size = 1024;
126 } else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
127 pr_warning("%s: EEH error log size %d exceeds the maximal %d\n",
128 __func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
129 eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
130 }
131
132 return 0;
133}
134
135/**
136 * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable
137 * @dn: device node
138 * @option: operation to be issued
139 *
140 * The function is used to control the EEH functionality globally.
141 * Currently, following options are support according to PAPR:
142 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
143 */
144static int pseries_eeh_set_option(struct device_node *dn, int option)
145{
146 int ret = 0;
147 struct eeh_dev *edev;
148 const u32 *reg;
149 int config_addr;
150
151 edev = of_node_to_eeh_dev(dn);
152
153 /*
154 * When we're enabling or disabling EEH functioality on
155 * the particular PE, the PE config address is possibly
156 * unavailable. Therefore, we have to figure it out from
157 * the FDT node.
158 */
159 switch (option) {
160 case EEH_OPT_DISABLE:
161 case EEH_OPT_ENABLE:
162 reg = of_get_property(dn, "reg", NULL);
163 config_addr = reg[0];
164 break;
165
166 case EEH_OPT_THAW_MMIO:
167 case EEH_OPT_THAW_DMA:
168 config_addr = edev->config_addr;
169 if (edev->pe_config_addr)
170 config_addr = edev->pe_config_addr;
171 break;
172
173 default:
174 pr_err("%s: Invalid option %d\n",
175 __func__, option);
176 return -EINVAL;
177 }
178
179 ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
180 config_addr, BUID_HI(edev->phb->buid),
181 BUID_LO(edev->phb->buid), option);
182
183 return ret;
184}
185
186/**
187 * pseries_eeh_get_pe_addr - Retrieve PE address
188 * @dn: device node
189 *
190 * Retrieve the assocated PE address. Actually, there're 2 RTAS
191 * function calls dedicated for the purpose. We need implement
192 * it through the new function and then the old one. Besides,
193 * you should make sure the config address is figured out from
194 * FDT node before calling the function.
195 *
196 * It's notable that zero'ed return value means invalid PE config
197 * address.
198 */
199static int pseries_eeh_get_pe_addr(struct device_node *dn)
200{
201 struct eeh_dev *edev;
202 int ret = 0;
203 int rets[3];
204
205 edev = of_node_to_eeh_dev(dn);
206
207 if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
208 /*
209 * First of all, we need to make sure there has one PE
210 * associated with the device. Otherwise, PE address is
211 * meaningless.
212 */
213 ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
214 edev->config_addr, BUID_HI(edev->phb->buid),
215 BUID_LO(edev->phb->buid), 1);
216 if (ret || (rets[0] == 0))
217 return 0;
218
219 /* Retrieve the associated PE config address */
220 ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
221 edev->config_addr, BUID_HI(edev->phb->buid),
222 BUID_LO(edev->phb->buid), 0);
223 if (ret) {
224 pr_warning("%s: Failed to get PE address for %s\n",
225 __func__, dn->full_name);
226 return 0;
227 }
228
229 return rets[0];
230 }
231
232 if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
233 ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets,
234 edev->config_addr, BUID_HI(edev->phb->buid),
235 BUID_LO(edev->phb->buid), 0);
236 if (ret) {
237 pr_warning("%s: Failed to get PE address for %s\n",
238 __func__, dn->full_name);
239 return 0;
240 }
241
242 return rets[0];
243 }
244
245 return ret;
246}
247
248/**
249 * pseries_eeh_get_state - Retrieve PE state
250 * @dn: PE associated device node
251 * @state: return value
252 *
253 * Retrieve the state of the specified PE. On RTAS compliant
254 * pseries platform, there already has one dedicated RTAS function
255 * for the purpose. It's notable that the associated PE config address
256 * might be ready when calling the function. Therefore, endeavour to
257 * use the PE config address if possible. Further more, there're 2
258 * RTAS calls for the purpose, we need to try the new one and back
259 * to the old one if the new one couldn't work properly.
260 */
261static int pseries_eeh_get_state(struct device_node *dn, int *state)
262{
263 struct eeh_dev *edev;
264 int config_addr;
265 int ret;
266 int rets[4];
267 int result;
268
269 /* Figure out PE config address if possible */
270 edev = of_node_to_eeh_dev(dn);
271 config_addr = edev->config_addr;
272 if (edev->pe_config_addr)
273 config_addr = edev->pe_config_addr;
274
275 if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
276 ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets,
277 config_addr, BUID_HI(edev->phb->buid),
278 BUID_LO(edev->phb->buid));
279 } else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) {
280 /* Fake PE unavailable info */
281 rets[2] = 0;
282 ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
283 config_addr, BUID_HI(edev->phb->buid),
284 BUID_LO(edev->phb->buid));
285 } else {
286 return EEH_STATE_NOT_SUPPORT;
287 }
288
289 if (ret)
290 return ret;
291
292 /* Parse the result out */
293 result = 0;
294 if (rets[1]) {
295 switch(rets[0]) {
296 case 0:
297 result &= ~EEH_STATE_RESET_ACTIVE;
298 result |= EEH_STATE_MMIO_ACTIVE;
299 result |= EEH_STATE_DMA_ACTIVE;
300 break;
301 case 1:
302 result |= EEH_STATE_RESET_ACTIVE;
303 result |= EEH_STATE_MMIO_ACTIVE;
304 result |= EEH_STATE_DMA_ACTIVE;
305 break;
306 case 2:
307 result &= ~EEH_STATE_RESET_ACTIVE;
308 result &= ~EEH_STATE_MMIO_ACTIVE;
309 result &= ~EEH_STATE_DMA_ACTIVE;
310 break;
311 case 4:
312 result &= ~EEH_STATE_RESET_ACTIVE;
313 result &= ~EEH_STATE_MMIO_ACTIVE;
314 result &= ~EEH_STATE_DMA_ACTIVE;
315 result |= EEH_STATE_MMIO_ENABLED;
316 break;
317 case 5:
318 if (rets[2]) {
319 if (state) *state = rets[2];
320 result = EEH_STATE_UNAVAILABLE;
321 } else {
322 result = EEH_STATE_NOT_SUPPORT;
323 }
324 default:
325 result = EEH_STATE_NOT_SUPPORT;
326 }
327 } else {
328 result = EEH_STATE_NOT_SUPPORT;
329 }
330
331 return result;
332}
333
334/**
335 * pseries_eeh_reset - Reset the specified PE
336 * @dn: PE associated device node
337 * @option: reset option
338 *
339 * Reset the specified PE
340 */
341static int pseries_eeh_reset(struct device_node *dn, int option)
342{
343 struct eeh_dev *edev;
344 int config_addr;
345 int ret;
346
347 /* Figure out PE address */
348 edev = of_node_to_eeh_dev(dn);
349 config_addr = edev->config_addr;
350 if (edev->pe_config_addr)
351 config_addr = edev->pe_config_addr;
352
353 /* Reset PE through RTAS call */
354 ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
355 config_addr, BUID_HI(edev->phb->buid),
356 BUID_LO(edev->phb->buid), option);
357
358 /* If fundamental-reset not supported, try hot-reset */
359 if (option == EEH_RESET_FUNDAMENTAL &&
360 ret == -8) {
361 ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
362 config_addr, BUID_HI(edev->phb->buid),
363 BUID_LO(edev->phb->buid), EEH_RESET_HOT);
364 }
365
366 return ret;
367}
368
369/**
370 * pseries_eeh_wait_state - Wait for PE state
371 * @dn: PE associated device node
372 * @max_wait: maximal period in microsecond
373 *
374 * Wait for the state of associated PE. It might take some time
375 * to retrieve the PE's state.
376 */
377static int pseries_eeh_wait_state(struct device_node *dn, int max_wait)
378{
379 int ret;
380 int mwait;
381
382 /*
383 * According to PAPR, the state of PE might be temporarily
384 * unavailable. Under the circumstance, we have to wait
385 * for indicated time determined by firmware. The maximal
386 * wait time is 5 minutes, which is acquired from the original
387 * EEH implementation. Also, the original implementation
388 * also defined the minimal wait time as 1 second.
389 */
390#define EEH_STATE_MIN_WAIT_TIME (1000)
391#define EEH_STATE_MAX_WAIT_TIME (300 * 1000)
392
393 while (1) {
394 ret = pseries_eeh_get_state(dn, &mwait);
395
396 /*
397 * If the PE's state is temporarily unavailable,
398 * we have to wait for the specified time. Otherwise,
399 * the PE's state will be returned immediately.
400 */
401 if (ret != EEH_STATE_UNAVAILABLE)
402 return ret;
403
404 if (max_wait <= 0) {
405 pr_warning("%s: Timeout when getting PE's state (%d)\n",
406 __func__, max_wait);
407 return EEH_STATE_NOT_SUPPORT;
408 }
409
410 if (mwait <= 0) {
411 pr_warning("%s: Firmware returned bad wait value %d\n",
412 __func__, mwait);
413 mwait = EEH_STATE_MIN_WAIT_TIME;
414 } else if (mwait > EEH_STATE_MAX_WAIT_TIME) {
415 pr_warning("%s: Firmware returned too long wait value %d\n",
416 __func__, mwait);
417 mwait = EEH_STATE_MAX_WAIT_TIME;
418 }
419
420 max_wait -= mwait;
421 msleep(mwait);
422 }
423
424 return EEH_STATE_NOT_SUPPORT;
425}
426
427/**
428 * pseries_eeh_get_log - Retrieve error log
429 * @dn: device node
430 * @severity: temporary or permanent error log
431 * @drv_log: driver log to be combined with retrieved error log
432 * @len: length of driver log
433 *
434 * Retrieve the temporary or permanent error from the PE.
435 * Actually, the error will be retrieved through the dedicated
436 * RTAS call.
437 */
438static int pseries_eeh_get_log(struct device_node *dn, int severity, char *drv_log, unsigned long len)
439{
440 struct eeh_dev *edev;
441 int config_addr;
442 unsigned long flags;
443 int ret;
444
445 edev = of_node_to_eeh_dev(dn);
446 spin_lock_irqsave(&slot_errbuf_lock, flags);
447 memset(slot_errbuf, 0, eeh_error_buf_size);
448
449 /* Figure out the PE address */
450 config_addr = edev->config_addr;
451 if (edev->pe_config_addr)
452 config_addr = edev->pe_config_addr;
453
454 ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr,
455 BUID_HI(edev->phb->buid), BUID_LO(edev->phb->buid),
456 virt_to_phys(drv_log), len,
457 virt_to_phys(slot_errbuf), eeh_error_buf_size,
458 severity);
459 if (!ret)
460 log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
461 spin_unlock_irqrestore(&slot_errbuf_lock, flags);
462
463 return ret;
464}
465
466/**
467 * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE
468 * @dn: PE associated device node
469 *
470 * The function will be called to reconfigure the bridges included
471 * in the specified PE so that the mulfunctional PE would be recovered
472 * again.
473 */
474static int pseries_eeh_configure_bridge(struct device_node *dn)
475{
476 struct eeh_dev *edev;
477 int config_addr;
478 int ret;
479
480 /* Figure out the PE address */
481 edev = of_node_to_eeh_dev(dn);
482 config_addr = edev->config_addr;
483 if (edev->pe_config_addr)
484 config_addr = edev->pe_config_addr;
485
486 /* Use new configure-pe function, if supported */
487 if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
488 ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
489 config_addr, BUID_HI(edev->phb->buid),
490 BUID_LO(edev->phb->buid));
491 } else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
492 ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
493 config_addr, BUID_HI(edev->phb->buid),
494 BUID_LO(edev->phb->buid));
495 } else {
496 return -EFAULT;
497 }
498
499 if (ret)
500 pr_warning("%s: Unable to configure bridge %d for %s\n",
501 __func__, ret, dn->full_name);
502
503 return ret;
504}
505
506/**
507 * pseries_eeh_read_config - Read PCI config space
508 * @dn: device node
509 * @where: PCI address
510 * @size: size to read
511 * @val: return value
512 *
513 * Read config space from the speicifed device
514 */
515static int pseries_eeh_read_config(struct device_node *dn, int where, int size, u32 *val)
516{
517 struct pci_dn *pdn;
518
519 pdn = PCI_DN(dn);
520
521 return rtas_read_config(pdn, where, size, val);
522}
523
524/**
525 * pseries_eeh_write_config - Write PCI config space
526 * @dn: device node
527 * @where: PCI address
528 * @size: size to write
529 * @val: value to be written
530 *
531 * Write config space to the specified device
532 */
533static int pseries_eeh_write_config(struct device_node *dn, int where, int size, u32 val)
534{
535 struct pci_dn *pdn;
536
537 pdn = PCI_DN(dn);
538
539 return rtas_write_config(pdn, where, size, val);
540}
541
542static struct eeh_ops pseries_eeh_ops = {
543 .name = "pseries",
544 .init = pseries_eeh_init,
545 .set_option = pseries_eeh_set_option,
546 .get_pe_addr = pseries_eeh_get_pe_addr,
547 .get_state = pseries_eeh_get_state,
548 .reset = pseries_eeh_reset,
549 .wait_state = pseries_eeh_wait_state,
550 .get_log = pseries_eeh_get_log,
551 .configure_bridge = pseries_eeh_configure_bridge,
552 .read_config = pseries_eeh_read_config,
553 .write_config = pseries_eeh_write_config
554};
555
556/**
557 * eeh_pseries_init - Register platform dependent EEH operations
558 *
559 * EEH initialization on pseries platform. This function should be
560 * called before any EEH related functions.
561 */
562int __init eeh_pseries_init(void)
563{
564 return eeh_ops_register(&pseries_eeh_ops);
565}
diff --git a/arch/powerpc/platforms/pseries/eeh_sysfs.c b/arch/powerpc/platforms/pseries/eeh_sysfs.c
index eb744ee234da..243b3510d70f 100644
--- a/arch/powerpc/platforms/pseries/eeh_sysfs.c
+++ b/arch/powerpc/platforms/pseries/eeh_sysfs.c
@@ -28,7 +28,7 @@
28#include <asm/pci-bridge.h> 28#include <asm/pci-bridge.h>
29 29
30/** 30/**
31 * EEH_SHOW_ATTR -- create sysfs entry for eeh statistic 31 * EEH_SHOW_ATTR -- Create sysfs entry for eeh statistic
32 * @_name: name of file in sysfs directory 32 * @_name: name of file in sysfs directory
33 * @_memb: name of member in struct pci_dn to access 33 * @_memb: name of member in struct pci_dn to access
34 * @_format: printf format for display 34 * @_format: printf format for display
@@ -41,24 +41,21 @@ static ssize_t eeh_show_##_name(struct device *dev, \
41 struct device_attribute *attr, char *buf) \ 41 struct device_attribute *attr, char *buf) \
42{ \ 42{ \
43 struct pci_dev *pdev = to_pci_dev(dev); \ 43 struct pci_dev *pdev = to_pci_dev(dev); \
44 struct device_node *dn = pci_device_to_OF_node(pdev); \ 44 struct eeh_dev *edev = pci_dev_to_eeh_dev(pdev); \
45 struct pci_dn *pdn; \
46 \ 45 \
47 if (!dn || PCI_DN(dn) == NULL) \ 46 if (!edev) \
48 return 0; \ 47 return 0; \
49 \ 48 \
50 pdn = PCI_DN(dn); \ 49 return sprintf(buf, _format "\n", edev->_memb); \
51 return sprintf(buf, _format "\n", pdn->_memb); \
52} \ 50} \
53static DEVICE_ATTR(_name, S_IRUGO, eeh_show_##_name, NULL); 51static DEVICE_ATTR(_name, S_IRUGO, eeh_show_##_name, NULL);
54 52
55 53EEH_SHOW_ATTR(eeh_mode, mode, "0x%x");
56EEH_SHOW_ATTR(eeh_mode, eeh_mode, "0x%x"); 54EEH_SHOW_ATTR(eeh_config_addr, config_addr, "0x%x");
57EEH_SHOW_ATTR(eeh_config_addr, eeh_config_addr, "0x%x"); 55EEH_SHOW_ATTR(eeh_pe_config_addr, pe_config_addr, "0x%x");
58EEH_SHOW_ATTR(eeh_pe_config_addr, eeh_pe_config_addr, "0x%x"); 56EEH_SHOW_ATTR(eeh_check_count, check_count, "%d" );
59EEH_SHOW_ATTR(eeh_check_count, eeh_check_count, "%d"); 57EEH_SHOW_ATTR(eeh_freeze_count, freeze_count, "%d" );
60EEH_SHOW_ATTR(eeh_freeze_count, eeh_freeze_count, "%d"); 58EEH_SHOW_ATTR(eeh_false_positives, false_positives, "%d" );
61EEH_SHOW_ATTR(eeh_false_positives, eeh_false_positives, "%d");
62 59
63void eeh_sysfs_add_device(struct pci_dev *pdev) 60void eeh_sysfs_add_device(struct pci_dev *pdev)
64{ 61{
diff --git a/arch/powerpc/platforms/pseries/io_event_irq.c b/arch/powerpc/platforms/pseries/io_event_irq.c
index 1a709bc48ce1..ef9d9d84c7d5 100644
--- a/arch/powerpc/platforms/pseries/io_event_irq.c
+++ b/arch/powerpc/platforms/pseries/io_event_irq.c
@@ -63,73 +63,9 @@ EXPORT_SYMBOL_GPL(pseries_ioei_notifier_list);
63 63
64static int ioei_check_exception_token; 64static int ioei_check_exception_token;
65 65
66/* pSeries event log format */
67
68/* Two bytes ASCII section IDs */
69#define PSERIES_ELOG_SECT_ID_PRIV_HDR (('P' << 8) | 'H')
70#define PSERIES_ELOG_SECT_ID_USER_HDR (('U' << 8) | 'H')
71#define PSERIES_ELOG_SECT_ID_PRIMARY_SRC (('P' << 8) | 'S')
72#define PSERIES_ELOG_SECT_ID_EXTENDED_UH (('E' << 8) | 'H')
73#define PSERIES_ELOG_SECT_ID_FAILING_MTMS (('M' << 8) | 'T')
74#define PSERIES_ELOG_SECT_ID_SECONDARY_SRC (('S' << 8) | 'S')
75#define PSERIES_ELOG_SECT_ID_DUMP_LOCATOR (('D' << 8) | 'H')
76#define PSERIES_ELOG_SECT_ID_FW_ERROR (('S' << 8) | 'W')
77#define PSERIES_ELOG_SECT_ID_IMPACT_PART_ID (('L' << 8) | 'P')
78#define PSERIES_ELOG_SECT_ID_LOGIC_RESOURCE_ID (('L' << 8) | 'R')
79#define PSERIES_ELOG_SECT_ID_HMC_ID (('H' << 8) | 'M')
80#define PSERIES_ELOG_SECT_ID_EPOW (('E' << 8) | 'P')
81#define PSERIES_ELOG_SECT_ID_IO_EVENT (('I' << 8) | 'E')
82#define PSERIES_ELOG_SECT_ID_MANUFACT_INFO (('M' << 8) | 'I')
83#define PSERIES_ELOG_SECT_ID_CALL_HOME (('C' << 8) | 'H')
84#define PSERIES_ELOG_SECT_ID_USER_DEF (('U' << 8) | 'D')
85
86/* Vendor specific Platform Event Log Format, Version 6, section header */
87struct pseries_elog_section {
88 uint16_t id; /* 0x00 2-byte ASCII section ID */
89 uint16_t length; /* 0x02 Section length in bytes */
90 uint8_t version; /* 0x04 Section version */
91 uint8_t subtype; /* 0x05 Section subtype */
92 uint16_t creator_component; /* 0x06 Creator component ID */
93 uint8_t data[]; /* 0x08 Start of section data */
94};
95
96static char ioei_rtas_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned; 66static char ioei_rtas_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned;
97 67
98/** 68/**
99 * Find data portion of a specific section in RTAS extended event log.
100 * @elog: RTAS error/event log.
101 * @sect_id: secsion ID.
102 *
103 * Return:
104 * pointer to the section data of the specified section
105 * NULL if not found
106 */
107static struct pseries_elog_section *find_xelog_section(struct rtas_error_log *elog,
108 uint16_t sect_id)
109{
110 struct rtas_ext_event_log_v6 *xelog =
111 (struct rtas_ext_event_log_v6 *) elog->buffer;
112 struct pseries_elog_section *sect;
113 unsigned char *p, *log_end;
114
115 /* Check that we understand the format */
116 if (elog->extended_log_length < sizeof(struct rtas_ext_event_log_v6) ||
117 xelog->log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG ||
118 xelog->company_id != RTAS_V6EXT_COMPANY_ID_IBM)
119 return NULL;
120
121 log_end = elog->buffer + elog->extended_log_length;
122 p = xelog->vendor_log;
123 while (p < log_end) {
124 sect = (struct pseries_elog_section *)p;
125 if (sect->id == sect_id)
126 return sect;
127 p += sect->length;
128 }
129 return NULL;
130}
131
132/**
133 * Find the data portion of an IO Event section from event log. 69 * Find the data portion of an IO Event section from event log.
134 * @elog: RTAS error/event log. 70 * @elog: RTAS error/event log.
135 * 71 *
@@ -138,7 +74,7 @@ static struct pseries_elog_section *find_xelog_section(struct rtas_error_log *el
138 */ 74 */
139static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog) 75static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog)
140{ 76{
141 struct pseries_elog_section *sect; 77 struct pseries_errorlog *sect;
142 78
143 /* We should only ever get called for io-event interrupts, but if 79 /* We should only ever get called for io-event interrupts, but if
144 * we do get called for another type then something went wrong so 80 * we do get called for another type then something went wrong so
@@ -152,7 +88,7 @@ static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog)
152 return NULL; 88 return NULL;
153 } 89 }
154 90
155 sect = find_xelog_section(elog, PSERIES_ELOG_SECT_ID_IO_EVENT); 91 sect = get_pseries_errorlog(elog, PSERIES_ELOG_SECT_ID_IO_EVENT);
156 if (unlikely(!sect)) { 92 if (unlikely(!sect)) {
157 printk_once(KERN_WARNING "io_event_irq: RTAS extended event " 93 printk_once(KERN_WARNING "io_event_irq: RTAS extended event "
158 "log does not contain an IO Event section. " 94 "log does not contain an IO Event section. "
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index c442f2b1980f..0915b1ad66ce 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -809,8 +809,7 @@ machine_arch_initcall(pseries, find_existing_ddw_windows);
809static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail, 809static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
810 struct ddw_query_response *query) 810 struct ddw_query_response *query)
811{ 811{
812 struct device_node *dn; 812 struct eeh_dev *edev;
813 struct pci_dn *pcidn;
814 u32 cfg_addr; 813 u32 cfg_addr;
815 u64 buid; 814 u64 buid;
816 int ret; 815 int ret;
@@ -821,12 +820,12 @@ static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail,
821 * Retrieve them from the pci device, not the node with the 820 * Retrieve them from the pci device, not the node with the
822 * dma-window property 821 * dma-window property
823 */ 822 */
824 dn = pci_device_to_OF_node(dev); 823 edev = pci_dev_to_eeh_dev(dev);
825 pcidn = PCI_DN(dn); 824 cfg_addr = edev->config_addr;
826 cfg_addr = pcidn->eeh_config_addr; 825 if (edev->pe_config_addr)
827 if (pcidn->eeh_pe_config_addr) 826 cfg_addr = edev->pe_config_addr;
828 cfg_addr = pcidn->eeh_pe_config_addr; 827 buid = edev->phb->buid;
829 buid = pcidn->phb->buid; 828
830 ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query, 829 ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query,
831 cfg_addr, BUID_HI(buid), BUID_LO(buid)); 830 cfg_addr, BUID_HI(buid), BUID_LO(buid));
832 dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x" 831 dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x"
@@ -839,8 +838,7 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
839 struct ddw_create_response *create, int page_shift, 838 struct ddw_create_response *create, int page_shift,
840 int window_shift) 839 int window_shift)
841{ 840{
842 struct device_node *dn; 841 struct eeh_dev *edev;
843 struct pci_dn *pcidn;
844 u32 cfg_addr; 842 u32 cfg_addr;
845 u64 buid; 843 u64 buid;
846 int ret; 844 int ret;
@@ -851,12 +849,11 @@ static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail,
851 * Retrieve them from the pci device, not the node with the 849 * Retrieve them from the pci device, not the node with the
852 * dma-window property 850 * dma-window property
853 */ 851 */
854 dn = pci_device_to_OF_node(dev); 852 edev = pci_dev_to_eeh_dev(dev);
855 pcidn = PCI_DN(dn); 853 cfg_addr = edev->config_addr;
856 cfg_addr = pcidn->eeh_config_addr; 854 if (edev->pe_config_addr)
857 if (pcidn->eeh_pe_config_addr) 855 cfg_addr = edev->pe_config_addr;
858 cfg_addr = pcidn->eeh_pe_config_addr; 856 buid = edev->phb->buid;
859 buid = pcidn->phb->buid;
860 857
861 do { 858 do {
862 /* extra outputs are LIOBN and dma-addr (hi, lo) */ 859 /* extra outputs are LIOBN and dma-addr (hi, lo) */
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 7bc73af6c7b9..5f3ef876ded2 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -41,6 +41,7 @@
41#include <asm/udbg.h> 41#include <asm/udbg.h>
42#include <asm/smp.h> 42#include <asm/smp.h>
43#include <asm/trace.h> 43#include <asm/trace.h>
44#include <asm/firmware.h>
44 45
45#include "plpar_wrappers.h" 46#include "plpar_wrappers.h"
46#include "pseries.h" 47#include "pseries.h"
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 38d24e7e7bb1..109fdb75578d 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -217,7 +217,7 @@ static struct device_node *find_pe_dn(struct pci_dev *dev, int *total)
217 if (!dn) 217 if (!dn)
218 return NULL; 218 return NULL;
219 219
220 dn = find_device_pe(dn); 220 dn = eeh_find_device_pe(dn);
221 if (!dn) 221 if (!dn)
222 return NULL; 222 return NULL;
223 223
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c
index 55d4ec1bd1ac..8b7bafa489c2 100644
--- a/arch/powerpc/platforms/pseries/pci_dlpar.c
+++ b/arch/powerpc/platforms/pseries/pci_dlpar.c
@@ -84,7 +84,7 @@ void pcibios_remove_pci_devices(struct pci_bus *bus)
84 list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) { 84 list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) {
85 pr_debug(" * Removing %s...\n", pci_name(dev)); 85 pr_debug(" * Removing %s...\n", pci_name(dev));
86 eeh_remove_bus_device(dev); 86 eeh_remove_bus_device(dev);
87 pci_remove_bus_device(dev); 87 pci_stop_and_remove_bus_device(dev);
88 } 88 }
89} 89}
90EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices); 90EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices);
@@ -147,6 +147,9 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
147 147
148 pci_devs_phb_init_dynamic(phb); 148 pci_devs_phb_init_dynamic(phb);
149 149
150 /* Create EEH devices for the PHB */
151 eeh_dev_phb_init_dynamic(phb);
152
150 if (dn->child) 153 if (dn->child)
151 eeh_add_device_tree_early(dn); 154 eeh_add_device_tree_early(dn);
152 155
diff --git a/arch/powerpc/platforms/pseries/phyp_dump.c b/arch/powerpc/platforms/pseries/phyp_dump.c
deleted file mode 100644
index 6e7742da0072..000000000000
--- a/arch/powerpc/platforms/pseries/phyp_dump.c
+++ /dev/null
@@ -1,513 +0,0 @@
1/*
2 * Hypervisor-assisted dump
3 *
4 * Linas Vepstas, Manish Ahuja 2008
5 * Copyright 2008 IBM Corp.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 *
12 */
13
14#include <linux/gfp.h>
15#include <linux/init.h>
16#include <linux/kobject.h>
17#include <linux/mm.h>
18#include <linux/of.h>
19#include <linux/pfn.h>
20#include <linux/swap.h>
21#include <linux/sysfs.h>
22
23#include <asm/page.h>
24#include <asm/phyp_dump.h>
25#include <asm/machdep.h>
26#include <asm/prom.h>
27#include <asm/rtas.h>
28
29/* Variables, used to communicate data between early boot and late boot */
30static struct phyp_dump phyp_dump_vars;
31struct phyp_dump *phyp_dump_info = &phyp_dump_vars;
32
33static int ibm_configure_kernel_dump;
34/* ------------------------------------------------- */
35/* RTAS interfaces to declare the dump regions */
36
37struct dump_section {
38 u32 dump_flags;
39 u16 source_type;
40 u16 error_flags;
41 u64 source_address;
42 u64 source_length;
43 u64 length_copied;
44 u64 destination_address;
45};
46
47struct phyp_dump_header {
48 u32 version;
49 u16 num_of_sections;
50 u16 status;
51
52 u32 first_offset_section;
53 u32 dump_disk_section;
54 u64 block_num_dd;
55 u64 num_of_blocks_dd;
56 u32 offset_dd;
57 u32 maxtime_to_auto;
58 /* No dump disk path string used */
59
60 struct dump_section cpu_data;
61 struct dump_section hpte_data;
62 struct dump_section kernel_data;
63};
64
65/* The dump header *must be* in low memory, so .bss it */
66static struct phyp_dump_header phdr;
67
68#define NUM_DUMP_SECTIONS 3
69#define DUMP_HEADER_VERSION 0x1
70#define DUMP_REQUEST_FLAG 0x1
71#define DUMP_SOURCE_CPU 0x0001
72#define DUMP_SOURCE_HPTE 0x0002
73#define DUMP_SOURCE_RMO 0x0011
74#define DUMP_ERROR_FLAG 0x2000
75#define DUMP_TRIGGERED 0x4000
76#define DUMP_PERFORMED 0x8000
77
78
79/**
80 * init_dump_header() - initialize the header declaring a dump
81 * Returns: length of dump save area.
82 *
83 * When the hypervisor saves crashed state, it needs to put
84 * it somewhere. The dump header tells the hypervisor where
85 * the data can be saved.
86 */
87static unsigned long init_dump_header(struct phyp_dump_header *ph)
88{
89 unsigned long addr_offset = 0;
90
91 /* Set up the dump header */
92 ph->version = DUMP_HEADER_VERSION;
93 ph->num_of_sections = NUM_DUMP_SECTIONS;
94 ph->status = 0;
95
96 ph->first_offset_section =
97 (u32)offsetof(struct phyp_dump_header, cpu_data);
98 ph->dump_disk_section = 0;
99 ph->block_num_dd = 0;
100 ph->num_of_blocks_dd = 0;
101 ph->offset_dd = 0;
102
103 ph->maxtime_to_auto = 0; /* disabled */
104
105 /* The first two sections are mandatory */
106 ph->cpu_data.dump_flags = DUMP_REQUEST_FLAG;
107 ph->cpu_data.source_type = DUMP_SOURCE_CPU;
108 ph->cpu_data.source_address = 0;
109 ph->cpu_data.source_length = phyp_dump_info->cpu_state_size;
110 ph->cpu_data.destination_address = addr_offset;
111 addr_offset += phyp_dump_info->cpu_state_size;
112
113 ph->hpte_data.dump_flags = DUMP_REQUEST_FLAG;
114 ph->hpte_data.source_type = DUMP_SOURCE_HPTE;
115 ph->hpte_data.source_address = 0;
116 ph->hpte_data.source_length = phyp_dump_info->hpte_region_size;
117 ph->hpte_data.destination_address = addr_offset;
118 addr_offset += phyp_dump_info->hpte_region_size;
119
120 /* This section describes the low kernel region */
121 ph->kernel_data.dump_flags = DUMP_REQUEST_FLAG;
122 ph->kernel_data.source_type = DUMP_SOURCE_RMO;
123 ph->kernel_data.source_address = PHYP_DUMP_RMR_START;
124 ph->kernel_data.source_length = PHYP_DUMP_RMR_END;
125 ph->kernel_data.destination_address = addr_offset;
126 addr_offset += ph->kernel_data.source_length;
127
128 return addr_offset;
129}
130
131static void print_dump_header(const struct phyp_dump_header *ph)
132{
133#ifdef DEBUG
134 if (ph == NULL)
135 return;
136
137 printk(KERN_INFO "dump header:\n");
138 /* setup some ph->sections required */
139 printk(KERN_INFO "version = %d\n", ph->version);
140 printk(KERN_INFO "Sections = %d\n", ph->num_of_sections);
141 printk(KERN_INFO "Status = 0x%x\n", ph->status);
142
143 /* No ph->disk, so all should be set to 0 */
144 printk(KERN_INFO "Offset to first section 0x%x\n",
145 ph->first_offset_section);
146 printk(KERN_INFO "dump disk sections should be zero\n");
147 printk(KERN_INFO "dump disk section = %d\n", ph->dump_disk_section);
148 printk(KERN_INFO "block num = %lld\n", ph->block_num_dd);
149 printk(KERN_INFO "number of blocks = %lld\n", ph->num_of_blocks_dd);
150 printk(KERN_INFO "dump disk offset = %d\n", ph->offset_dd);
151 printk(KERN_INFO "Max auto time= %d\n", ph->maxtime_to_auto);
152
153 /*set cpu state and hpte states as well scratch pad area */
154 printk(KERN_INFO " CPU AREA\n");
155 printk(KERN_INFO "cpu dump_flags =%d\n", ph->cpu_data.dump_flags);
156 printk(KERN_INFO "cpu source_type =%d\n", ph->cpu_data.source_type);
157 printk(KERN_INFO "cpu error_flags =%d\n", ph->cpu_data.error_flags);
158 printk(KERN_INFO "cpu source_address =%llx\n",
159 ph->cpu_data.source_address);
160 printk(KERN_INFO "cpu source_length =%llx\n",
161 ph->cpu_data.source_length);
162 printk(KERN_INFO "cpu length_copied =%llx\n",
163 ph->cpu_data.length_copied);
164
165 printk(KERN_INFO " HPTE AREA\n");
166 printk(KERN_INFO "HPTE dump_flags =%d\n", ph->hpte_data.dump_flags);
167 printk(KERN_INFO "HPTE source_type =%d\n", ph->hpte_data.source_type);
168 printk(KERN_INFO "HPTE error_flags =%d\n", ph->hpte_data.error_flags);
169 printk(KERN_INFO "HPTE source_address =%llx\n",
170 ph->hpte_data.source_address);
171 printk(KERN_INFO "HPTE source_length =%llx\n",
172 ph->hpte_data.source_length);
173 printk(KERN_INFO "HPTE length_copied =%llx\n",
174 ph->hpte_data.length_copied);
175
176 printk(KERN_INFO " SRSD AREA\n");
177 printk(KERN_INFO "SRSD dump_flags =%d\n", ph->kernel_data.dump_flags);
178 printk(KERN_INFO "SRSD source_type =%d\n", ph->kernel_data.source_type);
179 printk(KERN_INFO "SRSD error_flags =%d\n", ph->kernel_data.error_flags);
180 printk(KERN_INFO "SRSD source_address =%llx\n",
181 ph->kernel_data.source_address);
182 printk(KERN_INFO "SRSD source_length =%llx\n",
183 ph->kernel_data.source_length);
184 printk(KERN_INFO "SRSD length_copied =%llx\n",
185 ph->kernel_data.length_copied);
186#endif
187}
188
189static ssize_t show_phyp_dump_active(struct kobject *kobj,
190 struct kobj_attribute *attr, char *buf)
191{
192
193 /* create filesystem entry so kdump is phyp-dump aware */
194 return sprintf(buf, "%lx\n", phyp_dump_info->phyp_dump_at_boot);
195}
196
197static struct kobj_attribute pdl = __ATTR(phyp_dump_active, 0600,
198 show_phyp_dump_active,
199 NULL);
200
201static void register_dump_area(struct phyp_dump_header *ph, unsigned long addr)
202{
203 int rc;
204
205 /* Add addr value if not initialized before */
206 if (ph->cpu_data.destination_address == 0) {
207 ph->cpu_data.destination_address += addr;
208 ph->hpte_data.destination_address += addr;
209 ph->kernel_data.destination_address += addr;
210 }
211
212 /* ToDo Invalidate kdump and free memory range. */
213
214 do {
215 rc = rtas_call(ibm_configure_kernel_dump, 3, 1, NULL,
216 1, ph, sizeof(struct phyp_dump_header));
217 } while (rtas_busy_delay(rc));
218
219 if (rc) {
220 printk(KERN_ERR "phyp-dump: unexpected error (%d) on "
221 "register\n", rc);
222 print_dump_header(ph);
223 return;
224 }
225
226 rc = sysfs_create_file(kernel_kobj, &pdl.attr);
227 if (rc)
228 printk(KERN_ERR "phyp-dump: unable to create sysfs"
229 " file (%d)\n", rc);
230}
231
232static
233void invalidate_last_dump(struct phyp_dump_header *ph, unsigned long addr)
234{
235 int rc;
236
237 /* Add addr value if not initialized before */
238 if (ph->cpu_data.destination_address == 0) {
239 ph->cpu_data.destination_address += addr;
240 ph->hpte_data.destination_address += addr;
241 ph->kernel_data.destination_address += addr;
242 }
243
244 do {
245 rc = rtas_call(ibm_configure_kernel_dump, 3, 1, NULL,
246 2, ph, sizeof(struct phyp_dump_header));
247 } while (rtas_busy_delay(rc));
248
249 if (rc) {
250 printk(KERN_ERR "phyp-dump: unexpected error (%d) "
251 "on invalidate\n", rc);
252 print_dump_header(ph);
253 }
254}
255
256/* ------------------------------------------------- */
257/**
258 * release_memory_range -- release memory previously memblock_reserved
259 * @start_pfn: starting physical frame number
260 * @nr_pages: number of pages to free.
261 *
262 * This routine will release memory that had been previously
263 * memblock_reserved in early boot. The released memory becomes
 264 * available for general use.
265 */
266static void release_memory_range(unsigned long start_pfn,
267 unsigned long nr_pages)
268{
269 struct page *rpage;
270 unsigned long end_pfn;
271 long i;
272
273 end_pfn = start_pfn + nr_pages;
274
275 for (i = start_pfn; i <= end_pfn; i++) {
276 rpage = pfn_to_page(i);
277 if (PageReserved(rpage)) {
278 ClearPageReserved(rpage);
279 init_page_count(rpage);
280 __free_page(rpage);
281 totalram_pages++;
282 }
283 }
284}
285
286/**
287 * track_freed_range -- Counts the range being freed.
288 * Once the counter goes to zero, it re-registers dump for
289 * future use.
290 */
291static void
292track_freed_range(unsigned long addr, unsigned long length)
293{
294 static unsigned long scratch_area_size, reserved_area_size;
295
296 if (addr < phyp_dump_info->init_reserve_start)
297 return;
298
299 if ((addr >= phyp_dump_info->init_reserve_start) &&
300 (addr <= phyp_dump_info->init_reserve_start +
301 phyp_dump_info->init_reserve_size))
302 reserved_area_size += length;
303
304 if ((addr >= phyp_dump_info->reserved_scratch_addr) &&
305 (addr <= phyp_dump_info->reserved_scratch_addr +
306 phyp_dump_info->reserved_scratch_size))
307 scratch_area_size += length;
308
309 if ((reserved_area_size == phyp_dump_info->init_reserve_size) &&
310 (scratch_area_size == phyp_dump_info->reserved_scratch_size)) {
311
312 invalidate_last_dump(&phdr,
313 phyp_dump_info->reserved_scratch_addr);
314 register_dump_area(&phdr,
315 phyp_dump_info->reserved_scratch_addr);
316 }
317}
318
319/* ------------------------------------------------- */
320/**
321 * sysfs_release_region -- sysfs interface to release memory range.
322 *
323 * Usage:
324 * "echo <start addr> <length> > /sys/kernel/release_region"
325 *
326 * Example:
327 * "echo 0x40000000 0x10000000 > /sys/kernel/release_region"
328 *
329 * will release 256MB starting at 1GB.
330 */
331static ssize_t store_release_region(struct kobject *kobj,
332 struct kobj_attribute *attr,
333 const char *buf, size_t count)
334{
335 unsigned long start_addr, length, end_addr;
336 unsigned long start_pfn, nr_pages;
337 ssize_t ret;
338
339 ret = sscanf(buf, "%lx %lx", &start_addr, &length);
340 if (ret != 2)
341 return -EINVAL;
342
343 track_freed_range(start_addr, length);
344
345 /* Range-check - don't free any reserved memory that
346 * wasn't reserved for phyp-dump */
347 if (start_addr < phyp_dump_info->init_reserve_start)
348 start_addr = phyp_dump_info->init_reserve_start;
349
350 end_addr = phyp_dump_info->init_reserve_start +
351 phyp_dump_info->init_reserve_size;
352 if (start_addr+length > end_addr)
353 length = end_addr - start_addr;
354
 355 /* Release the region of memory passed in by user */
356 start_pfn = PFN_DOWN(start_addr);
357 nr_pages = PFN_DOWN(length);
358 release_memory_range(start_pfn, nr_pages);
359
360 return count;
361}
362
363static ssize_t show_release_region(struct kobject *kobj,
364 struct kobj_attribute *attr, char *buf)
365{
366 u64 second_addr_range;
367
368 /* total reserved size - start of scratch area */
369 second_addr_range = phyp_dump_info->init_reserve_size -
370 phyp_dump_info->reserved_scratch_size;
371 return sprintf(buf, "CPU:0x%llx-0x%llx: HPTE:0x%llx-0x%llx:"
372 " DUMP:0x%llx-0x%llx, 0x%lx-0x%llx:\n",
373 phdr.cpu_data.destination_address,
374 phdr.cpu_data.length_copied,
375 phdr.hpte_data.destination_address,
376 phdr.hpte_data.length_copied,
377 phdr.kernel_data.destination_address,
378 phdr.kernel_data.length_copied,
379 phyp_dump_info->init_reserve_start,
380 second_addr_range);
381}
382
383static struct kobj_attribute rr = __ATTR(release_region, 0600,
384 show_release_region,
385 store_release_region);
386
387static int __init phyp_dump_setup(void)
388{
389 struct device_node *rtas;
390 const struct phyp_dump_header *dump_header = NULL;
391 unsigned long dump_area_start;
392 unsigned long dump_area_length;
393 int header_len = 0;
394 int rc;
395
396 /* If no memory was reserved in early boot, there is nothing to do */
397 if (phyp_dump_info->init_reserve_size == 0)
398 return 0;
399
400 /* Return if phyp dump not supported */
401 if (!phyp_dump_info->phyp_dump_configured)
402 return -ENOSYS;
403
404 /* Is there dump data waiting for us? If there isn't,
405 * then register a new dump area, and release all of
406 * the rest of the reserved ram.
407 *
408 * The /rtas/ibm,kernel-dump rtas node is present only
409 * if there is dump data waiting for us.
410 */
411 rtas = of_find_node_by_path("/rtas");
412 if (rtas) {
413 dump_header = of_get_property(rtas, "ibm,kernel-dump",
414 &header_len);
415 of_node_put(rtas);
416 }
417
418 ibm_configure_kernel_dump = rtas_token("ibm,configure-kernel-dump");
419
420 print_dump_header(dump_header);
421 dump_area_length = init_dump_header(&phdr);
422 /* align down */
423 dump_area_start = phyp_dump_info->init_reserve_start & PAGE_MASK;
424
425 if (dump_header == NULL) {
426 register_dump_area(&phdr, dump_area_start);
427 return 0;
428 }
429
430 /* re-register the dump area, if old dump was invalid */
431 if ((dump_header) && (dump_header->status & DUMP_ERROR_FLAG)) {
432 invalidate_last_dump(&phdr, dump_area_start);
433 register_dump_area(&phdr, dump_area_start);
434 return 0;
435 }
436
437 if (dump_header) {
438 phyp_dump_info->reserved_scratch_addr =
439 dump_header->cpu_data.destination_address;
440 phyp_dump_info->reserved_scratch_size =
441 dump_header->cpu_data.source_length +
442 dump_header->hpte_data.source_length +
443 dump_header->kernel_data.source_length;
444 }
445
446 /* Should we create a dump_subsys, analogous to s390/ipl.c ? */
447 rc = sysfs_create_file(kernel_kobj, &rr.attr);
448 if (rc)
449 printk(KERN_ERR "phyp-dump: unable to create sysfs file (%d)\n",
450 rc);
451
452 /* ToDo: re-register the dump area, for next time. */
453 return 0;
454}
455machine_subsys_initcall(pseries, phyp_dump_setup);
456
457int __init early_init_dt_scan_phyp_dump(unsigned long node,
458 const char *uname, int depth, void *data)
459{
460 const unsigned int *sizes;
461
462 phyp_dump_info->phyp_dump_configured = 0;
463 phyp_dump_info->phyp_dump_is_active = 0;
464
465 if (depth != 1 || strcmp(uname, "rtas") != 0)
466 return 0;
467
468 if (of_get_flat_dt_prop(node, "ibm,configure-kernel-dump", NULL))
469 phyp_dump_info->phyp_dump_configured++;
470
471 if (of_get_flat_dt_prop(node, "ibm,dump-kernel", NULL))
472 phyp_dump_info->phyp_dump_is_active++;
473
474 sizes = of_get_flat_dt_prop(node, "ibm,configure-kernel-dump-sizes",
475 NULL);
476 if (!sizes)
477 return 0;
478
479 if (sizes[0] == 1)
480 phyp_dump_info->cpu_state_size = *((unsigned long *)&sizes[1]);
481
482 if (sizes[3] == 2)
483 phyp_dump_info->hpte_region_size =
484 *((unsigned long *)&sizes[4]);
485 return 1;
486}
487
488/* Look for phyp_dump= cmdline option */
489static int __init early_phyp_dump_enabled(char *p)
490{
491 phyp_dump_info->phyp_dump_at_boot = 1;
492
493 if (!p)
494 return 0;
495
496 if (strncmp(p, "1", 1) == 0)
497 phyp_dump_info->phyp_dump_at_boot = 1;
498 else if (strncmp(p, "0", 1) == 0)
499 phyp_dump_info->phyp_dump_at_boot = 0;
500
501 return 0;
502}
503early_param("phyp_dump", early_phyp_dump_enabled);
504
505/* Look for phyp_dump_reserve_size= cmdline option */
506static int __init early_phyp_dump_reserve_size(char *p)
507{
508 if (p)
509 phyp_dump_info->reserve_bootvar = memparse(p, &p);
510
511 return 0;
512}
513early_param("phyp_dump_reserve_size", early_phyp_dump_reserve_size);
diff --git a/arch/powerpc/platforms/pseries/processor_idle.c b/arch/powerpc/platforms/pseries/processor_idle.c
index 085fd3f45ad2..a12e95af6933 100644
--- a/arch/powerpc/platforms/pseries/processor_idle.c
+++ b/arch/powerpc/platforms/pseries/processor_idle.c
@@ -96,6 +96,20 @@ out:
96 return index; 96 return index;
97} 97}
98 98
99static void check_and_cede_processor(void)
100{
101 /*
102 * Interrupts are soft-disabled at this point,
103 * but not hard disabled. So an interrupt might have
104 * occurred before entering NAP, and would be potentially
105 * lost (edge events, decrementer events, etc...) unless
106 * we first hard disable then check.
107 */
108 hard_irq_disable();
109 if (get_paca()->irq_happened == 0)
110 cede_processor();
111}
112
99static int dedicated_cede_loop(struct cpuidle_device *dev, 113static int dedicated_cede_loop(struct cpuidle_device *dev,
100 struct cpuidle_driver *drv, 114 struct cpuidle_driver *drv,
101 int index) 115 int index)
@@ -108,7 +122,7 @@ static int dedicated_cede_loop(struct cpuidle_device *dev,
108 122
109 ppc64_runlatch_off(); 123 ppc64_runlatch_off();
110 HMT_medium(); 124 HMT_medium();
111 cede_processor(); 125 check_and_cede_processor();
112 126
113 get_lppaca()->donate_dedicated_cpu = 0; 127 get_lppaca()->donate_dedicated_cpu = 0;
114 dev->last_residency = 128 dev->last_residency =
@@ -132,7 +146,7 @@ static int shared_cede_loop(struct cpuidle_device *dev,
132 * processor. When returning here, external interrupts 146 * processor. When returning here, external interrupts
133 * are enabled. 147 * are enabled.
134 */ 148 */
135 cede_processor(); 149 check_and_cede_processor();
136 150
137 dev->last_residency = 151 dev->last_residency =
138 (int)idle_loop_epilog(in_purr, kt_before); 152 (int)idle_loop_epilog(in_purr, kt_before);
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 086d2ae4e06a..c4dfccd3a3d9 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -16,37 +16,15 @@
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */ 17 */
18 18
19/* Change Activity:
20 * 2001/09/21 : engebret : Created with minimal EPOW and HW exception support.
21 * End Change Activity
22 */
23
24#include <linux/errno.h>
25#include <linux/threads.h>
26#include <linux/kernel_stat.h>
27#include <linux/signal.h>
28#include <linux/sched.h> 19#include <linux/sched.h>
29#include <linux/ioport.h>
30#include <linux/interrupt.h> 20#include <linux/interrupt.h>
31#include <linux/timex.h>
32#include <linux/init.h>
33#include <linux/delay.h>
34#include <linux/irq.h> 21#include <linux/irq.h>
35#include <linux/random.h> 22#include <linux/of.h>
36#include <linux/sysrq.h> 23#include <linux/fs.h>
37#include <linux/bitops.h> 24#include <linux/reboot.h>
38 25
39#include <asm/uaccess.h>
40#include <asm/system.h>
41#include <asm/io.h>
42#include <asm/pgtable.h>
43#include <asm/irq.h>
44#include <asm/cache.h>
45#include <asm/prom.h>
46#include <asm/ptrace.h>
47#include <asm/machdep.h> 26#include <asm/machdep.h>
48#include <asm/rtas.h> 27#include <asm/rtas.h>
49#include <asm/udbg.h>
50#include <asm/firmware.h> 28#include <asm/firmware.h>
51 29
52#include "pseries.h" 30#include "pseries.h"
@@ -57,7 +35,6 @@ static DEFINE_SPINLOCK(ras_log_buf_lock);
57static char global_mce_data_buf[RTAS_ERROR_LOG_MAX]; 35static char global_mce_data_buf[RTAS_ERROR_LOG_MAX];
58static DEFINE_PER_CPU(__u64, mce_data_buf); 36static DEFINE_PER_CPU(__u64, mce_data_buf);
59 37
60static int ras_get_sensor_state_token;
61static int ras_check_exception_token; 38static int ras_check_exception_token;
62 39
63#define EPOW_SENSOR_TOKEN 9 40#define EPOW_SENSOR_TOKEN 9
@@ -75,7 +52,6 @@ static int __init init_ras_IRQ(void)
75{ 52{
76 struct device_node *np; 53 struct device_node *np;
77 54
78 ras_get_sensor_state_token = rtas_token("get-sensor-state");
79 ras_check_exception_token = rtas_token("check-exception"); 55 ras_check_exception_token = rtas_token("check-exception");
80 56
81 /* Internal Errors */ 57 /* Internal Errors */
@@ -95,26 +71,126 @@ static int __init init_ras_IRQ(void)
95 71
96 return 0; 72 return 0;
97} 73}
98__initcall(init_ras_IRQ); 74subsys_initcall(init_ras_IRQ);
99 75
100/* 76#define EPOW_SHUTDOWN_NORMAL 1
101 * Handle power subsystem events (EPOW). 77#define EPOW_SHUTDOWN_ON_UPS 2
102 * 78#define EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS 3
103 * Presently we just log the event has occurred. This should be fixed 79#define EPOW_SHUTDOWN_AMBIENT_TEMPERATURE_TOO_HIGH 4
104 * to examine the type of power failure and take appropriate action where 80
105 * the time horizon permits something useful to be done. 81static void handle_system_shutdown(char event_modifier)
106 */ 82{
83 switch (event_modifier) {
84 case EPOW_SHUTDOWN_NORMAL:
85 pr_emerg("Firmware initiated power off");
86 orderly_poweroff(1);
87 break;
88
89 case EPOW_SHUTDOWN_ON_UPS:
90 pr_emerg("Loss of power reported by firmware, system is "
91 "running on UPS/battery");
92 break;
93
94 case EPOW_SHUTDOWN_LOSS_OF_CRITICAL_FUNCTIONS:
95 pr_emerg("Loss of system critical functions reported by "
96 "firmware");
97 pr_emerg("Check RTAS error log for details");
98 orderly_poweroff(1);
99 break;
100
101 case EPOW_SHUTDOWN_AMBIENT_TEMPERATURE_TOO_HIGH:
102 pr_emerg("Ambient temperature too high reported by firmware");
103 pr_emerg("Check RTAS error log for details");
104 orderly_poweroff(1);
105 break;
106
107 default:
108 pr_err("Unknown power/cooling shutdown event (modifier %d)",
109 event_modifier);
110 }
111}
112
113struct epow_errorlog {
114 unsigned char sensor_value;
115 unsigned char event_modifier;
116 unsigned char extended_modifier;
117 unsigned char reserved;
118 unsigned char platform_reason;
119};
120
121#define EPOW_RESET 0
122#define EPOW_WARN_COOLING 1
123#define EPOW_WARN_POWER 2
124#define EPOW_SYSTEM_SHUTDOWN 3
125#define EPOW_SYSTEM_HALT 4
126#define EPOW_MAIN_ENCLOSURE 5
127#define EPOW_POWER_OFF 7
128
129void rtas_parse_epow_errlog(struct rtas_error_log *log)
130{
131 struct pseries_errorlog *pseries_log;
132 struct epow_errorlog *epow_log;
133 char action_code;
134 char modifier;
135
136 pseries_log = get_pseries_errorlog(log, PSERIES_ELOG_SECT_ID_EPOW);
137 if (pseries_log == NULL)
138 return;
139
140 epow_log = (struct epow_errorlog *)pseries_log->data;
141 action_code = epow_log->sensor_value & 0xF; /* bottom 4 bits */
142 modifier = epow_log->event_modifier & 0xF; /* bottom 4 bits */
143
144 switch (action_code) {
145 case EPOW_RESET:
146 pr_err("Non critical power or cooling issue cleared");
147 break;
148
149 case EPOW_WARN_COOLING:
150 pr_err("Non critical cooling issue reported by firmware");
151 pr_err("Check RTAS error log for details");
152 break;
153
154 case EPOW_WARN_POWER:
155 pr_err("Non critical power issue reported by firmware");
156 pr_err("Check RTAS error log for details");
157 break;
158
159 case EPOW_SYSTEM_SHUTDOWN:
160 handle_system_shutdown(epow_log->event_modifier);
161 break;
162
163 case EPOW_SYSTEM_HALT:
164 pr_emerg("Firmware initiated power off");
165 orderly_poweroff(1);
166 break;
167
168 case EPOW_MAIN_ENCLOSURE:
169 case EPOW_POWER_OFF:
170 pr_emerg("Critical power/cooling issue reported by firmware");
171 pr_emerg("Check RTAS error log for details");
172 pr_emerg("Immediate power off");
173 emergency_sync();
174 kernel_power_off();
175 break;
176
177 default:
178 pr_err("Unknown power/cooling event (action code %d)",
179 action_code);
180 }
181}
182
183/* Handle environmental and power warning (EPOW) interrupts. */
107static irqreturn_t ras_epow_interrupt(int irq, void *dev_id) 184static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
108{ 185{
109 int status = 0xdeadbeef; 186 int status;
110 int state = 0; 187 int state;
111 int critical; 188 int critical;
112 189
113 status = rtas_call(ras_get_sensor_state_token, 2, 2, &state, 190 status = rtas_get_sensor(EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX, &state);
114 EPOW_SENSOR_TOKEN, EPOW_SENSOR_INDEX);
115 191
116 if (state > 3) 192 if (state > 3)
117 critical = 1; /* Time Critical */ 193 critical = 1; /* Time Critical */
118 else 194 else
119 critical = 0; 195 critical = 0;
120 196
@@ -123,18 +199,14 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
123 status = rtas_call(ras_check_exception_token, 6, 1, NULL, 199 status = rtas_call(ras_check_exception_token, 6, 1, NULL,
124 RTAS_VECTOR_EXTERNAL_INTERRUPT, 200 RTAS_VECTOR_EXTERNAL_INTERRUPT,
125 virq_to_hw(irq), 201 virq_to_hw(irq),
126 RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS, 202 RTAS_EPOW_WARNING,
127 critical, __pa(&ras_log_buf), 203 critical, __pa(&ras_log_buf),
128 rtas_get_error_log_max()); 204 rtas_get_error_log_max());
129 205
130 udbg_printf("EPOW <0x%lx 0x%x 0x%x>\n",
131 *((unsigned long *)&ras_log_buf), status, state);
132 printk(KERN_WARNING "EPOW <0x%lx 0x%x 0x%x>\n",
133 *((unsigned long *)&ras_log_buf), status, state);
134
135 /* format and print the extended information */
136 log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, 0); 206 log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, 0);
137 207
208 rtas_parse_epow_errlog((struct rtas_error_log *)ras_log_buf);
209
138 spin_unlock(&ras_log_buf_lock); 210 spin_unlock(&ras_log_buf_lock);
139 return IRQ_HANDLED; 211 return IRQ_HANDLED;
140} 212}
@@ -150,7 +222,7 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id)
150static irqreturn_t ras_error_interrupt(int irq, void *dev_id) 222static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
151{ 223{
152 struct rtas_error_log *rtas_elog; 224 struct rtas_error_log *rtas_elog;
153 int status = 0xdeadbeef; 225 int status;
154 int fatal; 226 int fatal;
155 227
156 spin_lock(&ras_log_buf_lock); 228 spin_lock(&ras_log_buf_lock);
@@ -158,7 +230,7 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
158 status = rtas_call(ras_check_exception_token, 6, 1, NULL, 230 status = rtas_call(ras_check_exception_token, 6, 1, NULL,
159 RTAS_VECTOR_EXTERNAL_INTERRUPT, 231 RTAS_VECTOR_EXTERNAL_INTERRUPT,
160 virq_to_hw(irq), 232 virq_to_hw(irq),
161 RTAS_INTERNAL_ERROR, 1 /*Time Critical */, 233 RTAS_INTERNAL_ERROR, 1 /* Time Critical */,
162 __pa(&ras_log_buf), 234 __pa(&ras_log_buf),
163 rtas_get_error_log_max()); 235 rtas_get_error_log_max());
164 236
@@ -173,24 +245,13 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id)
173 log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, fatal); 245 log_error(ras_log_buf, ERR_TYPE_RTAS_LOG, fatal);
174 246
175 if (fatal) { 247 if (fatal) {
176 udbg_printf("Fatal HW Error <0x%lx 0x%x>\n", 248 pr_emerg("Fatal hardware error reported by firmware");
177 *((unsigned long *)&ras_log_buf), status); 249 pr_emerg("Check RTAS error log for details");
178 printk(KERN_EMERG "Error: Fatal hardware error <0x%lx 0x%x>\n", 250 pr_emerg("Immediate power off");
179 *((unsigned long *)&ras_log_buf), status); 251 emergency_sync();
180 252 kernel_power_off();
181#ifndef DEBUG_RTAS_POWER_OFF
182 /* Don't actually power off when debugging so we can test
183 * without actually failing while injecting errors.
184 * Error data will not be logged to syslog.
185 */
186 ppc_md.power_off();
187#endif
188 } else { 253 } else {
189 udbg_printf("Recoverable HW Error <0x%lx 0x%x>\n", 254 pr_err("Recoverable hardware error reported by firmware");
190 *((unsigned long *)&ras_log_buf), status);
191 printk(KERN_WARNING
192 "Warning: Recoverable hardware error <0x%lx 0x%x>\n",
193 *((unsigned long *)&ras_log_buf), status);
194 } 255 }
195 256
196 spin_unlock(&ras_log_buf_lock); 257 spin_unlock(&ras_log_buf_lock);
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index f79f1278dfca..51ecac920dd8 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -190,9 +190,8 @@ static void __init pseries_mpic_init_IRQ(void)
190 BUG_ON(openpic_addr == 0); 190 BUG_ON(openpic_addr == 0);
191 191
192 /* Setup the openpic driver */ 192 /* Setup the openpic driver */
193 mpic = mpic_alloc(pSeries_mpic_node, openpic_addr, 0, 193 mpic = mpic_alloc(pSeries_mpic_node, openpic_addr,
194 16, 250, /* isu size, irq count */ 194 MPIC_NO_RESET, 16, 0, " MPIC ");
195 " MPIC ");
196 BUG_ON(mpic == NULL); 195 BUG_ON(mpic == NULL);
197 196
198 /* Add ISUs */ 197 /* Add ISUs */
@@ -261,8 +260,12 @@ static int pci_dn_reconfig_notifier(struct notifier_block *nb, unsigned long act
261 switch (action) { 260 switch (action) {
262 case PSERIES_RECONFIG_ADD: 261 case PSERIES_RECONFIG_ADD:
263 pci = np->parent->data; 262 pci = np->parent->data;
264 if (pci) 263 if (pci) {
265 update_dn_pci_info(np, pci->phb); 264 update_dn_pci_info(np, pci->phb);
265
266 /* Create EEH device for the OF node */
267 eeh_dev_init(np, pci->phb);
268 }
266 break; 269 break;
267 default: 270 default:
268 err = NOTIFY_DONE; 271 err = NOTIFY_DONE;
@@ -380,8 +383,12 @@ static void __init pSeries_setup_arch(void)
380 383
381 fwnmi_init(); 384 fwnmi_init();
382 385
	386	/* By default, only probe PCI (can be overridden by rtas_pci) */
387 pci_add_flags(PCI_PROBE_ONLY);
388
383 /* Find and initialize PCI host bridges */ 389 /* Find and initialize PCI host bridges */
384 init_pci_config_tokens(); 390 init_pci_config_tokens();
391 eeh_pseries_init();
385 find_and_init_phbs(); 392 find_and_init_phbs();
386 pSeries_reconfig_notifier_register(&pci_dn_reconfig_nb); 393 pSeries_reconfig_notifier_register(&pci_dn_reconfig_nb);
387 eeh_init(); 394 eeh_init();
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c
index b84a8b2238dd..47226e04126d 100644
--- a/arch/powerpc/platforms/pseries/suspend.c
+++ b/arch/powerpc/platforms/pseries/suspend.c
@@ -24,6 +24,7 @@
24#include <asm/machdep.h> 24#include <asm/machdep.h>
25#include <asm/mmu.h> 25#include <asm/mmu.h>
26#include <asm/rtas.h> 26#include <asm/rtas.h>
27#include <asm/topology.h>
27 28
28static u64 stream_id; 29static u64 stream_id;
29static struct device suspend_dev; 30static struct device suspend_dev;
@@ -138,8 +139,11 @@ static ssize_t store_hibernate(struct device *dev,
138 ssleep(1); 139 ssleep(1);
139 } while (rc == -EAGAIN); 140 } while (rc == -EAGAIN);
140 141
141 if (!rc) 142 if (!rc) {
143 stop_topology_update();
142 rc = pm_suspend(PM_SUSPEND_MEM); 144 rc = pm_suspend(PM_SUSPEND_MEM);
145 start_topology_update();
146 }
143 147
144 stream_id = 0; 148 stream_id = 0;
145 149
diff --git a/arch/powerpc/platforms/wsp/Kconfig b/arch/powerpc/platforms/wsp/Kconfig
index 57d22a2f4ba9..79d2225b7608 100644
--- a/arch/powerpc/platforms/wsp/Kconfig
+++ b/arch/powerpc/platforms/wsp/Kconfig
@@ -25,6 +25,7 @@ config PPC_CHROMA
25 bool "PowerEN PCIe Chroma Card" 25 bool "PowerEN PCIe Chroma Card"
26 select EPAPR_BOOT 26 select EPAPR_BOOT
27 select PPC_WSP 27 select PPC_WSP
28 select OF_DYNAMIC
28 default y 29 default y
29 30
30endmenu 31endmenu
diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c
index 576874392543..97fe82ee8633 100644
--- a/arch/powerpc/platforms/wsp/ics.c
+++ b/arch/powerpc/platforms/wsp/ics.c
@@ -346,7 +346,7 @@ static int wsp_chip_set_affinity(struct irq_data *d,
346 * For the moment only implement delivery to all cpus or one cpu. 346 * For the moment only implement delivery to all cpus or one cpu.
347 * Get current irq_server for the given irq 347 * Get current irq_server for the given irq
348 */ 348 */
349 ret = cache_hwirq_map(ics, d->irq, cpumask); 349 ret = cache_hwirq_map(ics, hw_irq, cpumask);
350 if (ret == -1) { 350 if (ret == -1) {
351 char cpulist[128]; 351 char cpulist[128];
352 cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); 352 cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
diff --git a/arch/powerpc/platforms/wsp/opb_pic.c b/arch/powerpc/platforms/wsp/opb_pic.c
index 19f353dfcd03..cb565bf93650 100644
--- a/arch/powerpc/platforms/wsp/opb_pic.c
+++ b/arch/powerpc/platforms/wsp/opb_pic.c
@@ -30,7 +30,7 @@
30static int opb_index = 0; 30static int opb_index = 0;
31 31
32struct opb_pic { 32struct opb_pic {
33 struct irq_host *host; 33 struct irq_domain *host;
34 void *regs; 34 void *regs;
35 int index; 35 int index;
36 spinlock_t lock; 36 spinlock_t lock;
@@ -179,7 +179,7 @@ static struct irq_chip opb_irq_chip = {
179 .irq_set_type = opb_set_irq_type 179 .irq_set_type = opb_set_irq_type
180}; 180};
181 181
182static int opb_host_map(struct irq_host *host, unsigned int virq, 182static int opb_host_map(struct irq_domain *host, unsigned int virq,
183 irq_hw_number_t hwirq) 183 irq_hw_number_t hwirq)
184{ 184{
185 struct opb_pic *opb; 185 struct opb_pic *opb;
@@ -196,20 +196,9 @@ static int opb_host_map(struct irq_host *host, unsigned int virq,
196 return 0; 196 return 0;
197} 197}
198 198
199static int opb_host_xlate(struct irq_host *host, struct device_node *dn, 199static const struct irq_domain_ops opb_host_ops = {
200 const u32 *intspec, unsigned int intsize,
201 irq_hw_number_t *out_hwirq, unsigned int *out_type)
202{
203 /* Interrupt size must == 2 */
204 BUG_ON(intsize != 2);
205 *out_hwirq = intspec[0];
206 *out_type = intspec[1];
207 return 0;
208}
209
210static struct irq_host_ops opb_host_ops = {
211 .map = opb_host_map, 200 .map = opb_host_map,
212 .xlate = opb_host_xlate, 201 .xlate = irq_domain_xlate_twocell,
213}; 202};
214 203
215irqreturn_t opb_irq_handler(int irq, void *private) 204irqreturn_t opb_irq_handler(int irq, void *private)
@@ -263,13 +252,11 @@ struct opb_pic *opb_pic_init_one(struct device_node *dn)
263 goto free_opb; 252 goto free_opb;
264 } 253 }
265 254
266 /* Allocate an irq host so that Linux knows that despite only 255 /* Allocate an irq domain so that Linux knows that despite only
267 * having one interrupt to issue, we're the controller for multiple 256 * having one interrupt to issue, we're the controller for multiple
268 * hardware IRQs, so later we can lookup their virtual IRQs. */ 257 * hardware IRQs, so later we can lookup their virtual IRQs. */
269 258
270 opb->host = irq_alloc_host(dn, IRQ_HOST_MAP_LINEAR, 259 opb->host = irq_domain_add_linear(dn, OPB_NR_IRQS, &opb_host_ops, opb);
271 OPB_NR_IRQS, &opb_host_ops, -1);
272
273 if (!opb->host) { 260 if (!opb->host) {
274 printk(KERN_ERR "opb: Failed to allocate IRQ host!\n"); 261 printk(KERN_ERR "opb: Failed to allocate IRQ host!\n");
275 goto free_regs; 262 goto free_regs;
@@ -277,7 +264,6 @@ struct opb_pic *opb_pic_init_one(struct device_node *dn)
277 264
278 opb->index = opb_index++; 265 opb->index = opb_index++;
279 spin_lock_init(&opb->lock); 266 spin_lock_init(&opb->lock);
280 opb->host->host_data = opb;
281 267
282 /* Disable all interrupts by default */ 268 /* Disable all interrupts by default */
283 opb_out(opb, OPB_MLSASIER, 0); 269 opb_out(opb, OPB_MLSASIER, 0);
diff --git a/arch/powerpc/platforms/wsp/smp.c b/arch/powerpc/platforms/wsp/smp.c
index 71bd105f3863..0ba103ae83a5 100644
--- a/arch/powerpc/platforms/wsp/smp.c
+++ b/arch/powerpc/platforms/wsp/smp.c
@@ -71,7 +71,7 @@ int __devinit smp_a2_kick_cpu(int nr)
71 71
72static int __init smp_a2_probe(void) 72static int __init smp_a2_probe(void)
73{ 73{
74 return cpus_weight(cpu_possible_map); 74 return num_possible_cpus();
75} 75}
76 76
77static struct smp_ops_t a2_smp_ops = { 77static struct smp_ops_t a2_smp_ops = {
diff --git a/arch/powerpc/platforms/wsp/wsp_pci.c b/arch/powerpc/platforms/wsp/wsp_pci.c
index e0262cd0e2d3..763014cd1e62 100644
--- a/arch/powerpc/platforms/wsp/wsp_pci.c
+++ b/arch/powerpc/platforms/wsp/wsp_pci.c
@@ -468,15 +468,15 @@ static void __init wsp_pcie_configure_hw(struct pci_controller *hose)
468#define DUMP_REG(x) \ 468#define DUMP_REG(x) \
469 pr_debug("%-30s : 0x%016llx\n", #x, in_be64(hose->cfg_data + x)) 469 pr_debug("%-30s : 0x%016llx\n", #x, in_be64(hose->cfg_data + x))
470 470
471#ifdef CONFIG_WSP_DD1_WORKAROUND_BAD_PCIE_CLASS 471 /*
472 /* WSP DD1 has a bogus class code by default in the PCI-E 472 * Some WSP variants has a bogus class code by default in the PCI-E
473 * root complex's built-in P2P bridge */ 473 * root complex's built-in P2P bridge
474 */
474 val = in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1); 475 val = in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1);
475 pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", val); 476 pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", val);
476 out_be64(hose->cfg_data + PCIE_REG_SYS_CFG1, 477 out_be64(hose->cfg_data + PCIE_REG_SYS_CFG1,
477 (val & ~PCIE_REG_SYS_CFG1_CLASS_CODE) | (PCI_CLASS_BRIDGE_PCI << 8)); 478 (val & ~PCIE_REG_SYS_CFG1_CLASS_CODE) | (PCI_CLASS_BRIDGE_PCI << 8));
478 pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1)); 479 pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1));
479#endif /* CONFIG_WSP_DD1_WORKAROUND_BAD_PCIE_CLASS */
480 480
481#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS 481#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
482 /* XXX Disable TCE caching, it doesn't work on DD1 */ 482 /* XXX Disable TCE caching, it doesn't work on DD1 */
@@ -682,7 +682,6 @@ static int __init wsp_setup_one_phb(struct device_node *np)
682 /* XXX Force re-assigning of everything for now */ 682 /* XXX Force re-assigning of everything for now */
683 pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC | 683 pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC |
684 PCI_ENABLE_PROC_DOMAINS); 684 PCI_ENABLE_PROC_DOMAINS);
685 pci_probe_only = 0;
686 685
687 /* Calculate how the TCE space is divided */ 686 /* Calculate how the TCE space is divided */
688 phb->dma32_base = 0; 687 phb->dma32_base = 0;
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig
index 7b4df37ac381..a84fecf63c4d 100644
--- a/arch/powerpc/sysdev/Kconfig
+++ b/arch/powerpc/sysdev/Kconfig
@@ -29,3 +29,7 @@ config SCOM_DEBUGFS
29 bool "Expose SCOM controllers via debugfs" 29 bool "Expose SCOM controllers via debugfs"
30 depends on PPC_SCOM 30 depends on PPC_SCOM
31 default n 31 default n
32
33config GE_FPGA
34 bool
35 default n
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile
index 5e37b4717864..1bd7ecb24620 100644
--- a/arch/powerpc/sysdev/Makefile
+++ b/arch/powerpc/sysdev/Makefile
@@ -4,6 +4,8 @@ ccflags-$(CONFIG_PPC64) := -mno-minimal-toc
4 4
5mpic-msi-obj-$(CONFIG_PCI_MSI) += mpic_msi.o mpic_u3msi.o mpic_pasemi_msi.o 5mpic-msi-obj-$(CONFIG_PCI_MSI) += mpic_msi.o mpic_u3msi.o mpic_pasemi_msi.o
6obj-$(CONFIG_MPIC) += mpic.o $(mpic-msi-obj-y) 6obj-$(CONFIG_MPIC) += mpic.o $(mpic-msi-obj-y)
7mpic-msgr-obj-$(CONFIG_MPIC_MSGR) += mpic_msgr.o
8obj-$(CONFIG_MPIC) += mpic.o $(mpic-msi-obj-y) $(mpic-msgr-obj-y)
7obj-$(CONFIG_PPC_EPAPR_HV_PIC) += ehv_pic.o 9obj-$(CONFIG_PPC_EPAPR_HV_PIC) += ehv_pic.o
8fsl-msi-obj-$(CONFIG_PCI_MSI) += fsl_msi.o 10fsl-msi-obj-$(CONFIG_PCI_MSI) += fsl_msi.o
9obj-$(CONFIG_PPC_MSI_BITMAP) += msi_bitmap.o 11obj-$(CONFIG_PPC_MSI_BITMAP) += msi_bitmap.o
@@ -65,3 +67,5 @@ obj-$(CONFIG_PPC_SCOM) += scom.o
65subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror 67subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
66 68
67obj-$(CONFIG_PPC_XICS) += xics/ 69obj-$(CONFIG_PPC_XICS) += xics/
70
71obj-$(CONFIG_GE_FPGA) += ge/
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c
index 5d7d59a43c4c..d4fa03f2b6ac 100644
--- a/arch/powerpc/sysdev/cpm1.c
+++ b/arch/powerpc/sysdev/cpm1.c
@@ -54,7 +54,7 @@ cpm8xx_t __iomem *cpmp; /* Pointer to comm processor space */
54immap_t __iomem *mpc8xx_immr; 54immap_t __iomem *mpc8xx_immr;
55static cpic8xx_t __iomem *cpic_reg; 55static cpic8xx_t __iomem *cpic_reg;
56 56
57static struct irq_host *cpm_pic_host; 57static struct irq_domain *cpm_pic_host;
58 58
59static void cpm_mask_irq(struct irq_data *d) 59static void cpm_mask_irq(struct irq_data *d)
60{ 60{
@@ -98,7 +98,7 @@ int cpm_get_irq(void)
98 return irq_linear_revmap(cpm_pic_host, cpm_vec); 98 return irq_linear_revmap(cpm_pic_host, cpm_vec);
99} 99}
100 100
101static int cpm_pic_host_map(struct irq_host *h, unsigned int virq, 101static int cpm_pic_host_map(struct irq_domain *h, unsigned int virq,
102 irq_hw_number_t hw) 102 irq_hw_number_t hw)
103{ 103{
104 pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw); 104 pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw);
@@ -123,7 +123,7 @@ static struct irqaction cpm_error_irqaction = {
123 .name = "error", 123 .name = "error",
124}; 124};
125 125
126static struct irq_host_ops cpm_pic_host_ops = { 126static const struct irq_domain_ops cpm_pic_host_ops = {
127 .map = cpm_pic_host_map, 127 .map = cpm_pic_host_map,
128}; 128};
129 129
@@ -164,8 +164,7 @@ unsigned int cpm_pic_init(void)
164 164
165 out_be32(&cpic_reg->cpic_cimr, 0); 165 out_be32(&cpic_reg->cpic_cimr, 0);
166 166
167 cpm_pic_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 167 cpm_pic_host = irq_domain_add_linear(np, 64, &cpm_pic_host_ops, NULL);
168 64, &cpm_pic_host_ops, 64);
169 if (cpm_pic_host == NULL) { 168 if (cpm_pic_host == NULL) {
170 printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n"); 169 printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
171 sirq = NO_IRQ; 170 sirq = NO_IRQ;
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c
index bcab50e2a9eb..d3be961e2ae7 100644
--- a/arch/powerpc/sysdev/cpm2_pic.c
+++ b/arch/powerpc/sysdev/cpm2_pic.c
@@ -50,7 +50,7 @@
50 50
51static intctl_cpm2_t __iomem *cpm2_intctl; 51static intctl_cpm2_t __iomem *cpm2_intctl;
52 52
53static struct irq_host *cpm2_pic_host; 53static struct irq_domain *cpm2_pic_host;
54#define NR_MASK_WORDS ((NR_IRQS + 31) / 32) 54#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
55static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; 55static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
56 56
@@ -214,7 +214,7 @@ unsigned int cpm2_get_irq(void)
214 return irq_linear_revmap(cpm2_pic_host, irq); 214 return irq_linear_revmap(cpm2_pic_host, irq);
215} 215}
216 216
217static int cpm2_pic_host_map(struct irq_host *h, unsigned int virq, 217static int cpm2_pic_host_map(struct irq_domain *h, unsigned int virq,
218 irq_hw_number_t hw) 218 irq_hw_number_t hw)
219{ 219{
220 pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw); 220 pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw);
@@ -224,21 +224,9 @@ static int cpm2_pic_host_map(struct irq_host *h, unsigned int virq,
224 return 0; 224 return 0;
225} 225}
226 226
227static int cpm2_pic_host_xlate(struct irq_host *h, struct device_node *ct, 227static const struct irq_domain_ops cpm2_pic_host_ops = {
228 const u32 *intspec, unsigned int intsize,
229 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
230{
231 *out_hwirq = intspec[0];
232 if (intsize > 1)
233 *out_flags = intspec[1];
234 else
235 *out_flags = IRQ_TYPE_NONE;
236 return 0;
237}
238
239static struct irq_host_ops cpm2_pic_host_ops = {
240 .map = cpm2_pic_host_map, 228 .map = cpm2_pic_host_map,
241 .xlate = cpm2_pic_host_xlate, 229 .xlate = irq_domain_xlate_onetwocell,
242}; 230};
243 231
244void cpm2_pic_init(struct device_node *node) 232void cpm2_pic_init(struct device_node *node)
@@ -275,8 +263,7 @@ void cpm2_pic_init(struct device_node *node)
275 out_be32(&cpm2_intctl->ic_scprrl, 0x05309770); 263 out_be32(&cpm2_intctl->ic_scprrl, 0x05309770);
276 264
277 /* create a legacy host */ 265 /* create a legacy host */
278 cpm2_pic_host = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 266 cpm2_pic_host = irq_domain_add_linear(node, 64, &cpm2_pic_host_ops, NULL);
279 64, &cpm2_pic_host_ops, 64);
280 if (cpm2_pic_host == NULL) { 267 if (cpm2_pic_host == NULL) {
281 printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n"); 268 printk(KERN_ERR "CPM2 PIC: failed to allocate irq host!\n");
282 return; 269 return;
diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c
index b6731e4a6646..6e0e1005227f 100644
--- a/arch/powerpc/sysdev/ehv_pic.c
+++ b/arch/powerpc/sysdev/ehv_pic.c
@@ -182,13 +182,13 @@ unsigned int ehv_pic_get_irq(void)
182 return irq_linear_revmap(global_ehv_pic->irqhost, irq); 182 return irq_linear_revmap(global_ehv_pic->irqhost, irq);
183} 183}
184 184
185static int ehv_pic_host_match(struct irq_host *h, struct device_node *node) 185static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node)
186{ 186{
187 /* Exact match, unless ehv_pic node is NULL */ 187 /* Exact match, unless ehv_pic node is NULL */
188 return h->of_node == NULL || h->of_node == node; 188 return h->of_node == NULL || h->of_node == node;
189} 189}
190 190
191static int ehv_pic_host_map(struct irq_host *h, unsigned int virq, 191static int ehv_pic_host_map(struct irq_domain *h, unsigned int virq,
192 irq_hw_number_t hw) 192 irq_hw_number_t hw)
193{ 193{
194 struct ehv_pic *ehv_pic = h->host_data; 194 struct ehv_pic *ehv_pic = h->host_data;
@@ -217,7 +217,7 @@ static int ehv_pic_host_map(struct irq_host *h, unsigned int virq,
217 return 0; 217 return 0;
218} 218}
219 219
220static int ehv_pic_host_xlate(struct irq_host *h, struct device_node *ct, 220static int ehv_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
221 const u32 *intspec, unsigned int intsize, 221 const u32 *intspec, unsigned int intsize,
222 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 222 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
223 223
@@ -248,7 +248,7 @@ static int ehv_pic_host_xlate(struct irq_host *h, struct device_node *ct,
248 return 0; 248 return 0;
249} 249}
250 250
251static struct irq_host_ops ehv_pic_host_ops = { 251static const struct irq_domain_ops ehv_pic_host_ops = {
252 .match = ehv_pic_host_match, 252 .match = ehv_pic_host_match,
253 .map = ehv_pic_host_map, 253 .map = ehv_pic_host_map,
254 .xlate = ehv_pic_host_xlate, 254 .xlate = ehv_pic_host_xlate,
@@ -275,9 +275,8 @@ void __init ehv_pic_init(void)
275 return; 275 return;
276 } 276 }
277 277
278 ehv_pic->irqhost = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 278 ehv_pic->irqhost = irq_domain_add_linear(np, NR_EHV_PIC_INTS,
279 NR_EHV_PIC_INTS, &ehv_pic_host_ops, 0); 279 &ehv_pic_host_ops, ehv_pic);
280
281 if (!ehv_pic->irqhost) { 280 if (!ehv_pic->irqhost) {
282 of_node_put(np); 281 of_node_put(np);
283 kfree(ehv_pic); 282 kfree(ehv_pic);
@@ -293,7 +292,6 @@ void __init ehv_pic_init(void)
293 of_node_put(np2); 292 of_node_put(np2);
294 } 293 }
295 294
296 ehv_pic->irqhost->host_data = ehv_pic;
297 ehv_pic->hc_irq = ehv_pic_irq_chip; 295 ehv_pic->hc_irq = ehv_pic_irq_chip;
298 ehv_pic->hc_irq.irq_set_affinity = ehv_pic_set_affinity; 296 ehv_pic->hc_irq.irq_set_affinity = ehv_pic_set_affinity;
299 ehv_pic->coreint_flag = coreint_flag; 297 ehv_pic->coreint_flag = coreint_flag;
diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
index 116415899176..37a69097e022 100644
--- a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
+++ b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c
@@ -24,6 +24,7 @@
24 */ 24 */
25 25
26#include <linux/kernel.h> 26#include <linux/kernel.h>
27#include <linux/export.h>
27#include <linux/slab.h> 28#include <linux/slab.h>
28#include <linux/err.h> 29#include <linux/err.h>
29#include <linux/of_platform.h> 30#include <linux/of_platform.h>
diff --git a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
index 5f88797dce73..cedabd0f4bfe 100644
--- a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
+++ b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c
@@ -21,6 +21,7 @@
21 */ 21 */
22 22
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/module.h>
24#include <linux/of_platform.h> 25#include <linux/of_platform.h>
25#include <asm/io.h> 26#include <asm/io.h>
26 27
@@ -200,6 +201,9 @@ static struct of_device_id mpc85xx_l2ctlr_of_match[] = {
200 { 201 {
201 .compatible = "fsl,p1022-l2-cache-controller", 202 .compatible = "fsl,p1022-l2-cache-controller",
202 }, 203 },
204 {
205 .compatible = "fsl,mpc8548-l2-cache-controller",
206 },
203 {}, 207 {},
204}; 208};
205 209
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index ecb5c1946d22..6e097de00e09 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -60,7 +60,7 @@ static struct irq_chip fsl_msi_chip = {
60 .name = "FSL-MSI", 60 .name = "FSL-MSI",
61}; 61};
62 62
63static int fsl_msi_host_map(struct irq_host *h, unsigned int virq, 63static int fsl_msi_host_map(struct irq_domain *h, unsigned int virq,
64 irq_hw_number_t hw) 64 irq_hw_number_t hw)
65{ 65{
66 struct fsl_msi *msi_data = h->host_data; 66 struct fsl_msi *msi_data = h->host_data;
@@ -74,7 +74,7 @@ static int fsl_msi_host_map(struct irq_host *h, unsigned int virq,
74 return 0; 74 return 0;
75} 75}
76 76
77static struct irq_host_ops fsl_msi_host_ops = { 77static const struct irq_domain_ops fsl_msi_host_ops = {
78 .map = fsl_msi_host_map, 78 .map = fsl_msi_host_map,
79}; 79};
80 80
@@ -387,8 +387,8 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)
387 } 387 }
388 platform_set_drvdata(dev, msi); 388 platform_set_drvdata(dev, msi);
389 389
390 msi->irqhost = irq_alloc_host(dev->dev.of_node, IRQ_HOST_MAP_LINEAR, 390 msi->irqhost = irq_domain_add_linear(dev->dev.of_node,
391 NR_MSI_IRQS, &fsl_msi_host_ops, 0); 391 NR_MSI_IRQS, &fsl_msi_host_ops, msi);
392 392
393 if (msi->irqhost == NULL) { 393 if (msi->irqhost == NULL) {
394 dev_err(&dev->dev, "No memory for MSI irqhost\n"); 394 dev_err(&dev->dev, "No memory for MSI irqhost\n");
@@ -410,6 +410,7 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)
410 410
411 msi->msi_regs = ioremap(res.start, resource_size(&res)); 411 msi->msi_regs = ioremap(res.start, resource_size(&res));
412 if (!msi->msi_regs) { 412 if (!msi->msi_regs) {
413 err = -ENOMEM;
413 dev_err(&dev->dev, "could not map node %s\n", 414 dev_err(&dev->dev, "could not map node %s\n",
414 dev->dev.of_node->full_name); 415 dev->dev.of_node->full_name);
415 goto error_out; 416 goto error_out;
@@ -420,8 +421,6 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev)
420 421
421 msi->feature = features->fsl_pic_ip; 422 msi->feature = features->fsl_pic_ip;
422 423
423 msi->irqhost->host_data = msi;
424
425 /* 424 /*
426 * Remember the phandle, so that we can match with any PCI nodes 425 * Remember the phandle, so that we can match with any PCI nodes
427 * that have an "fsl,msi" property. 426 * that have an "fsl,msi" property.
diff --git a/arch/powerpc/sysdev/fsl_msi.h b/arch/powerpc/sysdev/fsl_msi.h
index f6c646a52541..8225f8653f78 100644
--- a/arch/powerpc/sysdev/fsl_msi.h
+++ b/arch/powerpc/sysdev/fsl_msi.h
@@ -26,7 +26,7 @@
26#define FSL_PIC_IP_VMPIC 0x00000003 26#define FSL_PIC_IP_VMPIC 0x00000003
27 27
28struct fsl_msi { 28struct fsl_msi {
29 struct irq_host *irqhost; 29 struct irq_domain *irqhost;
30 30
31 unsigned long cascade_irq; 31 unsigned long cascade_irq;
32 32
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 3b61e8cf3421..6073288fed29 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -205,12 +205,12 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
205 205
206 if (paddr_hi == paddr_lo) { 206 if (paddr_hi == paddr_lo) {
207 pr_err("%s: No outbound window space\n", name); 207 pr_err("%s: No outbound window space\n", name);
208 return ; 208 goto out;
209 } 209 }
210 210
211 if (paddr_lo == 0) { 211 if (paddr_lo == 0) {
212 pr_err("%s: No space for inbound window\n", name); 212 pr_err("%s: No space for inbound window\n", name);
213 return ; 213 goto out;
214 } 214 }
215 215
216 /* setup PCSRBAR/PEXCSRBAR */ 216 /* setup PCSRBAR/PEXCSRBAR */
@@ -357,6 +357,7 @@ static void __init setup_pci_atmu(struct pci_controller *hose,
357 (u64)hose->dma_window_size); 357 (u64)hose->dma_window_size);
358 } 358 }
359 359
360out:
360 iounmap(pci); 361 iounmap(pci);
361} 362}
362 363
@@ -384,26 +385,36 @@ static void __init setup_pci_cmd(struct pci_controller *hose)
384void fsl_pcibios_fixup_bus(struct pci_bus *bus) 385void fsl_pcibios_fixup_bus(struct pci_bus *bus)
385{ 386{
386 struct pci_controller *hose = pci_bus_to_host(bus); 387 struct pci_controller *hose = pci_bus_to_host(bus);
387 int i; 388 int i, is_pcie = 0, no_link;
388 389
389 if ((bus->parent == hose->bus) && 390 /* The root complex bridge comes up with bogus resources,
390 ((fsl_pcie_bus_fixup && 391 * we copy the PHB ones in.
391 early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP)) || 392 *
392 (hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK))) 393 * With the current generic PCI code, the PHB bus no longer
393 { 394 * has bus->resource[0..4] set, so things are a bit more
394 for (i = 0; i < 4; ++i) { 395 * tricky.
396 */
397
398 if (fsl_pcie_bus_fixup)
399 is_pcie = early_find_capability(hose, 0, 0, PCI_CAP_ID_EXP);
400 no_link = !!(hose->indirect_type & PPC_INDIRECT_TYPE_NO_PCIE_LINK);
401
402 if (bus->parent == hose->bus && (is_pcie || no_link)) {
403 for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; ++i) {
395 struct resource *res = bus->resource[i]; 404 struct resource *res = bus->resource[i];
396 struct resource *par = bus->parent->resource[i]; 405 struct resource *par;
397 if (res) { 406
398 res->start = 0; 407 if (!res)
399 res->end = 0; 408 continue;
400 res->flags = 0; 409 if (i == 0)
401 } 410 par = &hose->io_resource;
402 if (res && par) { 411 else if (i < 4)
403 res->start = par->start; 412 par = &hose->mem_resources[i-1];
404 res->end = par->end; 413 else par = NULL;
405 res->flags = par->flags; 414
406 } 415 res->start = par ? par->start : 0;
416 res->end = par ? par->end : 0;
417 res->flags = par ? par->flags : 0;
407 } 418 }
408 } 419 }
409} 420}
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index a4c4f4a932d8..5b6f556094dd 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -66,8 +66,8 @@
66 " li %0,%3\n" \ 66 " li %0,%3\n" \
67 " b 2b\n" \ 67 " b 2b\n" \
68 ".section __ex_table,\"a\"\n" \ 68 ".section __ex_table,\"a\"\n" \
69 " .align 2\n" \ 69 PPC_LONG_ALIGN "\n" \
70 " .long 1b,3b\n" \ 70 PPC_LONG "1b,3b\n" \
71 ".text" \ 71 ".text" \
72 : "=r" (err), "=r" (x) \ 72 : "=r" (err), "=r" (x) \
73 : "b" (addr), "i" (-EFAULT), "0" (err)) 73 : "b" (addr), "i" (-EFAULT), "0" (err))
diff --git a/arch/powerpc/sysdev/fsl_rmu.c b/arch/powerpc/sysdev/fsl_rmu.c
index 15485789e9db..14bd5221f28a 100644
--- a/arch/powerpc/sysdev/fsl_rmu.c
+++ b/arch/powerpc/sysdev/fsl_rmu.c
@@ -100,14 +100,8 @@
100#define DOORBELL_DSR_TE 0x00000080 100#define DOORBELL_DSR_TE 0x00000080
101#define DOORBELL_DSR_QFI 0x00000010 101#define DOORBELL_DSR_QFI 0x00000010
102#define DOORBELL_DSR_DIQI 0x00000001 102#define DOORBELL_DSR_DIQI 0x00000001
103#define DOORBELL_TID_OFFSET 0x02
104#define DOORBELL_SID_OFFSET 0x04
105#define DOORBELL_INFO_OFFSET 0x06
106 103
107#define DOORBELL_MESSAGE_SIZE 0x08 104#define DOORBELL_MESSAGE_SIZE 0x08
108#define DBELL_SID(x) (*(u16 *)(x + DOORBELL_SID_OFFSET))
109#define DBELL_TID(x) (*(u16 *)(x + DOORBELL_TID_OFFSET))
110#define DBELL_INF(x) (*(u16 *)(x + DOORBELL_INFO_OFFSET))
111 105
112struct rio_msg_regs { 106struct rio_msg_regs {
113 u32 omr; 107 u32 omr;
@@ -193,6 +187,13 @@ struct fsl_rmu {
193 int rxirq; 187 int rxirq;
194}; 188};
195 189
190struct rio_dbell_msg {
191 u16 pad1;
192 u16 tid;
193 u16 sid;
194 u16 info;
195};
196
196/** 197/**
197 * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler 198 * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler
198 * @irq: Linux interrupt number 199 * @irq: Linux interrupt number
@@ -311,8 +312,8 @@ fsl_rio_dbell_handler(int irq, void *dev_instance)
311 312
312 /* XXX Need to check/dispatch until queue empty */ 313 /* XXX Need to check/dispatch until queue empty */
313 if (dsr & DOORBELL_DSR_DIQI) { 314 if (dsr & DOORBELL_DSR_DIQI) {
314 u32 dmsg = 315 struct rio_dbell_msg *dmsg =
315 (u32) fsl_dbell->dbell_ring.virt + 316 fsl_dbell->dbell_ring.virt +
316 (in_be32(&fsl_dbell->dbell_regs->dqdpar) & 0xfff); 317 (in_be32(&fsl_dbell->dbell_regs->dqdpar) & 0xfff);
317 struct rio_dbell *dbell; 318 struct rio_dbell *dbell;
318 int found = 0; 319 int found = 0;
@@ -320,25 +321,25 @@ fsl_rio_dbell_handler(int irq, void *dev_instance)
320 pr_debug 321 pr_debug
321 ("RIO: processing doorbell," 322 ("RIO: processing doorbell,"
322 " sid %2.2x tid %2.2x info %4.4x\n", 323 " sid %2.2x tid %2.2x info %4.4x\n",
323 DBELL_SID(dmsg), DBELL_TID(dmsg), DBELL_INF(dmsg)); 324 dmsg->sid, dmsg->tid, dmsg->info);
324 325
325 for (i = 0; i < MAX_PORT_NUM; i++) { 326 for (i = 0; i < MAX_PORT_NUM; i++) {
326 if (fsl_dbell->mport[i]) { 327 if (fsl_dbell->mport[i]) {
327 list_for_each_entry(dbell, 328 list_for_each_entry(dbell,
328 &fsl_dbell->mport[i]->dbells, node) { 329 &fsl_dbell->mport[i]->dbells, node) {
329 if ((dbell->res->start 330 if ((dbell->res->start
330 <= DBELL_INF(dmsg)) 331 <= dmsg->info)
331 && (dbell->res->end 332 && (dbell->res->end
332 >= DBELL_INF(dmsg))) { 333 >= dmsg->info)) {
333 found = 1; 334 found = 1;
334 break; 335 break;
335 } 336 }
336 } 337 }
337 if (found && dbell->dinb) { 338 if (found && dbell->dinb) {
338 dbell->dinb(fsl_dbell->mport[i], 339 dbell->dinb(fsl_dbell->mport[i],
339 dbell->dev_id, DBELL_SID(dmsg), 340 dbell->dev_id, dmsg->sid,
340 DBELL_TID(dmsg), 341 dmsg->tid,
341 DBELL_INF(dmsg)); 342 dmsg->info);
342 break; 343 break;
343 } 344 }
344 } 345 }
@@ -348,8 +349,8 @@ fsl_rio_dbell_handler(int irq, void *dev_instance)
348 pr_debug 349 pr_debug
349 ("RIO: spurious doorbell," 350 ("RIO: spurious doorbell,"
350 " sid %2.2x tid %2.2x info %4.4x\n", 351 " sid %2.2x tid %2.2x info %4.4x\n",
351 DBELL_SID(dmsg), DBELL_TID(dmsg), 352 dmsg->sid, dmsg->tid,
352 DBELL_INF(dmsg)); 353 dmsg->info);
353 } 354 }
354 setbits32(&fsl_dbell->dbell_regs->dmr, DOORBELL_DMR_DI); 355 setbits32(&fsl_dbell->dbell_regs->dmr, DOORBELL_DMR_DI);
355 out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_DIQI); 356 out_be32(&fsl_dbell->dbell_regs->dsr, DOORBELL_DSR_DIQI);
@@ -657,7 +658,7 @@ fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox,
657 int ret = 0; 658 int ret = 0;
658 659
659 pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \ 660 pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \
660 "%8.8x len %8.8x\n", rdev->destid, mbox, (int)buffer, len); 661 "%p len %8.8zx\n", rdev->destid, mbox, buffer, len);
661 if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) { 662 if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) {
662 ret = -EINVAL; 663 ret = -EINVAL;
663 goto out; 664 goto out;
@@ -972,7 +973,8 @@ out:
972void *fsl_get_inb_message(struct rio_mport *mport, int mbox) 973void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
973{ 974{
974 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport); 975 struct fsl_rmu *rmu = GET_RMM_HANDLE(mport);
975 u32 phys_buf, virt_buf; 976 u32 phys_buf;
977 void *virt_buf;
976 void *buf = NULL; 978 void *buf = NULL;
977 int buf_idx; 979 int buf_idx;
978 980
@@ -982,7 +984,7 @@ void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
982 if (phys_buf == in_be32(&rmu->msg_regs->ifqepar)) 984 if (phys_buf == in_be32(&rmu->msg_regs->ifqepar))
983 goto out2; 985 goto out2;
984 986
985 virt_buf = (u32) rmu->msg_rx_ring.virt + (phys_buf 987 virt_buf = rmu->msg_rx_ring.virt + (phys_buf
986 - rmu->msg_rx_ring.phys); 988 - rmu->msg_rx_ring.phys);
987 buf_idx = (phys_buf - rmu->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE; 989 buf_idx = (phys_buf - rmu->msg_rx_ring.phys) / RIO_MAX_MSG_SIZE;
988 buf = rmu->msg_rx_ring.virt_buffer[buf_idx]; 990 buf = rmu->msg_rx_ring.virt_buffer[buf_idx];
@@ -994,7 +996,7 @@ void *fsl_get_inb_message(struct rio_mport *mport, int mbox)
994 } 996 }
995 997
996 /* Copy max message size, caller is expected to allocate that big */ 998 /* Copy max message size, caller is expected to allocate that big */
997 memcpy(buf, (void *)virt_buf, RIO_MAX_MSG_SIZE); 999 memcpy(buf, virt_buf, RIO_MAX_MSG_SIZE);
998 1000
999 /* Clear the available buffer */ 1001 /* Clear the available buffer */
1000 rmu->msg_rx_ring.virt_buffer[buf_idx] = NULL; 1002 rmu->msg_rx_ring.virt_buffer[buf_idx] = NULL;
diff --git a/arch/powerpc/sysdev/ge/Makefile b/arch/powerpc/sysdev/ge/Makefile
new file mode 100644
index 000000000000..8731ffcb79b9
--- /dev/null
+++ b/arch/powerpc/sysdev/ge/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_GE_FPGA) += ge_pic.o
diff --git a/arch/powerpc/platforms/86xx/gef_pic.c b/arch/powerpc/sysdev/ge/ge_pic.c
index 94594e58594c..2bcb78bb3a15 100644
--- a/arch/powerpc/platforms/86xx/gef_pic.c
+++ b/arch/powerpc/sysdev/ge/ge_pic.c
@@ -22,7 +22,7 @@
22#include <asm/prom.h> 22#include <asm/prom.h>
23#include <asm/irq.h> 23#include <asm/irq.h>
24 24
25#include "gef_pic.h" 25#include "ge_pic.h"
26 26
27#define DEBUG 27#define DEBUG
28#undef DEBUG 28#undef DEBUG
@@ -50,7 +50,7 @@
50static DEFINE_RAW_SPINLOCK(gef_pic_lock); 50static DEFINE_RAW_SPINLOCK(gef_pic_lock);
51 51
52static void __iomem *gef_pic_irq_reg_base; 52static void __iomem *gef_pic_irq_reg_base;
53static struct irq_host *gef_pic_irq_host; 53static struct irq_domain *gef_pic_irq_host;
54static int gef_pic_cascade_irq; 54static int gef_pic_cascade_irq;
55 55
56/* 56/*
@@ -153,7 +153,7 @@ static struct irq_chip gef_pic_chip = {
153/* When an interrupt is being configured, this call allows some flexibilty 153/* When an interrupt is being configured, this call allows some flexibilty
154 * in deciding which irq_chip structure is used 154 * in deciding which irq_chip structure is used
155 */ 155 */
156static int gef_pic_host_map(struct irq_host *h, unsigned int virq, 156static int gef_pic_host_map(struct irq_domain *h, unsigned int virq,
157 irq_hw_number_t hwirq) 157 irq_hw_number_t hwirq)
158{ 158{
159 /* All interrupts are LEVEL sensitive */ 159 /* All interrupts are LEVEL sensitive */
@@ -163,7 +163,7 @@ static int gef_pic_host_map(struct irq_host *h, unsigned int virq,
163 return 0; 163 return 0;
164} 164}
165 165
166static int gef_pic_host_xlate(struct irq_host *h, struct device_node *ct, 166static int gef_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
167 const u32 *intspec, unsigned int intsize, 167 const u32 *intspec, unsigned int intsize,
168 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 168 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
169{ 169{
@@ -177,7 +177,7 @@ static int gef_pic_host_xlate(struct irq_host *h, struct device_node *ct,
177 return 0; 177 return 0;
178} 178}
179 179
180static struct irq_host_ops gef_pic_host_ops = { 180static const struct irq_domain_ops gef_pic_host_ops = {
181 .map = gef_pic_host_map, 181 .map = gef_pic_host_map,
182 .xlate = gef_pic_host_xlate, 182 .xlate = gef_pic_host_xlate,
183}; 183};
@@ -211,10 +211,9 @@ void __init gef_pic_init(struct device_node *np)
211 return; 211 return;
212 } 212 }
213 213
214 /* Setup an irq_host structure */ 214 /* Setup an irq_domain structure */
215 gef_pic_irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 215 gef_pic_irq_host = irq_domain_add_linear(np, GEF_PIC_NUM_IRQS,
216 GEF_PIC_NUM_IRQS, 216 &gef_pic_host_ops, NULL);
217 &gef_pic_host_ops, NO_IRQ);
218 if (gef_pic_irq_host == NULL) 217 if (gef_pic_irq_host == NULL)
219 return; 218 return;
220 219
diff --git a/arch/powerpc/platforms/86xx/gef_pic.h b/arch/powerpc/sysdev/ge/ge_pic.h
index 6149916da3f4..6149916da3f4 100644
--- a/arch/powerpc/platforms/86xx/gef_pic.h
+++ b/arch/powerpc/sysdev/ge/ge_pic.h
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c
index d18bb27e4df9..997df6a7ab5d 100644
--- a/arch/powerpc/sysdev/i8259.c
+++ b/arch/powerpc/sysdev/i8259.c
@@ -25,7 +25,7 @@ static unsigned char cached_8259[2] = { 0xff, 0xff };
25 25
26static DEFINE_RAW_SPINLOCK(i8259_lock); 26static DEFINE_RAW_SPINLOCK(i8259_lock);
27 27
28static struct irq_host *i8259_host; 28static struct irq_domain *i8259_host;
29 29
30/* 30/*
31 * Acknowledge the IRQ using either the PCI host bridge's interrupt 31 * Acknowledge the IRQ using either the PCI host bridge's interrupt
@@ -163,12 +163,12 @@ static struct resource pic_edgectrl_iores = {
163 .flags = IORESOURCE_BUSY, 163 .flags = IORESOURCE_BUSY,
164}; 164};
165 165
166static int i8259_host_match(struct irq_host *h, struct device_node *node) 166static int i8259_host_match(struct irq_domain *h, struct device_node *node)
167{ 167{
168 return h->of_node == NULL || h->of_node == node; 168 return h->of_node == NULL || h->of_node == node;
169} 169}
170 170
171static int i8259_host_map(struct irq_host *h, unsigned int virq, 171static int i8259_host_map(struct irq_domain *h, unsigned int virq,
172 irq_hw_number_t hw) 172 irq_hw_number_t hw)
173{ 173{
174 pr_debug("i8259_host_map(%d, 0x%lx)\n", virq, hw); 174 pr_debug("i8259_host_map(%d, 0x%lx)\n", virq, hw);
@@ -185,7 +185,7 @@ static int i8259_host_map(struct irq_host *h, unsigned int virq,
185 return 0; 185 return 0;
186} 186}
187 187
188static int i8259_host_xlate(struct irq_host *h, struct device_node *ct, 188static int i8259_host_xlate(struct irq_domain *h, struct device_node *ct,
189 const u32 *intspec, unsigned int intsize, 189 const u32 *intspec, unsigned int intsize,
190 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 190 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
191{ 191{
@@ -205,13 +205,13 @@ static int i8259_host_xlate(struct irq_host *h, struct device_node *ct,
205 return 0; 205 return 0;
206} 206}
207 207
208static struct irq_host_ops i8259_host_ops = { 208static struct irq_domain_ops i8259_host_ops = {
209 .match = i8259_host_match, 209 .match = i8259_host_match,
210 .map = i8259_host_map, 210 .map = i8259_host_map,
211 .xlate = i8259_host_xlate, 211 .xlate = i8259_host_xlate,
212}; 212};
213 213
214struct irq_host *i8259_get_host(void) 214struct irq_domain *i8259_get_host(void)
215{ 215{
216 return i8259_host; 216 return i8259_host;
217} 217}
@@ -263,8 +263,7 @@ void i8259_init(struct device_node *node, unsigned long intack_addr)
263 raw_spin_unlock_irqrestore(&i8259_lock, flags); 263 raw_spin_unlock_irqrestore(&i8259_lock, flags);
264 264
265 /* create a legacy host */ 265 /* create a legacy host */
266 i8259_host = irq_alloc_host(node, IRQ_HOST_MAP_LEGACY, 266 i8259_host = irq_domain_add_legacy_isa(node, &i8259_host_ops, NULL);
267 0, &i8259_host_ops, 0);
268 if (i8259_host == NULL) { 267 if (i8259_host == NULL) {
269 printk(KERN_ERR "i8259: failed to allocate irq host !\n"); 268 printk(KERN_ERR "i8259: failed to allocate irq host !\n");
270 return; 269 return;
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index 95da897f05a7..b50f97811c25 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -672,13 +672,13 @@ static struct irq_chip ipic_edge_irq_chip = {
672 .irq_set_type = ipic_set_irq_type, 672 .irq_set_type = ipic_set_irq_type,
673}; 673};
674 674
675static int ipic_host_match(struct irq_host *h, struct device_node *node) 675static int ipic_host_match(struct irq_domain *h, struct device_node *node)
676{ 676{
677 /* Exact match, unless ipic node is NULL */ 677 /* Exact match, unless ipic node is NULL */
678 return h->of_node == NULL || h->of_node == node; 678 return h->of_node == NULL || h->of_node == node;
679} 679}
680 680
681static int ipic_host_map(struct irq_host *h, unsigned int virq, 681static int ipic_host_map(struct irq_domain *h, unsigned int virq,
682 irq_hw_number_t hw) 682 irq_hw_number_t hw)
683{ 683{
684 struct ipic *ipic = h->host_data; 684 struct ipic *ipic = h->host_data;
@@ -692,26 +692,10 @@ static int ipic_host_map(struct irq_host *h, unsigned int virq,
692 return 0; 692 return 0;
693} 693}
694 694
695static int ipic_host_xlate(struct irq_host *h, struct device_node *ct, 695static struct irq_domain_ops ipic_host_ops = {
696 const u32 *intspec, unsigned int intsize,
697 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
698
699{
700 /* interrupt sense values coming from the device tree equal either
701 * LEVEL_LOW (low assertion) or EDGE_FALLING (high-to-low change)
702 */
703 *out_hwirq = intspec[0];
704 if (intsize > 1)
705 *out_flags = intspec[1];
706 else
707 *out_flags = IRQ_TYPE_NONE;
708 return 0;
709}
710
711static struct irq_host_ops ipic_host_ops = {
712 .match = ipic_host_match, 696 .match = ipic_host_match,
713 .map = ipic_host_map, 697 .map = ipic_host_map,
714 .xlate = ipic_host_xlate, 698 .xlate = irq_domain_xlate_onetwocell,
715}; 699};
716 700
717struct ipic * __init ipic_init(struct device_node *node, unsigned int flags) 701struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
@@ -728,9 +712,8 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
728 if (ipic == NULL) 712 if (ipic == NULL)
729 return NULL; 713 return NULL;
730 714
731 ipic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 715 ipic->irqhost = irq_domain_add_linear(node, NR_IPIC_INTS,
732 NR_IPIC_INTS, 716 &ipic_host_ops, ipic);
733 &ipic_host_ops, 0);
734 if (ipic->irqhost == NULL) { 717 if (ipic->irqhost == NULL) {
735 kfree(ipic); 718 kfree(ipic);
736 return NULL; 719 return NULL;
@@ -738,8 +721,6 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags)
738 721
739 ipic->regs = ioremap(res.start, resource_size(&res)); 722 ipic->regs = ioremap(res.start, resource_size(&res));
740 723
741 ipic->irqhost->host_data = ipic;
742
743 /* init hw */ 724 /* init hw */
744 ipic_write(ipic->regs, IPIC_SICNR, 0x0); 725 ipic_write(ipic->regs, IPIC_SICNR, 0x0);
745 726
diff --git a/arch/powerpc/sysdev/ipic.h b/arch/powerpc/sysdev/ipic.h
index 9391c57b0c51..90031d1282e1 100644
--- a/arch/powerpc/sysdev/ipic.h
+++ b/arch/powerpc/sysdev/ipic.h
@@ -43,7 +43,7 @@ struct ipic {
43 volatile u32 __iomem *regs; 43 volatile u32 __iomem *regs;
44 44
45 /* The remapper for this IPIC */ 45 /* The remapper for this IPIC */
46 struct irq_host *irqhost; 46 struct irq_domain *irqhost;
47}; 47};
48 48
49struct ipic_info { 49struct ipic_info {
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c
index 2ca0a85fcce9..d5f5416be310 100644
--- a/arch/powerpc/sysdev/mpc8xx_pic.c
+++ b/arch/powerpc/sysdev/mpc8xx_pic.c
@@ -17,7 +17,7 @@
17 17
18extern int cpm_get_irq(struct pt_regs *regs); 18extern int cpm_get_irq(struct pt_regs *regs);
19 19
20static struct irq_host *mpc8xx_pic_host; 20static struct irq_domain *mpc8xx_pic_host;
21#define NR_MASK_WORDS ((NR_IRQS + 31) / 32) 21#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
22static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS]; 22static unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
23static sysconf8xx_t __iomem *siu_reg; 23static sysconf8xx_t __iomem *siu_reg;
@@ -110,7 +110,7 @@ unsigned int mpc8xx_get_irq(void)
110 110
111} 111}
112 112
113static int mpc8xx_pic_host_map(struct irq_host *h, unsigned int virq, 113static int mpc8xx_pic_host_map(struct irq_domain *h, unsigned int virq,
114 irq_hw_number_t hw) 114 irq_hw_number_t hw)
115{ 115{
116 pr_debug("mpc8xx_pic_host_map(%d, 0x%lx)\n", virq, hw); 116 pr_debug("mpc8xx_pic_host_map(%d, 0x%lx)\n", virq, hw);
@@ -121,7 +121,7 @@ static int mpc8xx_pic_host_map(struct irq_host *h, unsigned int virq,
121} 121}
122 122
123 123
124static int mpc8xx_pic_host_xlate(struct irq_host *h, struct device_node *ct, 124static int mpc8xx_pic_host_xlate(struct irq_domain *h, struct device_node *ct,
125 const u32 *intspec, unsigned int intsize, 125 const u32 *intspec, unsigned int intsize,
126 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 126 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
127{ 127{
@@ -142,7 +142,7 @@ static int mpc8xx_pic_host_xlate(struct irq_host *h, struct device_node *ct,
142} 142}
143 143
144 144
145static struct irq_host_ops mpc8xx_pic_host_ops = { 145static struct irq_domain_ops mpc8xx_pic_host_ops = {
146 .map = mpc8xx_pic_host_map, 146 .map = mpc8xx_pic_host_map,
147 .xlate = mpc8xx_pic_host_xlate, 147 .xlate = mpc8xx_pic_host_xlate,
148}; 148};
@@ -171,8 +171,7 @@ int mpc8xx_pic_init(void)
171 goto out; 171 goto out;
172 } 172 }
173 173
174 mpc8xx_pic_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 174 mpc8xx_pic_host = irq_domain_add_linear(np, 64, &mpc8xx_pic_host_ops, NULL);
175 64, &mpc8xx_pic_host_ops, 64);
176 if (mpc8xx_pic_host == NULL) { 175 if (mpc8xx_pic_host == NULL) {
177 printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n"); 176 printk(KERN_ERR "MPC8xx PIC: failed to allocate irq host!\n");
178 ret = -ENOMEM; 177 ret = -ENOMEM;
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 4e9ccb1015de..9ac71ebd2c40 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -873,7 +873,7 @@ int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type)
873 DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n", 873 DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n",
874 mpic, d->irq, src, flow_type); 874 mpic, d->irq, src, flow_type);
875 875
876 if (src >= mpic->irq_count) 876 if (src >= mpic->num_sources)
877 return -EINVAL; 877 return -EINVAL;
878 878
879 if (flow_type == IRQ_TYPE_NONE) 879 if (flow_type == IRQ_TYPE_NONE)
@@ -909,7 +909,7 @@ void mpic_set_vector(unsigned int virq, unsigned int vector)
909 DBG("mpic: set_vector(mpic:@%p,virq:%d,src:%d,vector:0x%x)\n", 909 DBG("mpic: set_vector(mpic:@%p,virq:%d,src:%d,vector:0x%x)\n",
910 mpic, virq, src, vector); 910 mpic, virq, src, vector);
911 911
912 if (src >= mpic->irq_count) 912 if (src >= mpic->num_sources)
913 return; 913 return;
914 914
915 vecpri = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)); 915 vecpri = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI));
@@ -926,7 +926,7 @@ void mpic_set_destination(unsigned int virq, unsigned int cpuid)
926 DBG("mpic: set_destination(mpic:@%p,virq:%d,src:%d,cpuid:0x%x)\n", 926 DBG("mpic: set_destination(mpic:@%p,virq:%d,src:%d,cpuid:0x%x)\n",
927 mpic, virq, src, cpuid); 927 mpic, virq, src, cpuid);
928 928
929 if (src >= mpic->irq_count) 929 if (src >= mpic->num_sources)
930 return; 930 return;
931 931
932 mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid); 932 mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid);
@@ -965,13 +965,13 @@ static struct irq_chip mpic_irq_ht_chip = {
965#endif /* CONFIG_MPIC_U3_HT_IRQS */ 965#endif /* CONFIG_MPIC_U3_HT_IRQS */
966 966
967 967
968static int mpic_host_match(struct irq_host *h, struct device_node *node) 968static int mpic_host_match(struct irq_domain *h, struct device_node *node)
969{ 969{
970 /* Exact match, unless mpic node is NULL */ 970 /* Exact match, unless mpic node is NULL */
971 return h->of_node == NULL || h->of_node == node; 971 return h->of_node == NULL || h->of_node == node;
972} 972}
973 973
974static int mpic_host_map(struct irq_host *h, unsigned int virq, 974static int mpic_host_map(struct irq_domain *h, unsigned int virq,
975 irq_hw_number_t hw) 975 irq_hw_number_t hw)
976{ 976{
977 struct mpic *mpic = h->host_data; 977 struct mpic *mpic = h->host_data;
@@ -1006,7 +1006,7 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq,
1006 return 0; 1006 return 0;
1007 } 1007 }
1008 1008
1009 if (hw >= mpic->irq_count) 1009 if (hw >= mpic->num_sources)
1010 return -EINVAL; 1010 return -EINVAL;
1011 1011
1012 mpic_msi_reserve_hwirq(mpic, hw); 1012 mpic_msi_reserve_hwirq(mpic, hw);
@@ -1041,7 +1041,7 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq,
1041 return 0; 1041 return 0;
1042} 1042}
1043 1043
1044static int mpic_host_xlate(struct irq_host *h, struct device_node *ct, 1044static int mpic_host_xlate(struct irq_domain *h, struct device_node *ct,
1045 const u32 *intspec, unsigned int intsize, 1045 const u32 *intspec, unsigned int intsize,
1046 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 1046 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
1047 1047
@@ -1121,13 +1121,13 @@ static void mpic_cascade(unsigned int irq, struct irq_desc *desc)
1121 BUG_ON(!(mpic->flags & MPIC_SECONDARY)); 1121 BUG_ON(!(mpic->flags & MPIC_SECONDARY));
1122 1122
1123 virq = mpic_get_one_irq(mpic); 1123 virq = mpic_get_one_irq(mpic);
1124 if (virq != NO_IRQ) 1124 if (virq)
1125 generic_handle_irq(virq); 1125 generic_handle_irq(virq);
1126 1126
1127 chip->irq_eoi(&desc->irq_data); 1127 chip->irq_eoi(&desc->irq_data);
1128} 1128}
1129 1129
1130static struct irq_host_ops mpic_host_ops = { 1130static struct irq_domain_ops mpic_host_ops = {
1131 .match = mpic_host_match, 1131 .match = mpic_host_match,
1132 .map = mpic_host_map, 1132 .map = mpic_host_map,
1133 .xlate = mpic_host_xlate, 1133 .xlate = mpic_host_xlate,
@@ -1149,6 +1149,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1149 u32 greg_feature; 1149 u32 greg_feature;
1150 const char *vers; 1150 const char *vers;
1151 const u32 *psrc; 1151 const u32 *psrc;
1152 u32 last_irq;
1152 1153
1153 /* Default MPIC search parameters */ 1154 /* Default MPIC search parameters */
1154 static const struct of_device_id __initconst mpic_device_id[] = { 1155 static const struct of_device_id __initconst mpic_device_id[] = {
@@ -1182,6 +1183,16 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1182 } 1183 }
1183 } 1184 }
1184 1185
1186 /* Read extra device-tree properties into the flags variable */
1187 if (of_get_property(node, "big-endian", NULL))
1188 flags |= MPIC_BIG_ENDIAN;
1189 if (of_get_property(node, "pic-no-reset", NULL))
1190 flags |= MPIC_NO_RESET;
1191 if (of_get_property(node, "single-cpu-affinity", NULL))
1192 flags |= MPIC_SINGLE_DEST_CPU;
1193 if (of_device_is_compatible(node, "fsl,mpic"))
1194 flags |= MPIC_FSL;
1195
1185 mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL); 1196 mpic = kzalloc(sizeof(struct mpic), GFP_KERNEL);
1186 if (mpic == NULL) 1197 if (mpic == NULL)
1187 goto err_of_node_put; 1198 goto err_of_node_put;
@@ -1189,15 +1200,16 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1189 mpic->name = name; 1200 mpic->name = name;
1190 mpic->node = node; 1201 mpic->node = node;
1191 mpic->paddr = phys_addr; 1202 mpic->paddr = phys_addr;
1203 mpic->flags = flags;
1192 1204
1193 mpic->hc_irq = mpic_irq_chip; 1205 mpic->hc_irq = mpic_irq_chip;
1194 mpic->hc_irq.name = name; 1206 mpic->hc_irq.name = name;
1195 if (!(flags & MPIC_SECONDARY)) 1207 if (!(mpic->flags & MPIC_SECONDARY))
1196 mpic->hc_irq.irq_set_affinity = mpic_set_affinity; 1208 mpic->hc_irq.irq_set_affinity = mpic_set_affinity;
1197#ifdef CONFIG_MPIC_U3_HT_IRQS 1209#ifdef CONFIG_MPIC_U3_HT_IRQS
1198 mpic->hc_ht_irq = mpic_irq_ht_chip; 1210 mpic->hc_ht_irq = mpic_irq_ht_chip;
1199 mpic->hc_ht_irq.name = name; 1211 mpic->hc_ht_irq.name = name;
1200 if (!(flags & MPIC_SECONDARY)) 1212 if (!(mpic->flags & MPIC_SECONDARY))
1201 mpic->hc_ht_irq.irq_set_affinity = mpic_set_affinity; 1213 mpic->hc_ht_irq.irq_set_affinity = mpic_set_affinity;
1202#endif /* CONFIG_MPIC_U3_HT_IRQS */ 1214#endif /* CONFIG_MPIC_U3_HT_IRQS */
1203 1215
@@ -1209,12 +1221,9 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1209 mpic->hc_tm = mpic_tm_chip; 1221 mpic->hc_tm = mpic_tm_chip;
1210 mpic->hc_tm.name = name; 1222 mpic->hc_tm.name = name;
1211 1223
1212 mpic->flags = flags;
1213 mpic->isu_size = isu_size;
1214 mpic->irq_count = irq_count;
1215 mpic->num_sources = 0; /* so far */ 1224 mpic->num_sources = 0; /* so far */
1216 1225
1217 if (flags & MPIC_LARGE_VECTORS) 1226 if (mpic->flags & MPIC_LARGE_VECTORS)
1218 intvec_top = 2047; 1227 intvec_top = 2047;
1219 else 1228 else
1220 intvec_top = 255; 1229 intvec_top = 255;
@@ -1233,12 +1242,6 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1233 mpic->ipi_vecs[3] = intvec_top - 1; 1242 mpic->ipi_vecs[3] = intvec_top - 1;
1234 mpic->spurious_vec = intvec_top; 1243 mpic->spurious_vec = intvec_top;
1235 1244
1236 /* Check for "big-endian" in device-tree */
1237 if (of_get_property(mpic->node, "big-endian", NULL) != NULL)
1238 mpic->flags |= MPIC_BIG_ENDIAN;
1239 if (of_device_is_compatible(mpic->node, "fsl,mpic"))
1240 mpic->flags |= MPIC_FSL;
1241
1242 /* Look for protected sources */ 1245 /* Look for protected sources */
1243 psrc = of_get_property(mpic->node, "protected-sources", &psize); 1246 psrc = of_get_property(mpic->node, "protected-sources", &psize);
1244 if (psrc) { 1247 if (psrc) {
@@ -1254,11 +1257,11 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1254 } 1257 }
1255 1258
1256#ifdef CONFIG_MPIC_WEIRD 1259#ifdef CONFIG_MPIC_WEIRD
1257 mpic->hw_set = mpic_infos[MPIC_GET_REGSET(flags)]; 1260 mpic->hw_set = mpic_infos[MPIC_GET_REGSET(mpic->flags)];
1258#endif 1261#endif
1259 1262
1260 /* default register type */ 1263 /* default register type */
1261 if (flags & MPIC_BIG_ENDIAN) 1264 if (mpic->flags & MPIC_BIG_ENDIAN)
1262 mpic->reg_type = mpic_access_mmio_be; 1265 mpic->reg_type = mpic_access_mmio_be;
1263 else 1266 else
1264 mpic->reg_type = mpic_access_mmio_le; 1267 mpic->reg_type = mpic_access_mmio_le;
@@ -1268,10 +1271,10 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1268 * only if the kernel includes DCR support. 1271 * only if the kernel includes DCR support.
1269 */ 1272 */
1270#ifdef CONFIG_PPC_DCR 1273#ifdef CONFIG_PPC_DCR
1271 if (flags & MPIC_USES_DCR) 1274 if (mpic->flags & MPIC_USES_DCR)
1272 mpic->reg_type = mpic_access_dcr; 1275 mpic->reg_type = mpic_access_dcr;
1273#else 1276#else
1274 BUG_ON(flags & MPIC_USES_DCR); 1277 BUG_ON(mpic->flags & MPIC_USES_DCR);
1275#endif 1278#endif
1276 1279
1277 /* Map the global registers */ 1280 /* Map the global registers */
@@ -1283,10 +1286,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1283 /* When using a device-node, reset requests are only honored if the MPIC 1286 /* When using a device-node, reset requests are only honored if the MPIC
1284 * is allowed to reset. 1287 * is allowed to reset.
1285 */ 1288 */
1286 if (of_get_property(mpic->node, "pic-no-reset", NULL)) 1289 if (!(mpic->flags & MPIC_NO_RESET)) {
1287 mpic->flags |= MPIC_NO_RESET;
1288
1289 if ((flags & MPIC_WANTS_RESET) && !(mpic->flags & MPIC_NO_RESET)) {
1290 printk(KERN_DEBUG "mpic: Resetting\n"); 1290 printk(KERN_DEBUG "mpic: Resetting\n");
1291 mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0), 1291 mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
1292 mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0)) 1292 mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
@@ -1297,31 +1297,17 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1297 } 1297 }
1298 1298
1299 /* CoreInt */ 1299 /* CoreInt */
1300 if (flags & MPIC_ENABLE_COREINT) 1300 if (mpic->flags & MPIC_ENABLE_COREINT)
1301 mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0), 1301 mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
1302 mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0)) 1302 mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
1303 | MPIC_GREG_GCONF_COREINT); 1303 | MPIC_GREG_GCONF_COREINT);
1304 1304
1305 if (flags & MPIC_ENABLE_MCK) 1305 if (mpic->flags & MPIC_ENABLE_MCK)
1306 mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0), 1306 mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0),
1307 mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0)) 1307 mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0))
1308 | MPIC_GREG_GCONF_MCK); 1308 | MPIC_GREG_GCONF_MCK);
1309 1309
1310 /* 1310 /*
1311 * Read feature register. For non-ISU MPICs, num sources as well. On
1312 * ISU MPICs, sources are counted as ISUs are added
1313 */
1314 greg_feature = mpic_read(mpic->gregs, MPIC_INFO(GREG_FEATURE_0));
1315 if (isu_size == 0) {
1316 if (flags & MPIC_BROKEN_FRR_NIRQS)
1317 mpic->num_sources = mpic->irq_count;
1318 else
1319 mpic->num_sources =
1320 ((greg_feature & MPIC_GREG_FEATURE_LAST_SRC_MASK)
1321 >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT) + 1;
1322 }
1323
1324 /*
1325 * The MPIC driver will crash if there are more cores than we 1311 * The MPIC driver will crash if there are more cores than we
1326 * can initialize, so we may as well catch that problem here. 1312 * can initialize, so we may as well catch that problem here.
1327 */ 1313 */
@@ -1336,19 +1322,42 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1336 0x1000); 1322 0x1000);
1337 } 1323 }
1338 1324
1325 /*
1326 * Read feature register. For non-ISU MPICs, num sources as well. On
1327 * ISU MPICs, sources are counted as ISUs are added
1328 */
1329 greg_feature = mpic_read(mpic->gregs, MPIC_INFO(GREG_FEATURE_0));
1330
1331 /*
1332 * By default, the last source number comes from the MPIC, but the
1333 * device-tree and board support code can override it on buggy hw.
1334 * If we get passed an isu_size (multi-isu MPIC) then we use that
1335 * as a default instead of the value read from the HW.
1336 */
1337 last_irq = (greg_feature & MPIC_GREG_FEATURE_LAST_SRC_MASK)
1338 >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT;
1339 if (isu_size)
1340 last_irq = isu_size * MPIC_MAX_ISU - 1;
1341 of_property_read_u32(mpic->node, "last-interrupt-source", &last_irq);
1342 if (irq_count)
1343 last_irq = irq_count - 1;
1344
1339 /* Initialize main ISU if none provided */ 1345 /* Initialize main ISU if none provided */
1340 if (mpic->isu_size == 0) { 1346 if (!isu_size) {
1341 mpic->isu_size = mpic->num_sources; 1347 isu_size = last_irq + 1;
1348 mpic->num_sources = isu_size;
1342 mpic_map(mpic, mpic->paddr, &mpic->isus[0], 1349 mpic_map(mpic, mpic->paddr, &mpic->isus[0],
1343 MPIC_INFO(IRQ_BASE), MPIC_INFO(IRQ_STRIDE) * mpic->isu_size); 1350 MPIC_INFO(IRQ_BASE),
1351 MPIC_INFO(IRQ_STRIDE) * isu_size);
1344 } 1352 }
1353
1354 mpic->isu_size = isu_size;
1345 mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1); 1355 mpic->isu_shift = 1 + __ilog2(mpic->isu_size - 1);
1346 mpic->isu_mask = (1 << mpic->isu_shift) - 1; 1356 mpic->isu_mask = (1 << mpic->isu_shift) - 1;
1347 1357
1348 mpic->irqhost = irq_alloc_host(mpic->node, IRQ_HOST_MAP_LINEAR, 1358 mpic->irqhost = irq_domain_add_linear(mpic->node,
1349 isu_size ? isu_size : mpic->num_sources, 1359 last_irq + 1,
1350 &mpic_host_ops, 1360 &mpic_host_ops, mpic);
1351 flags & MPIC_LARGE_VECTORS ? 2048 : 256);
1352 1361
1353 /* 1362 /*
1354 * FIXME: The code leaks the MPIC object and mappings here; this 1363 * FIXME: The code leaks the MPIC object and mappings here; this
@@ -1357,8 +1366,6 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1357 if (mpic->irqhost == NULL) 1366 if (mpic->irqhost == NULL)
1358 return NULL; 1367 return NULL;
1359 1368
1360 mpic->irqhost->host_data = mpic;
1361
1362 /* Display version */ 1369 /* Display version */
1363 switch (greg_feature & MPIC_GREG_FEATURE_VERSION_MASK) { 1370 switch (greg_feature & MPIC_GREG_FEATURE_VERSION_MASK) {
1364 case 1: 1371 case 1:
@@ -1383,7 +1390,7 @@ struct mpic * __init mpic_alloc(struct device_node *node,
1383 mpic->next = mpics; 1390 mpic->next = mpics;
1384 mpics = mpic; 1391 mpics = mpic;
1385 1392
1386 if (!(flags & MPIC_SECONDARY)) { 1393 if (!(mpic->flags & MPIC_SECONDARY)) {
1387 mpic_primary = mpic; 1394 mpic_primary = mpic;
1388 irq_set_default_host(mpic->irqhost); 1395 irq_set_default_host(mpic->irqhost);
1389 } 1396 }
@@ -1450,10 +1457,6 @@ void __init mpic_init(struct mpic *mpic)
1450 (mpic->ipi_vecs[0] + i)); 1457 (mpic->ipi_vecs[0] + i));
1451 } 1458 }
1452 1459
1453 /* Initialize interrupt sources */
1454 if (mpic->irq_count == 0)
1455 mpic->irq_count = mpic->num_sources;
1456
1457 /* Do the HT PIC fixups on U3 broken mpic */ 1460 /* Do the HT PIC fixups on U3 broken mpic */
1458 DBG("MPIC flags: %x\n", mpic->flags); 1461 DBG("MPIC flags: %x\n", mpic->flags);
1459 if ((mpic->flags & MPIC_U3_HT_IRQS) && !(mpic->flags & MPIC_SECONDARY)) { 1462 if ((mpic->flags & MPIC_U3_HT_IRQS) && !(mpic->flags & MPIC_SECONDARY)) {
diff --git a/arch/powerpc/sysdev/mpic_msgr.c b/arch/powerpc/sysdev/mpic_msgr.c
new file mode 100644
index 000000000000..6e7fa386e76a
--- /dev/null
+++ b/arch/powerpc/sysdev/mpic_msgr.c
@@ -0,0 +1,282 @@
1/*
2 * Copyright 2011-2012, Meador Inge, Mentor Graphics Corporation.
3 *
4 * Some ideas based on un-pushed work done by Vivek Mahajan, Jason Jin, and
5 * Mingkai Hu from Freescale Semiconductor, Inc.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; version 2 of the
10 * License.
11 *
12 */
13
14#include <linux/list.h>
15#include <linux/of_platform.h>
16#include <linux/errno.h>
17#include <asm/prom.h>
18#include <asm/hw_irq.h>
19#include <asm/ppc-pci.h>
20#include <asm/mpic_msgr.h>
21
22#define MPIC_MSGR_REGISTERS_PER_BLOCK 4
23#define MPIC_MSGR_STRIDE 0x10
24#define MPIC_MSGR_MER_OFFSET 0x100
25#define MSGR_INUSE 0
26#define MSGR_FREE 1
27
28static struct mpic_msgr **mpic_msgrs;
29static unsigned int mpic_msgr_count;
30
31static inline void _mpic_msgr_mer_write(struct mpic_msgr *msgr, u32 value)
32{
33 out_be32(msgr->mer, value);
34}
35
36static inline u32 _mpic_msgr_mer_read(struct mpic_msgr *msgr)
37{
38 return in_be32(msgr->mer);
39}
40
41static inline void _mpic_msgr_disable(struct mpic_msgr *msgr)
42{
43 u32 mer = _mpic_msgr_mer_read(msgr);
44
45 _mpic_msgr_mer_write(msgr, mer & ~(1 << msgr->num));
46}
47
48struct mpic_msgr *mpic_msgr_get(unsigned int reg_num)
49{
50 unsigned long flags;
51 struct mpic_msgr *msgr;
52
53 /* Assume busy until proven otherwise. */
54 msgr = ERR_PTR(-EBUSY);
55
56 if (reg_num >= mpic_msgr_count)
57 return ERR_PTR(-ENODEV);
58
59 raw_spin_lock_irqsave(&msgr->lock, flags);
60 if (mpic_msgrs[reg_num]->in_use == MSGR_FREE) {
61 msgr = mpic_msgrs[reg_num];
62 msgr->in_use = MSGR_INUSE;
63 }
64 raw_spin_unlock_irqrestore(&msgr->lock, flags);
65
66 return msgr;
67}
68EXPORT_SYMBOL_GPL(mpic_msgr_get);
69
70void mpic_msgr_put(struct mpic_msgr *msgr)
71{
72 unsigned long flags;
73
74 raw_spin_lock_irqsave(&msgr->lock, flags);
75 msgr->in_use = MSGR_FREE;
76 _mpic_msgr_disable(msgr);
77 raw_spin_unlock_irqrestore(&msgr->lock, flags);
78}
79EXPORT_SYMBOL_GPL(mpic_msgr_put);
80
81void mpic_msgr_enable(struct mpic_msgr *msgr)
82{
83 unsigned long flags;
84 u32 mer;
85
86 raw_spin_lock_irqsave(&msgr->lock, flags);
87 mer = _mpic_msgr_mer_read(msgr);
88 _mpic_msgr_mer_write(msgr, mer | (1 << msgr->num));
89 raw_spin_unlock_irqrestore(&msgr->lock, flags);
90}
91EXPORT_SYMBOL_GPL(mpic_msgr_enable);
92
93void mpic_msgr_disable(struct mpic_msgr *msgr)
94{
95 unsigned long flags;
96
97 raw_spin_lock_irqsave(&msgr->lock, flags);
98 _mpic_msgr_disable(msgr);
99 raw_spin_unlock_irqrestore(&msgr->lock, flags);
100}
101EXPORT_SYMBOL_GPL(mpic_msgr_disable);
102
103/* The following three functions are used to compute the order and number of
104 * the message register blocks. They are clearly very inefficent. However,
105 * they are called *only* a few times during device initialization.
106 */
107static unsigned int mpic_msgr_number_of_blocks(void)
108{
109 unsigned int count;
110 struct device_node *aliases;
111
112 count = 0;
113 aliases = of_find_node_by_name(NULL, "aliases");
114
115 if (aliases) {
116 char buf[32];
117
118 for (;;) {
119 snprintf(buf, sizeof(buf), "mpic-msgr-block%d", count);
120 if (!of_find_property(aliases, buf, NULL))
121 break;
122
123 count += 1;
124 }
125 }
126
127 return count;
128}
129
130static unsigned int mpic_msgr_number_of_registers(void)
131{
132 return mpic_msgr_number_of_blocks() * MPIC_MSGR_REGISTERS_PER_BLOCK;
133}
134
135static int mpic_msgr_block_number(struct device_node *node)
136{
137 struct device_node *aliases;
138 unsigned int index, number_of_blocks;
139 char buf[64];
140
141 number_of_blocks = mpic_msgr_number_of_blocks();
142 aliases = of_find_node_by_name(NULL, "aliases");
143 if (!aliases)
144 return -1;
145
146 for (index = 0; index < number_of_blocks; ++index) {
147 struct property *prop;
148
149 snprintf(buf, sizeof(buf), "mpic-msgr-block%d", index);
150 prop = of_find_property(aliases, buf, NULL);
151 if (node == of_find_node_by_path(prop->value))
152 break;
153 }
154
155 return index == number_of_blocks ? -1 : index;
156}
157
158/* The probe function for a single message register block.
159 */
160static __devinit int mpic_msgr_probe(struct platform_device *dev)
161{
162 void __iomem *msgr_block_addr;
163 int block_number;
164 struct resource rsrc;
165 unsigned int i;
166 unsigned int irq_index;
167 struct device_node *np = dev->dev.of_node;
168 unsigned int receive_mask;
169 const unsigned int *prop;
170
171 if (!np) {
172 dev_err(&dev->dev, "Device OF-Node is NULL");
173 return -EFAULT;
174 }
175
176 /* Allocate the message register array upon the first device
177 * registered.
178 */
179 if (!mpic_msgrs) {
180 mpic_msgr_count = mpic_msgr_number_of_registers();
181 dev_info(&dev->dev, "Found %d message registers\n",
182 mpic_msgr_count);
183
184 mpic_msgrs = kzalloc(sizeof(struct mpic_msgr) * mpic_msgr_count,
185 GFP_KERNEL);
186 if (!mpic_msgrs) {
187 dev_err(&dev->dev,
188 "No memory for message register blocks\n");
189 return -ENOMEM;
190 }
191 }
192 dev_info(&dev->dev, "Of-device full name %s\n", np->full_name);
193
194 /* IO map the message register block. */
195 of_address_to_resource(np, 0, &rsrc);
196 msgr_block_addr = ioremap(rsrc.start, rsrc.end - rsrc.start);
197 if (!msgr_block_addr) {
198 dev_err(&dev->dev, "Failed to iomap MPIC message registers");
199 return -EFAULT;
200 }
201
202 /* Ensure the block has a defined order. */
203 block_number = mpic_msgr_block_number(np);
204 if (block_number < 0) {
205 dev_err(&dev->dev,
206 "Failed to find message register block alias\n");
207 return -ENODEV;
208 }
209 dev_info(&dev->dev, "Setting up message register block %d\n",
210 block_number);
211
212 /* Grab the receive mask which specifies what registers can receive
213 * interrupts.
214 */
215 prop = of_get_property(np, "mpic-msgr-receive-mask", NULL);
216 receive_mask = (prop) ? *prop : 0xF;
217
218 /* Build up the appropriate message register data structures. */
219 for (i = 0, irq_index = 0; i < MPIC_MSGR_REGISTERS_PER_BLOCK; ++i) {
220 struct mpic_msgr *msgr;
221 unsigned int reg_number;
222
223 msgr = kzalloc(sizeof(struct mpic_msgr), GFP_KERNEL);
224 if (!msgr) {
225 dev_err(&dev->dev, "No memory for message register\n");
226 return -ENOMEM;
227 }
228
229 reg_number = block_number * MPIC_MSGR_REGISTERS_PER_BLOCK + i;
230 msgr->base = msgr_block_addr + i * MPIC_MSGR_STRIDE;
231 msgr->mer = msgr->base + MPIC_MSGR_MER_OFFSET;
232 msgr->in_use = MSGR_FREE;
233 msgr->num = i;
234 raw_spin_lock_init(&msgr->lock);
235
236 if (receive_mask & (1 << i)) {
237 struct resource irq;
238
239 if (of_irq_to_resource(np, irq_index, &irq) == NO_IRQ) {
240 dev_err(&dev->dev,
241 "Missing interrupt specifier");
242 kfree(msgr);
243 return -EFAULT;
244 }
245 msgr->irq = irq.start;
246 irq_index += 1;
247 } else {
248 msgr->irq = NO_IRQ;
249 }
250
251 mpic_msgrs[reg_number] = msgr;
252 mpic_msgr_disable(msgr);
253 dev_info(&dev->dev, "Register %d initialized: irq %d\n",
254 reg_number, msgr->irq);
255
256 }
257
258 return 0;
259}
260
261static const struct of_device_id mpic_msgr_ids[] = {
262 {
263 .compatible = "fsl,mpic-v3.1-msgr",
264 .data = NULL,
265 },
266 {}
267};
268
269static struct platform_driver mpic_msgr_driver = {
270 .driver = {
271 .name = "mpic-msgr",
272 .owner = THIS_MODULE,
273 .of_match_table = mpic_msgr_ids,
274 },
275 .probe = mpic_msgr_probe,
276};
277
278static __init int mpic_msgr_init(void)
279{
280 return platform_driver_register(&mpic_msgr_driver);
281}
282subsys_initcall(mpic_msgr_init);
diff --git a/arch/powerpc/sysdev/mpic_msi.c b/arch/powerpc/sysdev/mpic_msi.c
index 0f67cd79d481..bbf342c88314 100644
--- a/arch/powerpc/sysdev/mpic_msi.c
+++ b/arch/powerpc/sysdev/mpic_msi.c
@@ -32,7 +32,7 @@ void mpic_msi_reserve_hwirq(struct mpic *mpic, irq_hw_number_t hwirq)
32static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic) 32static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
33{ 33{
34 irq_hw_number_t hwirq; 34 irq_hw_number_t hwirq;
35 struct irq_host_ops *ops = mpic->irqhost->ops; 35 const struct irq_domain_ops *ops = mpic->irqhost->ops;
36 struct device_node *np; 36 struct device_node *np;
37 int flags, index, i; 37 int flags, index, i;
38 struct of_irq oirq; 38 struct of_irq oirq;
@@ -54,7 +54,7 @@ static int mpic_msi_reserve_u3_hwirqs(struct mpic *mpic)
54 for (i = 100; i < 105; i++) 54 for (i = 100; i < 105; i++)
55 msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i); 55 msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i);
56 56
57 for (i = 124; i < mpic->irq_count; i++) 57 for (i = 124; i < mpic->num_sources; i++)
58 msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i); 58 msi_bitmap_reserve_hwirq(&mpic->msi_bitmap, i);
59 59
60 60
@@ -83,7 +83,7 @@ int mpic_msi_init_allocator(struct mpic *mpic)
83{ 83{
84 int rc; 84 int rc;
85 85
86 rc = msi_bitmap_alloc(&mpic->msi_bitmap, mpic->irq_count, 86 rc = msi_bitmap_alloc(&mpic->msi_bitmap, mpic->num_sources,
87 mpic->irqhost->of_node); 87 mpic->irqhost->of_node);
88 if (rc) 88 if (rc)
89 return rc; 89 return rc;
diff --git a/arch/powerpc/sysdev/mv64x60_pic.c b/arch/powerpc/sysdev/mv64x60_pic.c
index 14d130268e7a..8848e99a83f2 100644
--- a/arch/powerpc/sysdev/mv64x60_pic.c
+++ b/arch/powerpc/sysdev/mv64x60_pic.c
@@ -70,7 +70,7 @@ static u32 mv64x60_cached_low_mask;
70static u32 mv64x60_cached_high_mask = MV64X60_HIGH_GPP_GROUPS; 70static u32 mv64x60_cached_high_mask = MV64X60_HIGH_GPP_GROUPS;
71static u32 mv64x60_cached_gpp_mask; 71static u32 mv64x60_cached_gpp_mask;
72 72
73static struct irq_host *mv64x60_irq_host; 73static struct irq_domain *mv64x60_irq_host;
74 74
75/* 75/*
76 * mv64x60_chip_low functions 76 * mv64x60_chip_low functions
@@ -208,7 +208,7 @@ static struct irq_chip *mv64x60_chips[] = {
208 [MV64x60_LEVEL1_GPP] = &mv64x60_chip_gpp, 208 [MV64x60_LEVEL1_GPP] = &mv64x60_chip_gpp,
209}; 209};
210 210
211static int mv64x60_host_map(struct irq_host *h, unsigned int virq, 211static int mv64x60_host_map(struct irq_domain *h, unsigned int virq,
212 irq_hw_number_t hwirq) 212 irq_hw_number_t hwirq)
213{ 213{
214 int level1; 214 int level1;
@@ -223,7 +223,7 @@ static int mv64x60_host_map(struct irq_host *h, unsigned int virq,
223 return 0; 223 return 0;
224} 224}
225 225
226static struct irq_host_ops mv64x60_host_ops = { 226static struct irq_domain_ops mv64x60_host_ops = {
227 .map = mv64x60_host_map, 227 .map = mv64x60_host_map,
228}; 228};
229 229
@@ -250,9 +250,8 @@ void __init mv64x60_init_irq(void)
250 paddr = of_translate_address(np, reg); 250 paddr = of_translate_address(np, reg);
251 mv64x60_irq_reg_base = ioremap(paddr, reg[1]); 251 mv64x60_irq_reg_base = ioremap(paddr, reg[1]);
252 252
253 mv64x60_irq_host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 253 mv64x60_irq_host = irq_domain_add_linear(np, MV64x60_NUM_IRQS,
254 MV64x60_NUM_IRQS, 254 &mv64x60_host_ops, NULL);
255 &mv64x60_host_ops, MV64x60_NUM_IRQS);
256 255
257 spin_lock_irqsave(&mv64x60_lock, flags); 256 spin_lock_irqsave(&mv64x60_lock, flags);
258 out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK, 257 out_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK,
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c
index 4f05f7542346..56e8b3c3c890 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.c
+++ b/arch/powerpc/sysdev/ppc4xx_pci.c
@@ -1050,6 +1050,74 @@ static struct ppc4xx_pciex_hwops ppc460ex_pcie_hwops __initdata =
1050 .check_link = ppc4xx_pciex_check_link_sdr, 1050 .check_link = ppc4xx_pciex_check_link_sdr,
1051}; 1051};
1052 1052
1053static int __init apm821xx_pciex_core_init(struct device_node *np)
1054{
1055 /* Return the number of pcie port */
1056 return 1;
1057}
1058
/* Hardware initialization of one APM821xx PCIe port: PHY reset, port-type
 * and link-width programming, PHY tuning, then poll for reset completion.
 * Returns 0 on success or -EBUSY if the PHY never leaves reset.
 */
static int apm821xx_pciex_init_port_hw(struct ppc4xx_pciex_port *port)
{
	u32 val;

	/*
	 * Do a software reset on PCIe ports.
	 * This code is to fix the issue that pci drivers doesn't re-assign
	 * bus number for PCIE devices after Uboot
	 * scanned and configured all the buses (eg. PCIE NIC IntelPro/1000
	 * PT quad port, SAS LSI 1064E)
	 */

	mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x0);
	mdelay(10);

	/* Select port type: legacy endpoint vs. root port. */
	if (port->endpoint)
		val = PTYPE_LEGACY_ENDPOINT << 20;
	else
		val = PTYPE_ROOT_PORT << 20;

	/* Link width x1. */
	val |= LNKW_X1 << 12;

	mtdcri(SDR0, port->sdr_base + PESDRn_DLPSET, val);
	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET1, 0x00000000);
	mtdcri(SDR0, port->sdr_base + PESDRn_UTLSET2, 0x01010000);

	/* NOTE(review): magic PHY tuning values reuse the 460EX register
	 * definitions — presumably from vendor init code; confirm against
	 * the chip reference manual.
	 */
	mtdcri(SDR0, PESDR0_460EX_L0CDRCTL, 0x00003230);
	mtdcri(SDR0, PESDR0_460EX_L0DRV, 0x00000130);
	mtdcri(SDR0, PESDR0_460EX_L0CLK, 0x00000006);

	/* Pulse the PHY reset control, waiting between stages. */
	mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x10000000);
	mdelay(50);
	mtdcri(SDR0, PESDR0_460EX_PHY_CTL_RST, 0x30000000);

	mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
	       mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) |
	       (PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTPYN));

	/* Poll for PHY reset */
	val = PESDR0_460EX_RSTSTA - port->sdr_base;
	if (ppc4xx_pciex_wait_on_sdr(port, val, 0x1, 1, 100)) {
		printk(KERN_WARNING "%s: PCIE: Can't reset PHY\n", __func__);
		return -EBUSY;
	} else {
		/* Reset done: release guarded resets, assert RSTPYN. */
		mtdcri(SDR0, port->sdr_base + PESDRn_RCSSET,
		       (mfdcri(SDR0, port->sdr_base + PESDRn_RCSSET) &
			~(PESDRx_RCSSET_RSTGU | PESDRx_RCSSET_RSTDL)) |
		       PESDRx_RCSSET_RSTPYN);

		port->has_ibpre = 1;
		return 0;
	}
}
1112
/* Hook table wiring the APM821xx port into the generic 4xx PCIe core.
 * setup_utl and check_link are shared with the 460EX implementation.
 */
static struct ppc4xx_pciex_hwops apm821xx_pcie_hwops __initdata = {
	.want_sdr = true,
	.core_init = apm821xx_pciex_core_init,
	.port_init_hw = apm821xx_pciex_init_port_hw,
	.setup_utl = ppc460ex_pciex_init_utl,
	.check_link = ppc4xx_pciex_check_link_sdr,
};
1120
1053static int __init ppc460sx_pciex_core_init(struct device_node *np) 1121static int __init ppc460sx_pciex_core_init(struct device_node *np)
1054{ 1122{
1055 /* HSS drive amplitude */ 1123 /* HSS drive amplitude */
@@ -1362,6 +1430,8 @@ static int __init ppc4xx_pciex_check_core_init(struct device_node *np)
1362 ppc4xx_pciex_hwops = &ppc460ex_pcie_hwops; 1430 ppc4xx_pciex_hwops = &ppc460ex_pcie_hwops;
1363 if (of_device_is_compatible(np, "ibm,plb-pciex-460sx")) 1431 if (of_device_is_compatible(np, "ibm,plb-pciex-460sx"))
1364 ppc4xx_pciex_hwops = &ppc460sx_pcie_hwops; 1432 ppc4xx_pciex_hwops = &ppc460sx_pcie_hwops;
1433 if (of_device_is_compatible(np, "ibm,plb-pciex-apm821xx"))
1434 ppc4xx_pciex_hwops = &apm821xx_pcie_hwops;
1365#endif /* CONFIG_44x */ 1435#endif /* CONFIG_44x */
1366#ifdef CONFIG_40x 1436#ifdef CONFIG_40x
1367 if (of_device_is_compatible(np, "ibm,plb-pciex-405ex")) 1437 if (of_device_is_compatible(np, "ibm,plb-pciex-405ex"))
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index 73034bd203c4..2fba6ef2f95e 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -245,13 +245,13 @@ static struct irq_chip qe_ic_irq_chip = {
245 .irq_mask_ack = qe_ic_mask_irq, 245 .irq_mask_ack = qe_ic_mask_irq,
246}; 246};
247 247
248static int qe_ic_host_match(struct irq_host *h, struct device_node *node) 248static int qe_ic_host_match(struct irq_domain *h, struct device_node *node)
249{ 249{
250 /* Exact match, unless qe_ic node is NULL */ 250 /* Exact match, unless qe_ic node is NULL */
251 return h->of_node == NULL || h->of_node == node; 251 return h->of_node == NULL || h->of_node == node;
252} 252}
253 253
254static int qe_ic_host_map(struct irq_host *h, unsigned int virq, 254static int qe_ic_host_map(struct irq_domain *h, unsigned int virq,
255 irq_hw_number_t hw) 255 irq_hw_number_t hw)
256{ 256{
257 struct qe_ic *qe_ic = h->host_data; 257 struct qe_ic *qe_ic = h->host_data;
@@ -272,23 +272,10 @@ static int qe_ic_host_map(struct irq_host *h, unsigned int virq,
272 return 0; 272 return 0;
273} 273}
274 274
275static int qe_ic_host_xlate(struct irq_host *h, struct device_node *ct, 275static struct irq_domain_ops qe_ic_host_ops = {
276 const u32 * intspec, unsigned int intsize,
277 irq_hw_number_t * out_hwirq,
278 unsigned int *out_flags)
279{
280 *out_hwirq = intspec[0];
281 if (intsize > 1)
282 *out_flags = intspec[1];
283 else
284 *out_flags = IRQ_TYPE_NONE;
285 return 0;
286}
287
288static struct irq_host_ops qe_ic_host_ops = {
289 .match = qe_ic_host_match, 276 .match = qe_ic_host_match,
290 .map = qe_ic_host_map, 277 .map = qe_ic_host_map,
291 .xlate = qe_ic_host_xlate, 278 .xlate = irq_domain_xlate_onetwocell,
292}; 279};
293 280
294/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */ 281/* Return an interrupt vector or NO_IRQ if no interrupt is pending. */
@@ -339,8 +326,8 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags,
339 if (qe_ic == NULL) 326 if (qe_ic == NULL)
340 return; 327 return;
341 328
342 qe_ic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 329 qe_ic->irqhost = irq_domain_add_linear(node, NR_QE_IC_INTS,
343 NR_QE_IC_INTS, &qe_ic_host_ops, 0); 330 &qe_ic_host_ops, qe_ic);
344 if (qe_ic->irqhost == NULL) { 331 if (qe_ic->irqhost == NULL) {
345 kfree(qe_ic); 332 kfree(qe_ic);
346 return; 333 return;
@@ -348,7 +335,6 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags,
348 335
349 qe_ic->regs = ioremap(res.start, resource_size(&res)); 336 qe_ic->regs = ioremap(res.start, resource_size(&res));
350 337
351 qe_ic->irqhost->host_data = qe_ic;
352 qe_ic->hc_irq = qe_ic_irq_chip; 338 qe_ic->hc_irq = qe_ic_irq_chip;
353 339
354 qe_ic->virq_high = irq_of_parse_and_map(node, 0); 340 qe_ic->virq_high = irq_of_parse_and_map(node, 0);
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.h b/arch/powerpc/sysdev/qe_lib/qe_ic.h
index c1361d005a8a..c327872ed35c 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.h
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.h
@@ -79,7 +79,7 @@ struct qe_ic {
79 volatile u32 __iomem *regs; 79 volatile u32 __iomem *regs;
80 80
81 /* The remapper for this QEIC */ 81 /* The remapper for this QEIC */
82 struct irq_host *irqhost; 82 struct irq_domain *irqhost;
83 83
84 /* The "linux" controller struct */ 84 /* The "linux" controller struct */
85 struct irq_chip hc_irq; 85 struct irq_chip hc_irq;
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c
index 4d18658116e5..188012c58f7f 100644
--- a/arch/powerpc/sysdev/tsi108_pci.c
+++ b/arch/powerpc/sysdev/tsi108_pci.c
@@ -51,7 +51,7 @@
51u32 tsi108_pci_cfg_base; 51u32 tsi108_pci_cfg_base;
52static u32 tsi108_pci_cfg_phys; 52static u32 tsi108_pci_cfg_phys;
53u32 tsi108_csr_vir_base; 53u32 tsi108_csr_vir_base;
54static struct irq_host *pci_irq_host; 54static struct irq_domain *pci_irq_host;
55 55
56extern u32 get_vir_csrbase(void); 56extern u32 get_vir_csrbase(void);
57extern u32 tsi108_read_reg(u32 reg_offset); 57extern u32 tsi108_read_reg(u32 reg_offset);
@@ -376,7 +376,7 @@ static struct irq_chip tsi108_pci_irq = {
376 .irq_unmask = tsi108_pci_irq_unmask, 376 .irq_unmask = tsi108_pci_irq_unmask,
377}; 377};
378 378
379static int pci_irq_host_xlate(struct irq_host *h, struct device_node *ct, 379static int pci_irq_host_xlate(struct irq_domain *h, struct device_node *ct,
380 const u32 *intspec, unsigned int intsize, 380 const u32 *intspec, unsigned int intsize,
381 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 381 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
382{ 382{
@@ -385,7 +385,7 @@ static int pci_irq_host_xlate(struct irq_host *h, struct device_node *ct,
385 return 0; 385 return 0;
386} 386}
387 387
388static int pci_irq_host_map(struct irq_host *h, unsigned int virq, 388static int pci_irq_host_map(struct irq_domain *h, unsigned int virq,
389 irq_hw_number_t hw) 389 irq_hw_number_t hw)
390{ unsigned int irq; 390{ unsigned int irq;
391 DBG("%s(%d, 0x%lx)\n", __func__, virq, hw); 391 DBG("%s(%d, 0x%lx)\n", __func__, virq, hw);
@@ -397,7 +397,7 @@ static int pci_irq_host_map(struct irq_host *h, unsigned int virq,
397 return 0; 397 return 0;
398} 398}
399 399
400static struct irq_host_ops pci_irq_host_ops = { 400static struct irq_domain_ops pci_irq_domain_ops = {
401 .map = pci_irq_host_map, 401 .map = pci_irq_host_map,
402 .xlate = pci_irq_host_xlate, 402 .xlate = pci_irq_host_xlate,
403}; 403};
@@ -419,10 +419,9 @@ void __init tsi108_pci_int_init(struct device_node *node)
419{ 419{
420 DBG("Tsi108_pci_int_init: initializing PCI interrupts\n"); 420 DBG("Tsi108_pci_int_init: initializing PCI interrupts\n");
421 421
422 pci_irq_host = irq_alloc_host(node, IRQ_HOST_MAP_LEGACY, 422 pci_irq_host = irq_domain_add_legacy_isa(node, &pci_irq_domain_ops, NULL);
423 0, &pci_irq_host_ops, 0);
424 if (pci_irq_host == NULL) { 423 if (pci_irq_host == NULL) {
425 printk(KERN_ERR "pci_irq_host: failed to allocate irq host !\n"); 424 printk(KERN_ERR "pci_irq_host: failed to allocate irq domain!\n");
426 return; 425 return;
427 } 426 }
428 427
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index 063c901b1265..92033936a8f7 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -49,7 +49,7 @@ struct uic {
49 raw_spinlock_t lock; 49 raw_spinlock_t lock;
50 50
51 /* The remapper for this UIC */ 51 /* The remapper for this UIC */
52 struct irq_host *irqhost; 52 struct irq_domain *irqhost;
53}; 53};
54 54
55static void uic_unmask_irq(struct irq_data *d) 55static void uic_unmask_irq(struct irq_data *d)
@@ -174,7 +174,7 @@ static struct irq_chip uic_irq_chip = {
174 .irq_set_type = uic_set_irq_type, 174 .irq_set_type = uic_set_irq_type,
175}; 175};
176 176
177static int uic_host_map(struct irq_host *h, unsigned int virq, 177static int uic_host_map(struct irq_domain *h, unsigned int virq,
178 irq_hw_number_t hw) 178 irq_hw_number_t hw)
179{ 179{
180 struct uic *uic = h->host_data; 180 struct uic *uic = h->host_data;
@@ -190,21 +190,9 @@ static int uic_host_map(struct irq_host *h, unsigned int virq,
190 return 0; 190 return 0;
191} 191}
192 192
193static int uic_host_xlate(struct irq_host *h, struct device_node *ct, 193static struct irq_domain_ops uic_host_ops = {
194 const u32 *intspec, unsigned int intsize,
195 irq_hw_number_t *out_hwirq, unsigned int *out_type)
196
197{
198 /* UIC intspecs must have 2 cells */
199 BUG_ON(intsize != 2);
200 *out_hwirq = intspec[0];
201 *out_type = intspec[1];
202 return 0;
203}
204
205static struct irq_host_ops uic_host_ops = {
206 .map = uic_host_map, 194 .map = uic_host_map,
207 .xlate = uic_host_xlate, 195 .xlate = irq_domain_xlate_twocell,
208}; 196};
209 197
210void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) 198void uic_irq_cascade(unsigned int virq, struct irq_desc *desc)
@@ -270,13 +258,11 @@ static struct uic * __init uic_init_one(struct device_node *node)
270 } 258 }
271 uic->dcrbase = *dcrreg; 259 uic->dcrbase = *dcrreg;
272 260
273 uic->irqhost = irq_alloc_host(node, IRQ_HOST_MAP_LINEAR, 261 uic->irqhost = irq_domain_add_linear(node, NR_UIC_INTS, &uic_host_ops,
274 NR_UIC_INTS, &uic_host_ops, -1); 262 uic);
275 if (! uic->irqhost) 263 if (! uic->irqhost)
276 return NULL; /* FIXME: panic? */ 264 return NULL; /* FIXME: panic? */
277 265
278 uic->irqhost->host_data = uic;
279
280 /* Start with all interrupts disabled, level and non-critical */ 266 /* Start with all interrupts disabled, level and non-critical */
281 mtdcr(uic->dcrbase + UIC_ER, 0); 267 mtdcr(uic->dcrbase + UIC_ER, 0);
282 mtdcr(uic->dcrbase + UIC_CR, 0); 268 mtdcr(uic->dcrbase + UIC_CR, 0);
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index d72eda6a4c05..ea5e204e3450 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -40,7 +40,7 @@ unsigned int xics_interrupt_server_size = 8;
40 40
41DEFINE_PER_CPU(struct xics_cppr, xics_cppr); 41DEFINE_PER_CPU(struct xics_cppr, xics_cppr);
42 42
43struct irq_host *xics_host; 43struct irq_domain *xics_host;
44 44
45static LIST_HEAD(ics_list); 45static LIST_HEAD(ics_list);
46 46
@@ -212,16 +212,16 @@ void xics_migrate_irqs_away(void)
212 /* We can't set affinity on ISA interrupts */ 212 /* We can't set affinity on ISA interrupts */
213 if (virq < NUM_ISA_INTERRUPTS) 213 if (virq < NUM_ISA_INTERRUPTS)
214 continue; 214 continue;
215 if (!virq_is_host(virq, xics_host))
216 continue;
217 irq = (unsigned int)virq_to_hw(virq);
218 /* We need to get IPIs still. */
219 if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
220 continue;
221 desc = irq_to_desc(virq); 215 desc = irq_to_desc(virq);
222 /* We only need to migrate enabled IRQS */ 216 /* We only need to migrate enabled IRQS */
223 if (!desc || !desc->action) 217 if (!desc || !desc->action)
224 continue; 218 continue;
219 if (desc->irq_data.domain != xics_host)
220 continue;
221 irq = desc->irq_data.hwirq;
222 /* We need to get IPIs still. */
223 if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS)
224 continue;
225 chip = irq_desc_get_chip(desc); 225 chip = irq_desc_get_chip(desc);
226 if (!chip || !chip->irq_set_affinity) 226 if (!chip || !chip->irq_set_affinity)
227 continue; 227 continue;
@@ -301,7 +301,7 @@ int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask,
301} 301}
302#endif /* CONFIG_SMP */ 302#endif /* CONFIG_SMP */
303 303
304static int xics_host_match(struct irq_host *h, struct device_node *node) 304static int xics_host_match(struct irq_domain *h, struct device_node *node)
305{ 305{
306 struct ics *ics; 306 struct ics *ics;
307 307
@@ -323,7 +323,7 @@ static struct irq_chip xics_ipi_chip = {
323 .irq_unmask = xics_ipi_unmask, 323 .irq_unmask = xics_ipi_unmask,
324}; 324};
325 325
326static int xics_host_map(struct irq_host *h, unsigned int virq, 326static int xics_host_map(struct irq_domain *h, unsigned int virq,
327 irq_hw_number_t hw) 327 irq_hw_number_t hw)
328{ 328{
329 struct ics *ics; 329 struct ics *ics;
@@ -351,7 +351,7 @@ static int xics_host_map(struct irq_host *h, unsigned int virq,
351 return -EINVAL; 351 return -EINVAL;
352} 352}
353 353
354static int xics_host_xlate(struct irq_host *h, struct device_node *ct, 354static int xics_host_xlate(struct irq_domain *h, struct device_node *ct,
355 const u32 *intspec, unsigned int intsize, 355 const u32 *intspec, unsigned int intsize,
356 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 356 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
357 357
@@ -366,7 +366,7 @@ static int xics_host_xlate(struct irq_host *h, struct device_node *ct,
366 return 0; 366 return 0;
367} 367}
368 368
369static struct irq_host_ops xics_host_ops = { 369static struct irq_domain_ops xics_host_ops = {
370 .match = xics_host_match, 370 .match = xics_host_match,
371 .map = xics_host_map, 371 .map = xics_host_map,
372 .xlate = xics_host_xlate, 372 .xlate = xics_host_xlate,
@@ -374,8 +374,7 @@ static struct irq_host_ops xics_host_ops = {
374 374
375static void __init xics_init_host(void) 375static void __init xics_init_host(void)
376{ 376{
377 xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops, 377 xics_host = irq_domain_add_tree(NULL, &xics_host_ops, NULL);
378 XICS_IRQ_SPURIOUS);
379 BUG_ON(xics_host == NULL); 378 BUG_ON(xics_host == NULL);
380 irq_set_default_host(xics_host); 379 irq_set_default_host(xics_host);
381} 380}
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c
index 6183799754af..8d73c3c0bee6 100644
--- a/arch/powerpc/sysdev/xilinx_intc.c
+++ b/arch/powerpc/sysdev/xilinx_intc.c
@@ -40,7 +40,7 @@
40#define XINTC_IVR 24 /* Interrupt Vector */ 40#define XINTC_IVR 24 /* Interrupt Vector */
41#define XINTC_MER 28 /* Master Enable */ 41#define XINTC_MER 28 /* Master Enable */
42 42
43static struct irq_host *master_irqhost; 43static struct irq_domain *master_irqhost;
44 44
45#define XILINX_INTC_MAXIRQS (32) 45#define XILINX_INTC_MAXIRQS (32)
46 46
@@ -141,7 +141,7 @@ static struct irq_chip xilinx_intc_edge_irqchip = {
141/** 141/**
142 * xilinx_intc_xlate - translate virq# from device tree interrupts property 142 * xilinx_intc_xlate - translate virq# from device tree interrupts property
143 */ 143 */
144static int xilinx_intc_xlate(struct irq_host *h, struct device_node *ct, 144static int xilinx_intc_xlate(struct irq_domain *h, struct device_node *ct,
145 const u32 *intspec, unsigned int intsize, 145 const u32 *intspec, unsigned int intsize,
146 irq_hw_number_t *out_hwirq, 146 irq_hw_number_t *out_hwirq,
147 unsigned int *out_flags) 147 unsigned int *out_flags)
@@ -161,7 +161,7 @@ static int xilinx_intc_xlate(struct irq_host *h, struct device_node *ct,
161 161
162 return 0; 162 return 0;
163} 163}
164static int xilinx_intc_map(struct irq_host *h, unsigned int virq, 164static int xilinx_intc_map(struct irq_domain *h, unsigned int virq,
165 irq_hw_number_t irq) 165 irq_hw_number_t irq)
166{ 166{
167 irq_set_chip_data(virq, h->host_data); 167 irq_set_chip_data(virq, h->host_data);
@@ -177,15 +177,15 @@ static int xilinx_intc_map(struct irq_host *h, unsigned int virq,
177 return 0; 177 return 0;
178} 178}
179 179
180static struct irq_host_ops xilinx_intc_ops = { 180static struct irq_domain_ops xilinx_intc_ops = {
181 .map = xilinx_intc_map, 181 .map = xilinx_intc_map,
182 .xlate = xilinx_intc_xlate, 182 .xlate = xilinx_intc_xlate,
183}; 183};
184 184
185struct irq_host * __init 185struct irq_domain * __init
186xilinx_intc_init(struct device_node *np) 186xilinx_intc_init(struct device_node *np)
187{ 187{
188 struct irq_host * irq; 188 struct irq_domain * irq;
189 void * regs; 189 void * regs;
190 190
191 /* Find and map the intc registers */ 191 /* Find and map the intc registers */
@@ -200,12 +200,11 @@ xilinx_intc_init(struct device_node *np)
200 out_be32(regs + XINTC_IAR, ~(u32) 0); /* Acknowledge pending irqs */ 200 out_be32(regs + XINTC_IAR, ~(u32) 0); /* Acknowledge pending irqs */
201 out_be32(regs + XINTC_MER, 0x3UL); /* Turn on the Master Enable. */ 201 out_be32(regs + XINTC_MER, 0x3UL); /* Turn on the Master Enable. */
202 202
203 /* Allocate and initialize an irq_host structure. */ 203 /* Allocate and initialize an irq_domain structure. */
204 irq = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, XILINX_INTC_MAXIRQS, 204 irq = irq_domain_add_linear(np, XILINX_INTC_MAXIRQS, &xilinx_intc_ops,
205 &xilinx_intc_ops, -1); 205 regs);
206 if (!irq) 206 if (!irq)
207 panic(__FILE__ ": Cannot allocate IRQ host\n"); 207 panic(__FILE__ ": Cannot allocate IRQ host\n");
208 irq->host_data = regs;
209 208
210 return irq; 209 return irq;
211} 210}
diff --git a/arch/powerpc/xmon/ppc-opc.c b/arch/powerpc/xmon/ppc-opc.c
index af3780e52e76..6845e91ba04a 100644
--- a/arch/powerpc/xmon/ppc-opc.c
+++ b/arch/powerpc/xmon/ppc-opc.c
@@ -22,6 +22,7 @@
22 22
23#include <linux/stddef.h> 23#include <linux/stddef.h>
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/bug.h>
25#include "nonstdio.h" 26#include "nonstdio.h"
26#include "ppc.h" 27#include "ppc.h"
27 28
diff --git a/arch/powerpc/xmon/spu-opc.c b/arch/powerpc/xmon/spu-opc.c
index 530df3d6d7b2..7d37597c4bcd 100644
--- a/arch/powerpc/xmon/spu-opc.c
+++ b/arch/powerpc/xmon/spu-opc.c
@@ -19,6 +19,7 @@
19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ 19 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */
20 20
21#include <linux/kernel.h> 21#include <linux/kernel.h>
22#include <linux/bug.h>
22#include "spu.h" 23#include "spu.h"
23 24
24/* This file holds the Spu opcode table */ 25/* This file holds the Spu opcode table */
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index cb95eea74d3d..68a9cbbab450 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -39,7 +39,6 @@
39#include <asm/irq_regs.h> 39#include <asm/irq_regs.h>
40#include <asm/spu.h> 40#include <asm/spu.h>
41#include <asm/spu_priv1.h> 41#include <asm/spu_priv1.h>
42#include <asm/firmware.h>
43#include <asm/setjmp.h> 42#include <asm/setjmp.h>
44#include <asm/reg.h> 43#include <asm/reg.h>
45 44
@@ -1437,7 +1436,8 @@ static void excprint(struct pt_regs *fp)
1437 1436
1438 printf(" current = 0x%lx\n", current); 1437 printf(" current = 0x%lx\n", current);
1439#ifdef CONFIG_PPC64 1438#ifdef CONFIG_PPC64
1440 printf(" paca = 0x%lx\n", get_paca()); 1439 printf(" paca = 0x%lx\t softe: %d\t irq_happened: 0x%02x\n",
1440 local_paca, local_paca->soft_enabled, local_paca->irq_happened);
1441#endif 1441#endif
1442 if (current) { 1442 if (current) {
1443 printf(" pid = %ld, comm = %s\n", 1443 printf(" pid = %ld, comm = %s\n",
@@ -1634,25 +1634,6 @@ static void super_regs(void)
1634 mfspr(SPRN_DEC), mfspr(SPRN_SPRG2)); 1634 mfspr(SPRN_DEC), mfspr(SPRN_SPRG2));
1635 printf("sp = "REG" sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3)); 1635 printf("sp = "REG" sprg3= "REG"\n", sp, mfspr(SPRN_SPRG3));
1636 printf("toc = "REG" dar = "REG"\n", toc, mfspr(SPRN_DAR)); 1636 printf("toc = "REG" dar = "REG"\n", toc, mfspr(SPRN_DAR));
1637#ifdef CONFIG_PPC_ISERIES
1638 if (firmware_has_feature(FW_FEATURE_ISERIES)) {
1639 struct paca_struct *ptrPaca;
1640 struct lppaca *ptrLpPaca;
1641
1642 /* Dump out relevant Paca data areas. */
1643 printf("Paca: \n");
1644 ptrPaca = get_paca();
1645
1646 printf(" Local Processor Control Area (LpPaca): \n");
1647 ptrLpPaca = ptrPaca->lppaca_ptr;
1648 printf(" Saved Srr0=%.16lx Saved Srr1=%.16lx \n",
1649 ptrLpPaca->saved_srr0, ptrLpPaca->saved_srr1);
1650 printf(" Saved Gpr3=%.16lx Saved Gpr4=%.16lx \n",
1651 ptrLpPaca->saved_gpr3, ptrLpPaca->saved_gpr4);
1652 printf(" Saved Gpr5=%.16lx \n",
1653 ptrLpPaca->gpr5_dword.saved_gpr5);
1654 }
1655#endif
1656 1637
1657 return; 1638 return;
1658 } 1639 }
@@ -2644,7 +2625,7 @@ static void dump_slb(void)
2644static void dump_stab(void) 2625static void dump_stab(void)
2645{ 2626{
2646 int i; 2627 int i;
2647 unsigned long *tmp = (unsigned long *)get_paca()->stab_addr; 2628 unsigned long *tmp = (unsigned long *)local_paca->stab_addr;
2648 2629
2649 printf("Segment table contents of cpu %x\n", smp_processor_id()); 2630 printf("Segment table contents of cpu %x\n", smp_processor_id());
2650 2631
@@ -2855,10 +2836,6 @@ static void dump_tlb_book3e(void)
2855 2836
2856static void xmon_init(int enable) 2837static void xmon_init(int enable)
2857{ 2838{
2858#ifdef CONFIG_PPC_ISERIES
2859 if (firmware_has_feature(FW_FEATURE_ISERIES))
2860 return;
2861#endif
2862 if (enable) { 2839 if (enable) {
2863 __debugger = xmon; 2840 __debugger = xmon;
2864 __debugger_ipi = xmon_ipi; 2841 __debugger_ipi = xmon_ipi;
@@ -2895,10 +2872,6 @@ static struct sysrq_key_op sysrq_xmon_op = {
2895 2872
2896static int __init setup_xmon_sysrq(void) 2873static int __init setup_xmon_sysrq(void)
2897{ 2874{
2898#ifdef CONFIG_PPC_ISERIES
2899 if (firmware_has_feature(FW_FEATURE_ISERIES))
2900 return 0;
2901#endif
2902 register_sysrq_key('x', &sysrq_xmon_op); 2875 register_sysrq_key('x', &sysrq_xmon_op);
2903 return 0; 2876 return 0;
2904} 2877}