-rw-r--r-- Documentation/CodingStyle | 70
-rw-r--r-- Documentation/IPMI.txt | 74
-rw-r--r-- Documentation/arm/memory.txt | 2
-rw-r--r-- Documentation/cgroups/cgroups.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/ata/sata_rcar.txt | 11
-rw-r--r-- Documentation/devicetree/bindings/clock/qoriq-clock.txt | 14
-rw-r--r-- Documentation/devicetree/bindings/clock/st/st,flexgen.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/dma/atmel-xdma.txt | 54
-rw-r--r-- Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt | 1
-rw-r--r-- Documentation/devicetree/bindings/dma/qcom_bam_dma.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/dma/sun6i-dma.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt | 55
-rw-r--r-- Documentation/devicetree/bindings/iommu/rockchip,iommu.txt | 26
-rw-r--r-- Documentation/devicetree/bindings/mips/brcm/bcm3384-intc.txt | 37
-rw-r--r-- Documentation/devicetree/bindings/mips/brcm/bmips.txt | 8
-rw-r--r-- Documentation/devicetree/bindings/mips/brcm/cm-dsl.txt | 11
-rw-r--r-- Documentation/devicetree/bindings/mips/brcm/usb.txt | 11
-rw-r--r-- Documentation/devicetree/bindings/mips/cpu_irq.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/powerpc/fsl/fman.txt | 534
-rw-r--r-- Documentation/devicetree/bindings/rtc/rtc-opal.txt | 16
-rw-r--r-- Documentation/devicetree/bindings/soc/fsl/bman-portals.txt | 56
-rw-r--r-- Documentation/devicetree/bindings/soc/fsl/bman.txt | 125
-rw-r--r-- Documentation/devicetree/bindings/soc/fsl/qman-portals.txt | 154
-rw-r--r-- Documentation/devicetree/bindings/soc/fsl/qman.txt | 165
-rw-r--r-- Documentation/devicetree/bindings/vendor-prefixes.txt | 1
-rw-r--r-- Documentation/dmaengine/client.txt (renamed from Documentation/dmaengine.txt) | 0
-rw-r--r-- Documentation/dmaengine/dmatest.txt (renamed from Documentation/dmatest.txt) | 0
-rw-r--r-- Documentation/dmaengine/provider.txt | 366
-rw-r--r-- Documentation/email-clients.txt | 11
-rw-r--r-- Documentation/filesystems/proc.txt | 2
-rw-r--r-- Documentation/input/xpad.txt | 123
-rw-r--r-- Documentation/kernel-parameters.txt | 7
-rw-r--r-- Documentation/kobject.txt | 2
-rw-r--r-- Documentation/kselftest.txt (renamed from tools/testing/selftests/README.txt) | 30
-rw-r--r-- Documentation/mailbox.txt | 2
-rw-r--r-- Documentation/mic/mpssd/Makefile | 2
-rw-r--r-- Documentation/power/runtime_pm.txt | 10
-rw-r--r-- Documentation/power/suspend-and-interrupts.txt | 2
-rw-r--r-- Documentation/power/userland-swsusp.txt | 2
-rw-r--r-- Documentation/ramoops.txt | 13
-rw-r--r-- Documentation/s390/Debugging390.txt | 462
-rw-r--r-- Documentation/scsi/scsi_eh.txt | 4
-rw-r--r-- Documentation/usb/gadget_configfs.txt | 2
-rw-r--r-- Documentation/vm/hugetlbpage.txt | 4
-rw-r--r-- MAINTAINERS | 36
-rw-r--r-- arch/arm/Kconfig | 2
-rw-r--r-- arch/arm/boot/dts/bcm63138.dtsi | 2
-rw-r--r-- arch/arm/common/sa1111.c | 13
-rw-r--r-- arch/arm/include/asm/cacheflush.h | 10
-rw-r--r-- arch/arm/include/asm/device.h | 1
-rw-r--r-- arch/arm/include/asm/dma-mapping.h | 7
-rw-r--r-- arch/arm/include/asm/fixmap.h | 31
-rw-r--r-- arch/arm/include/asm/hw_irq.h | 1
-rw-r--r-- arch/arm/include/asm/mcpm.h | 17
-rw-r--r-- arch/arm/include/asm/percpu.h | 4
-rw-r--r-- arch/arm/include/asm/pgalloc.h | 10
-rw-r--r-- arch/arm/include/asm/pgtable-2level-hwdef.h | 2
-rw-r--r-- arch/arm/include/asm/pgtable-3level-hwdef.h | 1
-rw-r--r-- arch/arm/include/asm/pgtable.h | 62
-rw-r--r-- arch/arm/include/asm/ptrace.h | 5
-rw-r--r-- arch/arm/include/asm/thread_info.h | 9
-rw-r--r-- arch/arm/include/asm/vfp.h | 5
-rw-r--r-- arch/arm/include/asm/xen/page-coherent.h | 66
-rw-r--r-- arch/arm/include/asm/xen/page.h | 4
-rw-r--r-- arch/arm/kernel/Makefile | 4
-rw-r--r-- arch/arm/kernel/atags_compat.c | 6
-rw-r--r-- arch/arm/kernel/atags_parse.c | 5
-rw-r--r-- arch/arm/kernel/atags_proc.c | 4
-rw-r--r-- arch/arm/kernel/bios32.c | 2
-rw-r--r-- arch/arm/kernel/dma-isa.c | 4
-rw-r--r-- arch/arm/kernel/dma.c | 26
-rw-r--r-- arch/arm/kernel/entry-common.S | 235
-rw-r--r-- arch/arm/kernel/entry-ftrace.S | 243
-rw-r--r-- arch/arm/kernel/etm.c | 12
-rw-r--r-- arch/arm/kernel/fiq.c | 2
-rw-r--r-- arch/arm/kernel/ftrace.c | 19
-rw-r--r-- arch/arm/kernel/io.c | 5
-rw-r--r-- arch/arm/kernel/irq.c | 8
-rw-r--r-- arch/arm/kernel/iwmmxt.S | 13
-rw-r--r-- arch/arm/kernel/jump_label.c | 2
-rw-r--r-- arch/arm/kernel/kgdb.c | 29
-rw-r--r-- arch/arm/kernel/machine_kexec.c | 15
-rw-r--r-- arch/arm/kernel/module.c | 2
-rw-r--r-- arch/arm/kernel/patch.c | 92
-rw-r--r-- arch/arm/kernel/patch.h | 12
-rw-r--r-- arch/arm/kernel/process.c | 4
-rw-r--r-- arch/arm/kernel/return_address.c | 3
-rw-r--r-- arch/arm/kernel/setup.c | 1
-rw-r--r-- arch/arm/kernel/signal.c | 1
-rw-r--r-- arch/arm/kernel/smp.c | 15
-rw-r--r-- arch/arm/kernel/smp_twd.c | 4
-rw-r--r-- arch/arm/kernel/stacktrace.c | 4
-rw-r--r-- arch/arm/kernel/swp_emulate.c | 2
-rw-r--r-- arch/arm/kernel/thumbee.c | 2
-rw-r--r-- arch/arm/kernel/topology.c | 4
-rw-r--r-- arch/arm/kernel/traps.c | 42
-rw-r--r-- arch/arm/kernel/unwind.c | 3
-rw-r--r-- arch/arm/kernel/vmlinux.lds.S | 19
-rw-r--r-- arch/arm/kernel/xscale-cp0.c | 7
-rw-r--r-- arch/arm/lib/copy_from_user.S | 5
-rw-r--r-- arch/arm/lib/copy_template.S | 30
-rw-r--r-- arch/arm/lib/copy_to_user.S | 5
-rw-r--r-- arch/arm/lib/memcpy.S | 5
-rw-r--r-- arch/arm/lib/memmove.S | 28
-rw-r--r-- arch/arm/lib/memset.S | 12
-rw-r--r-- arch/arm/lib/memzero.S | 12
-rw-r--r-- arch/arm/mach-exynos/Kconfig | 2
-rw-r--r-- arch/arm/mach-sa1100/clock.c | 43
-rw-r--r-- arch/arm/mach-sa1100/collie.c | 55
-rw-r--r-- arch/arm/mach-sa1100/include/mach/entry-macro.S | 41
-rw-r--r-- arch/arm/mach-sa1100/include/mach/irqs.h | 102
-rw-r--r-- arch/arm/mach-sa1100/irq.c | 229
-rw-r--r-- arch/arm/mm/Kconfig | 21
-rw-r--r-- arch/arm/mm/Makefile | 2
-rw-r--r-- arch/arm/mm/alignment.c | 10
-rw-r--r-- arch/arm/mm/cache-feroceon-l2.c | 6
-rw-r--r-- arch/arm/mm/cache-tauros2.c | 12
-rw-r--r-- arch/arm/mm/context.c | 58
-rw-r--r-- arch/arm/mm/copypage-v6.c | 2
-rw-r--r-- arch/arm/mm/fault-armv.c | 6
-rw-r--r-- arch/arm/mm/fault.c | 31
-rw-r--r-- arch/arm/mm/flush.c | 2
-rw-r--r-- arch/arm/mm/highmem.c | 15
-rw-r--r-- arch/arm/mm/init.c | 153
-rw-r--r-- arch/arm/mm/mmu.c | 127
-rw-r--r-- arch/arm/mm/pageattr.c | 91
-rw-r--r-- arch/arm/mm/proc-v7.S | 5
-rw-r--r-- arch/arm/nwfpe/fpmodule.c | 8
-rw-r--r-- arch/arm/vfp/vfphw.S | 6
-rw-r--r-- arch/arm/vfp/vfpmodule.c | 102
-rw-r--r-- arch/arm/vfp/vfpsingle.c | 2
-rw-r--r-- arch/arm/xen/Makefile | 2
-rw-r--r-- arch/arm/xen/enlighten.c | 5
-rw-r--r-- arch/arm/xen/mm.c | 121
-rw-r--r-- arch/arm/xen/mm32.c | 202
-rw-r--r-- arch/arm64/include/asm/device.h | 1
-rw-r--r-- arch/arm64/include/asm/dma-mapping.h | 7
-rw-r--r-- arch/arm64/include/asm/xen/page-coherent.h | 44
-rw-r--r-- arch/arm64/kernel/psci.c | 4
-rw-r--r-- arch/mips/Kbuild.platforms | 2
-rw-r--r-- arch/mips/Kconfig | 98
-rw-r--r-- arch/mips/Kconfig.debug | 13
-rw-r--r-- arch/mips/Makefile | 1
-rw-r--r-- arch/mips/alchemy/common/clock.c | 7
-rw-r--r-- arch/mips/alchemy/common/setup.c | 6
-rw-r--r-- arch/mips/ar7/platform.c | 24
-rw-r--r-- arch/mips/ath25/Kconfig | 16
-rw-r--r-- arch/mips/ath25/Makefile | 16
-rw-r--r-- arch/mips/ath25/Platform | 6
-rw-r--r-- arch/mips/ath25/ar2315.c | 364
-rw-r--r-- arch/mips/ath25/ar2315.h | 22
-rw-r--r-- arch/mips/ath25/ar2315_regs.h | 410
-rw-r--r-- arch/mips/ath25/ar5312.c | 393
-rw-r--r-- arch/mips/ath25/ar5312.h | 22
-rw-r--r-- arch/mips/ath25/ar5312_regs.h | 224
-rw-r--r-- arch/mips/ath25/board.c | 234
-rw-r--r-- arch/mips/ath25/devices.c | 125
-rw-r--r-- arch/mips/ath25/devices.h | 43
-rw-r--r-- arch/mips/ath25/early_printk.c | 44
-rw-r--r-- arch/mips/ath25/prom.c | 26
-rw-r--r-- arch/mips/ath79/irq.c | 1
-rw-r--r-- arch/mips/ath79/prom.c | 38
-rw-r--r-- arch/mips/ath79/setup.c | 5
-rw-r--r-- arch/mips/bcm3384/Makefile | 1
-rw-r--r-- arch/mips/bcm3384/Platform | 7
-rw-r--r-- arch/mips/bcm3384/dma.c | 81
-rw-r--r-- arch/mips/bcm3384/irq.c | 193
-rw-r--r-- arch/mips/bcm3384/setup.c | 97
-rw-r--r-- arch/mips/bcm47xx/bcm47xx_private.h | 6
-rw-r--r-- arch/mips/bcm47xx/irq.c | 8
-rw-r--r-- arch/mips/bcm47xx/nvram.c | 155
-rw-r--r-- arch/mips/bcm47xx/setup.c | 91
-rw-r--r-- arch/mips/bcm47xx/sprom.c | 82
-rw-r--r-- arch/mips/bcm63xx/cpu.c | 2
-rw-r--r-- arch/mips/boot/dts/Makefile | 1
-rw-r--r-- arch/mips/boot/dts/bcm3384.dtsi | 109
-rw-r--r-- arch/mips/boot/dts/bcm93384wvg.dts | 32
-rw-r--r-- arch/mips/cavium-octeon/dma-octeon.c | 4
-rw-r--r-- arch/mips/cavium-octeon/executive/octeon-model.c | 49
-rw-r--r-- arch/mips/cavium-octeon/setup.c | 4
-rw-r--r-- arch/mips/configs/bcm3384_defconfig | 78
-rw-r--r-- arch/mips/fw/lib/cmdline.c | 8
-rw-r--r-- arch/mips/include/asm/atomic.h | 374
-rw-r--r-- arch/mips/include/asm/bitops.h | 35
-rw-r--r-- arch/mips/include/asm/bmips.h | 1
-rw-r--r-- arch/mips/include/asm/bootinfo.h | 13
-rw-r--r-- arch/mips/include/asm/clock.h | 3
-rw-r--r-- arch/mips/include/asm/cmpxchg.h | 27
-rw-r--r-- arch/mips/include/asm/compiler.h | 8
-rw-r--r-- arch/mips/include/asm/cpu-features.h | 4
-rw-r--r-- arch/mips/include/asm/cpu.h | 2
-rw-r--r-- arch/mips/include/asm/edac.h | 6
-rw-r--r-- arch/mips/include/asm/elf.h | 74
-rw-r--r-- arch/mips/include/asm/fpu.h | 49
-rw-r--r-- arch/mips/include/asm/futex.h | 27
-rw-r--r-- arch/mips/include/asm/hpet.h | 73
-rw-r--r-- arch/mips/include/asm/io.h | 8
-rw-r--r-- arch/mips/include/asm/irq.h | 3
-rw-r--r-- arch/mips/include/asm/irq_cpu.h | 4
-rw-r--r-- arch/mips/include/asm/mach-ath25/ath25_platform.h | 73
-rw-r--r-- arch/mips/include/asm/mach-ath25/cpu-feature-overrides.h | 64
-rw-r--r-- arch/mips/include/asm/mach-ath25/dma-coherence.h | 82
-rw-r--r-- arch/mips/include/asm/mach-ath25/gpio.h | 16
-rw-r--r-- arch/mips/include/asm/mach-ath25/war.h | 25
-rw-r--r-- arch/mips/include/asm/mach-au1x00/ioremap.h | 10
-rw-r--r-- arch/mips/include/asm/mach-bcm3384/dma-coherence.h | 48
-rw-r--r-- arch/mips/include/asm/mach-bcm3384/war.h | 24
-rw-r--r-- arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h | 36
-rw-r--r-- arch/mips/include/asm/mach-bcm63xx/ioremap.h | 6
-rw-r--r-- arch/mips/include/asm/mach-generic/ioremap.h | 4
-rw-r--r-- arch/mips/include/asm/mach-generic/irq.h | 6
-rw-r--r-- arch/mips/include/asm/mach-lantiq/lantiq.h | 2
-rw-r--r-- arch/mips/include/asm/mach-loongson/boot_param.h | 49
-rw-r--r-- arch/mips/include/asm/mach-loongson/dma-coherence.h | 6
-rw-r--r-- arch/mips/include/asm/mach-loongson/irq.h | 3
-rw-r--r-- arch/mips/include/asm/mach-loongson/loongson.h | 2
-rw-r--r-- arch/mips/include/asm/mach-loongson/loongson_hwmon.h | 55
-rw-r--r-- arch/mips/include/asm/mach-loongson/machine.h | 2
-rw-r--r-- arch/mips/include/asm/mach-loongson/topology.h | 2
-rw-r--r-- arch/mips/include/asm/mach-loongson/workarounds.h | 7
-rw-r--r-- arch/mips/include/asm/mach-loongson1/cpufreq.h | 23
-rw-r--r-- arch/mips/include/asm/mach-loongson1/loongson1.h | 8
-rw-r--r-- arch/mips/include/asm/mach-loongson1/platform.h | 10
-rw-r--r-- arch/mips/include/asm/mach-loongson1/regs-clk.h | 23
-rw-r--r-- arch/mips/include/asm/mach-loongson1/regs-mux.h | 67
-rw-r--r-- arch/mips/include/asm/mach-loongson1/regs-pwm.h | 29
-rw-r--r-- arch/mips/include/asm/mach-loongson1/regs-wdt.h | 11
-rw-r--r-- arch/mips/include/asm/mach-malta/irq.h | 1
-rw-r--r-- arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h | 25
-rw-r--r-- arch/mips/include/asm/mach-ralink/mt7620.h | 64
-rw-r--r-- arch/mips/include/asm/mach-ralink/pinmux.h | 55
-rw-r--r-- arch/mips/include/asm/mach-ralink/ralink_regs.h | 7
-rw-r--r-- arch/mips/include/asm/mach-ralink/rt305x.h | 35
-rw-r--r-- arch/mips/include/asm/mach-ralink/rt3883.h | 16
-rw-r--r-- arch/mips/include/asm/mach-sead3/irq.h | 1
-rw-r--r-- arch/mips/include/asm/mach-tx39xx/ioremap.h | 4
-rw-r--r-- arch/mips/include/asm/mach-tx49xx/ioremap.h | 4
-rw-r--r-- arch/mips/include/asm/mips-boards/maltaint.h | 24
-rw-r--r-- arch/mips/include/asm/mips-boards/sead3int.h | 15
-rw-r--r-- arch/mips/include/asm/mips-cm.h | 2
-rw-r--r-- arch/mips/include/asm/mips-cpc.h | 4
-rw-r--r-- arch/mips/include/asm/mipsregs.h | 43
-rw-r--r-- arch/mips/include/asm/octeon/cvmx-cmd-queue.h | 4
-rw-r--r-- arch/mips/include/asm/octeon/cvmx-pow.h | 69
-rw-r--r-- arch/mips/include/asm/octeon/cvmx.h | 63
-rw-r--r-- arch/mips/include/asm/octeon/octeon-feature.h | 52
-rw-r--r-- arch/mips/include/asm/octeon/octeon-model.h | 3
-rw-r--r-- arch/mips/include/asm/paccess.h | 2
-rw-r--r-- arch/mips/include/asm/page.h | 2
-rw-r--r-- arch/mips/include/asm/pci.h | 2
-rw-r--r-- arch/mips/include/asm/pgtable-32.h | 104
-rw-r--r-- arch/mips/include/asm/pgtable-bits.h | 36
-rw-r--r-- arch/mips/include/asm/pgtable.h | 18
-rw-r--r-- arch/mips/include/asm/prom.h | 1
-rw-r--r-- arch/mips/include/asm/r4kcache.h | 59
-rw-r--r-- arch/mips/include/asm/spinlock.h | 50
-rw-r--r-- arch/mips/include/asm/thread_info.h | 2
-rw-r--r-- arch/mips/include/asm/time.h | 6
-rw-r--r-- arch/mips/include/asm/types.h | 18
-rw-r--r-- arch/mips/include/asm/uaccess.h | 27
-rw-r--r-- arch/mips/include/asm/uasm.h | 2
-rw-r--r-- arch/mips/include/uapi/asm/inst.h | 7
-rw-r--r-- arch/mips/jz4740/setup.c | 2
-rw-r--r-- arch/mips/kernel/Makefile | 10
-rw-r--r-- arch/mips/kernel/cevt-gic.c | 105
-rw-r--r-- arch/mips/kernel/cevt-r4k.c | 6
-rw-r--r-- arch/mips/kernel/cpu-probe.c | 71
-rw-r--r-- arch/mips/kernel/crash_dump.c | 4
-rw-r--r-- arch/mips/kernel/csrc-gic.c | 40
-rw-r--r-- arch/mips/kernel/elf.c | 191
-rw-r--r-- arch/mips/kernel/i8259.c | 24
-rw-r--r-- arch/mips/kernel/irq-gic.c | 402
-rw-r--r-- arch/mips/kernel/irq_cpu.c | 48
-rw-r--r-- arch/mips/kernel/mips-cm.c | 12
-rw-r--r-- arch/mips/kernel/mips-cpc.c | 4
-rw-r--r-- arch/mips/kernel/mips_ksyms.c | 4
-rw-r--r-- arch/mips/kernel/perf_event_mipsxx.c | 30
-rw-r--r-- arch/mips/kernel/process.c | 54
-rw-r--r-- arch/mips/kernel/prom.c | 18
-rw-r--r-- arch/mips/kernel/setup.c | 14
-rw-r--r-- arch/mips/kernel/signal.c | 2
-rw-r--r-- arch/mips/kernel/smp-bmips.c | 114
-rw-r--r-- arch/mips/kernel/smp-cmp.c | 2
-rw-r--r-- arch/mips/kernel/smp-cps.c | 6
-rw-r--r-- arch/mips/kernel/smp-gic.c | 2
-rw-r--r-- arch/mips/kernel/smp-mt.c | 6
-rw-r--r-- arch/mips/kernel/syscall.c | 2
-rw-r--r-- arch/mips/kernel/traps.c | 66
-rw-r--r-- arch/mips/kernel/vdso.c | 15
-rw-r--r-- arch/mips/lantiq/falcon/sysctrl.c | 11
-rw-r--r-- arch/mips/lantiq/irq.c | 56
-rw-r--r-- arch/mips/lantiq/prom.c | 18
-rw-r--r-- arch/mips/lantiq/xway/Makefile | 2
-rw-r--r-- arch/mips/lantiq/xway/reset.c | 70
-rw-r--r-- arch/mips/lantiq/xway/vmmc.c | 69
-rw-r--r-- arch/mips/lantiq/xway/xrx200_phy_fw.c | 23
-rw-r--r-- arch/mips/lib/iomap.c | 18
-rw-r--r-- arch/mips/lib/memset.S | 6
-rw-r--r-- arch/mips/lib/mips-atomic.c | 20
-rw-r--r-- arch/mips/lib/r3k_dump_tlb.c | 11
-rw-r--r-- arch/mips/lib/strlen_user.S | 3
-rw-r--r-- arch/mips/loongson/Kconfig | 17
-rw-r--r-- arch/mips/loongson/common/cs5536/cs5536_pci.c | 25
-rw-r--r-- arch/mips/loongson/common/dma-swiotlb.c | 14
-rw-r--r-- arch/mips/loongson/common/early_printk.c | 2
-rw-r--r-- arch/mips/loongson/common/env.c | 28
-rw-r--r-- arch/mips/loongson/common/gpio.c | 2
-rw-r--r-- arch/mips/loongson/common/init.c | 1
-rw-r--r-- arch/mips/loongson/common/machtype.c | 23
-rw-r--r-- arch/mips/loongson/common/rtc.c | 2
-rw-r--r-- arch/mips/loongson/common/serial.c | 66
-rw-r--r-- arch/mips/loongson/common/setup.c | 1
-rw-r--r-- arch/mips/loongson/common/time.c | 5
-rw-r--r-- arch/mips/loongson/common/uart_base.c | 30
-rw-r--r-- arch/mips/loongson/lemote-2f/irq.c | 4
-rw-r--r-- arch/mips/loongson/lemote-2f/reset.c | 2
-rw-r--r-- arch/mips/loongson/loongson-3/Makefile | 4
-rw-r--r-- arch/mips/loongson/loongson-3/hpet.c | 257
-rw-r--r-- arch/mips/loongson/loongson-3/irq.c | 16
-rw-r--r-- arch/mips/loongson/loongson-3/numa.c | 12
-rw-r--r-- arch/mips/loongson/loongson-3/platform.c | 43
-rw-r--r-- arch/mips/loongson/loongson-3/smp.c | 70
-rw-r--r-- arch/mips/loongson1/Kconfig | 42
-rw-r--r-- arch/mips/loongson1/common/Makefile | 2
-rw-r--r-- arch/mips/loongson1/common/clock.c | 28
-rw-r--r-- arch/mips/loongson1/common/platform.c | 141
-rw-r--r-- arch/mips/loongson1/common/prom.c | 30
-rw-r--r-- arch/mips/loongson1/common/reset.c | 20
-rw-r--r-- arch/mips/loongson1/common/time.c | 226
-rw-r--r-- arch/mips/loongson1/ls1b/board.c | 12
-rw-r--r-- arch/mips/math-emu/cp1emu.c | 9
-rw-r--r-- arch/mips/math-emu/ieee754dp.c | 2
-rw-r--r-- arch/mips/math-emu/ieee754sp.c | 2
-rw-r--r-- arch/mips/mm/Makefile | 10
-rw-r--r-- arch/mips/mm/c-r4k.c | 43
-rw-r--r-- arch/mips/mm/dma-default.c | 5
-rw-r--r-- arch/mips/mm/gup.c | 2
-rw-r--r-- arch/mips/mm/init.c | 2
-rw-r--r-- arch/mips/mm/ioremap.c | 18
-rw-r--r-- arch/mips/mm/sc-r5k.c | 2
-rw-r--r-- arch/mips/mm/tlb-r4k.c | 2
-rw-r--r-- arch/mips/mm/tlbex.c | 18
-rw-r--r-- arch/mips/mm/uasm-mips.c | 2
-rw-r--r-- arch/mips/mm/uasm.c | 14
-rw-r--r-- arch/mips/mti-malta/malta-init.c | 2
-rw-r--r-- arch/mips/mti-malta/malta-int.c | 327
-rw-r--r-- arch/mips/mti-malta/malta-time.c | 51
-rw-r--r-- arch/mips/mti-sead3/sead3-ehci.c | 8
-rw-r--r-- arch/mips/mti-sead3/sead3-int.c | 131
-rw-r--r-- arch/mips/mti-sead3/sead3-net.c | 14
-rw-r--r-- arch/mips/mti-sead3/sead3-platform.c | 18
-rw-r--r-- arch/mips/mti-sead3/sead3-serial.c | 45
-rw-r--r-- arch/mips/mti-sead3/sead3-time.c | 35
-rw-r--r-- arch/mips/oprofile/Makefile | 1
-rw-r--r-- arch/mips/oprofile/backtrace.c | 5
-rw-r--r-- arch/mips/oprofile/common.c | 11
-rw-r--r-- arch/mips/oprofile/op_model_loongson3.c | 220
-rw-r--r-- arch/mips/oprofile/op_model_mipsxx.c | 18
-rw-r--r-- arch/mips/pci/Makefile | 2
-rw-r--r-- arch/mips/pci/ops-bcm63xx.c | 2
-rw-r--r-- arch/mips/pci/ops-nile4.c | 12
-rw-r--r-- arch/mips/pci/ops-pmcmsp.c | 12
-rw-r--r-- arch/mips/pci/pci-ar2315.c | 511
-rw-r--r-- arch/mips/pci/pci-ar71xx.c | 13
-rw-r--r-- arch/mips/pci/pci-ar724x.c | 23
-rw-r--r-- arch/mips/pci/pci-octeon.c | 2
-rw-r--r-- arch/mips/pci/pci-rt2880.c | 285
-rw-r--r-- arch/mips/pci/pci-rt3883.c | 9
-rw-r--r-- arch/mips/pci/pci-tx4939.c | 2
-rw-r--r-- arch/mips/pmcs-msp71xx/msp_prom.c | 2
-rw-r--r-- arch/mips/ralink/Kconfig | 3
-rw-r--r-- arch/mips/ralink/Makefile | 4
-rw-r--r-- arch/mips/ralink/bootrom.c | 48
-rw-r--r-- arch/mips/ralink/clk.c | 6
-rw-r--r-- arch/mips/ralink/common.h | 19
-rw-r--r-- arch/mips/ralink/early_printk.c | 45
-rw-r--r-- arch/mips/ralink/ill_acc.c | 87
-rw-r--r-- arch/mips/ralink/irq.c | 45
-rw-r--r-- arch/mips/ralink/mt7620.c | 465
-rw-r--r-- arch/mips/ralink/of.c | 32
-rw-r--r-- arch/mips/ralink/prom.c | 1
-rw-r--r-- arch/mips/ralink/rt288x.c | 65
-rw-r--r-- arch/mips/ralink/rt305x.c | 153
-rw-r--r-- arch/mips/ralink/rt3883.c | 174
-rw-r--r-- arch/mips/rb532/gpio.c | 2
-rw-r--r-- arch/mips/rb532/prom.c | 8
-rw-r--r-- arch/mips/sgi-ip22/ip22-mc.c | 6
-rw-r--r-- arch/mips/sgi-ip22/ip28-berr.c | 6
-rw-r--r-- arch/mips/sgi-ip27/ip27-klnuma.c | 5
-rw-r--r-- arch/mips/sgi-ip27/ip27-memory.c | 5
-rw-r--r-- arch/mips/sibyte/common/cfe.c | 8
-rw-r--r-- arch/mips/sibyte/swarm/platform.c | 2
-rw-r--r-- arch/mips/sibyte/swarm/rtc_m41t81.c | 4
-rw-r--r-- arch/mips/sibyte/swarm/rtc_xicor1241.c | 4
-rw-r--r-- arch/mips/sibyte/swarm/setup.c | 2
-rw-r--r-- arch/mips/txx9/generic/setup_tx4927.c | 4
-rw-r--r-- arch/mips/txx9/generic/setup_tx4938.c | 4
-rw-r--r-- arch/mips/txx9/generic/setup_tx4939.c | 4
-rw-r--r-- arch/powerpc/Kconfig | 5
-rw-r--r-- arch/powerpc/boot/dts/b4860emu.dts | 4
-rw-r--r-- arch/powerpc/boot/dts/b4qds.dtsi | 23
-rw-r--r-- arch/powerpc/boot/dts/bsc9131rdb.dtsi | 50
-rw-r--r-- arch/powerpc/boot/dts/fsl/b4420si-post.dtsi | 28
-rw-r--r-- arch/powerpc/boot/dts/fsl/b4860si-post.dtsi | 28
-rw-r--r-- arch/powerpc/boot/dts/fsl/p2041si-post.dtsi | 48
-rw-r--r-- arch/powerpc/boot/dts/fsl/p3041si-post.dtsi | 48
-rw-r--r-- arch/powerpc/boot/dts/fsl/p4080si-post.dtsi | 48
-rw-r--r-- arch/powerpc/boot/dts/fsl/p5020si-post.dtsi | 48
-rw-r--r-- arch/powerpc/boot/dts/fsl/p5040si-post.dtsi | 48
-rw-r--r-- arch/powerpc/boot/dts/fsl/qoriq-clockgen1.dtsi | 85
-rw-r--r-- arch/powerpc/boot/dts/fsl/qoriq-clockgen2.dtsi | 68
-rw-r--r-- arch/powerpc/boot/dts/fsl/t1040si-post.dtsi | 30
-rw-r--r-- arch/powerpc/boot/dts/fsl/t2081si-post.dtsi | 29
-rw-r--r-- arch/powerpc/boot/dts/fsl/t4240si-post.dtsi | 29
-rw-r--r-- arch/powerpc/boot/dts/p3041ds.dts | 20
-rw-r--r-- arch/powerpc/boot/dts/p5020ds.dts | 20
-rw-r--r-- arch/powerpc/boot/dts/p5040ds.dts | 20
-rw-r--r-- arch/powerpc/boot/dts/t104xrdb.dtsi | 7
-rw-r--r-- arch/powerpc/boot/dts/t208xqds.dtsi | 11
-rw-r--r-- arch/powerpc/boot/dts/t4240emu.dts | 4
-rw-r--r-- arch/powerpc/boot/main.c | 15
-rw-r--r-- arch/powerpc/boot/ops.h | 2
-rw-r--r-- arch/powerpc/boot/serial.c | 6
-rw-r--r-- arch/powerpc/configs/corenet32_smp_defconfig | 1
-rw-r--r-- arch/powerpc/configs/corenet64_smp_defconfig | 1
-rw-r--r-- arch/powerpc/configs/mpc85xx_defconfig | 1
-rw-r--r-- arch/powerpc/configs/mpc85xx_smp_defconfig | 1
-rw-r--r-- arch/powerpc/include/asm/bitops.h | 6
-rw-r--r-- arch/powerpc/include/asm/cputable.h | 10
-rw-r--r-- arch/powerpc/include/asm/eeh.h | 2
-rw-r--r-- arch/powerpc/include/asm/elf.h | 3
-rw-r--r-- arch/powerpc/include/asm/fsl_guts.h | 5
-rw-r--r-- arch/powerpc/include/asm/hardirq.h | 7
-rw-r--r-- arch/powerpc/include/asm/hugetlb.h | 8
-rw-r--r-- arch/powerpc/include/asm/io.h | 3
-rw-r--r-- arch/powerpc/include/asm/machdep.h | 19
-rw-r--r-- arch/powerpc/include/asm/mmu-8xx.h | 2
-rw-r--r-- arch/powerpc/include/asm/mmu-hash64.h | 22
-rw-r--r-- arch/powerpc/include/asm/opal.h | 122
-rw-r--r-- arch/powerpc/include/asm/paca.h | 7
-rw-r--r-- arch/powerpc/include/asm/page.h | 4
-rw-r--r-- arch/powerpc/include/asm/pgtable-ppc32.h | 20
-rw-r--r-- arch/powerpc/include/asm/pgtable-ppc64-4k.h | 16
-rw-r--r-- arch/powerpc/include/asm/pgtable-ppc64-64k.h | 3
-rw-r--r-- arch/powerpc/include/asm/pgtable-ppc64.h | 52
-rw-r--r-- arch/powerpc/include/asm/pgtable.h | 6
-rw-r--r-- arch/powerpc/include/asm/processor.h | 2
-rw-r--r-- arch/powerpc/include/asm/pte-8xx.h | 7
-rw-r--r-- arch/powerpc/include/asm/setup.h | 3
-rw-r--r-- arch/powerpc/include/asm/thread_info.h | 5
-rw-r--r-- arch/powerpc/include/asm/tlbflush.h | 10
-rw-r--r-- arch/powerpc/include/asm/vga.h | 4
-rw-r--r-- arch/powerpc/include/asm/xics.h | 8
-rw-r--r-- arch/powerpc/kernel/align.c | 2
-rw-r--r-- arch/powerpc/kernel/asm-offsets.c | 7
-rw-r--r-- arch/powerpc/kernel/crash_dump.c | 1
-rw-r--r-- arch/powerpc/kernel/dbell.c | 2
-rw-r--r-- arch/powerpc/kernel/eeh.c | 41
-rw-r--r-- arch/powerpc/kernel/eeh_driver.c | 10
-rw-r--r-- arch/powerpc/kernel/entry_32.S | 12
-rw-r--r-- arch/powerpc/kernel/entry_64.S | 35
-rw-r--r-- arch/powerpc/kernel/exceptions-64s.S | 34
-rw-r--r-- arch/powerpc/kernel/ftrace.c | 73
-rw-r--r-- arch/powerpc/kernel/head_8xx.S | 230
-rw-r--r-- arch/powerpc/kernel/hw_breakpoint.c | 6
-rw-r--r-- arch/powerpc/kernel/idle_power7.S | 12
-rw-r--r-- arch/powerpc/kernel/iommu.c | 2
-rw-r--r-- arch/powerpc/kernel/irq.c | 5
-rw-r--r-- arch/powerpc/kernel/kgdb.c | 2
-rw-r--r-- arch/powerpc/kernel/kprobes.c | 6
-rw-r--r-- arch/powerpc/kernel/mce.c | 24
-rw-r--r-- arch/powerpc/kernel/mce_power.c | 4
-rw-r--r-- arch/powerpc/kernel/pci-common.c | 3
-rw-r--r-- arch/powerpc/kernel/pci_32.c | 4
-rw-r--r-- arch/powerpc/kernel/pci_64.c | 1
-rw-r--r-- arch/powerpc/kernel/process.c | 36
-rw-r--r-- arch/powerpc/kernel/prom.c | 11
-rw-r--r-- arch/powerpc/kernel/rtas-proc.c | 20
-rw-r--r-- arch/powerpc/kernel/rtas.c | 4
-rw-r--r-- arch/powerpc/kernel/rtas_pci.c | 1
-rw-r--r-- arch/powerpc/kernel/setup-common.c | 6
-rw-r--r-- arch/powerpc/kernel/setup_32.c | 11
-rw-r--r-- arch/powerpc/kernel/setup_64.c | 35
-rw-r--r-- arch/powerpc/kernel/smp.c | 6
-rw-r--r-- arch/powerpc/kernel/sysfs.c | 4
-rw-r--r-- arch/powerpc/kernel/time.c | 23
-rw-r--r-- arch/powerpc/kernel/traps.c | 8
-rw-r--r-- arch/powerpc/kernel/udbg_16550.c | 6
-rw-r--r-- arch/powerpc/kernel/vdso.c | 1
-rw-r--r-- arch/powerpc/kvm/book3s_hv_builtin.c | 3
-rw-r--r-- arch/powerpc/kvm/book3s_hv_rmhandlers.S | 54
-rw-r--r-- arch/powerpc/kvm/e500.c | 14
-rw-r--r-- arch/powerpc/kvm/e500_mmu_host.c | 6
-rw-r--r-- arch/powerpc/kvm/e500mc.c | 4
-rw-r--r-- arch/powerpc/lib/Makefile | 1
-rw-r--r-- arch/powerpc/lib/alloc.c | 4
-rw-r--r-- arch/powerpc/lib/copyuser_power7.S | 2
-rw-r--r-- arch/powerpc/lib/devres.c | 43
-rw-r--r-- arch/powerpc/lib/memcpy_power7.S | 2
-rw-r--r-- arch/powerpc/lib/sstep.c | 6
-rw-r--r-- arch/powerpc/mm/Makefile | 2
-rw-r--r-- arch/powerpc/mm/fault.c | 7
-rw-r--r-- arch/powerpc/mm/gup.c | 235
-rw-r--r-- arch/powerpc/mm/hash_low_64.S | 19
-rw-r--r-- arch/powerpc/mm/hash_native_64.c | 41
-rw-r--r-- arch/powerpc/mm/hash_utils_64.c | 114
-rw-r--r-- arch/powerpc/mm/hugepage-hash64.c | 60
-rw-r--r-- arch/powerpc/mm/hugetlbpage-book3e.c | 6
-rw-r--r-- arch/powerpc/mm/hugetlbpage-hash64.c | 6
-rw-r--r-- arch/powerpc/mm/hugetlbpage.c | 51
-rw-r--r-- arch/powerpc/mm/init_32.c | 10
-rw-r--r-- arch/powerpc/mm/init_64.c | 1
-rw-r--r-- arch/powerpc/mm/mem.c | 77
-rw-r--r-- arch/powerpc/mm/mmu_context_nohash.c | 8
-rw-r--r-- arch/powerpc/mm/mmu_decl.h | 2
-rw-r--r-- arch/powerpc/mm/numa.c | 224
-rw-r--r-- arch/powerpc/mm/pgtable_32.c | 3
-rw-r--r-- arch/powerpc/mm/pgtable_64.c | 104
-rw-r--r-- arch/powerpc/oprofile/backtrace.c | 6
-rw-r--r-- arch/powerpc/perf/core-book3s.c | 22
-rw-r--r-- arch/powerpc/perf/core-fsl-emb.c | 6
-rw-r--r-- arch/powerpc/platforms/44x/Kconfig | 1
-rw-r--r-- arch/powerpc/platforms/44x/ppc476.c | 2
-rw-r--r-- arch/powerpc/platforms/512x/mpc512x_shared.c | 9
-rw-r--r-- arch/powerpc/platforms/52xx/efika.c | 3
-rw-r--r-- arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c | 8
-rw-r--r-- arch/powerpc/platforms/85xx/corenet_generic.c | 2
-rw-r--r-- arch/powerpc/platforms/85xx/sgy_cts1000.c | 4
-rw-r--r-- arch/powerpc/platforms/8xx/Kconfig | 4
-rw-r--r-- arch/powerpc/platforms/cell/beat_htab.c | 4
-rw-r--r-- arch/powerpc/platforms/cell/celleb_pci.c | 6
-rw-r--r-- arch/powerpc/platforms/cell/celleb_scc_epci.c | 1
-rw-r--r-- arch/powerpc/platforms/cell/celleb_scc_pciex.c | 1
-rw-r--r-- arch/powerpc/platforms/cell/celleb_setup.c | 4
-rw-r--r-- arch/powerpc/platforms/cell/interrupt.c | 6
-rw-r--r-- arch/powerpc/platforms/cell/qpace_setup.c | 2
-rw-r--r-- arch/powerpc/platforms/cell/setup.c | 2
-rw-r--r-- arch/powerpc/platforms/cell/spu_base.c | 5
-rw-r--r-- arch/powerpc/platforms/cell/spufs/fault.c | 2
-rw-r--r-- arch/powerpc/platforms/chrp/setup.c | 3
-rw-r--r-- arch/powerpc/platforms/embedded6xx/gamecube.c | 3
-rw-r--r-- arch/powerpc/platforms/embedded6xx/linkstation.c | 4
-rw-r--r-- arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c | 6
-rw-r--r-- arch/powerpc/platforms/embedded6xx/wii.c | 3
-rw-r--r-- arch/powerpc/platforms/maple/pci.c | 1
-rw-r--r-- arch/powerpc/platforms/maple/setup.c | 4
-rw-r--r-- arch/powerpc/platforms/powermac/nvram.c | 6
-rw-r--r-- arch/powerpc/platforms/powermac/pci.c | 1
-rw-r--r-- arch/powerpc/platforms/powermac/setup.c | 3
-rw-r--r-- arch/powerpc/platforms/powernv/eeh-ioda.c | 16
-rw-r--r-- arch/powerpc/platforms/powernv/opal-async.c | 3
-rw-r--r-- arch/powerpc/platforms/powernv/opal-rtc.c | 65
-rw-r--r-- arch/powerpc/platforms/powernv/opal-tracepoints.c | 4
-rw-r--r-- arch/powerpc/platforms/powernv/opal-wrappers.S | 6
-rw-r--r-- arch/powerpc/platforms/powernv/opal.c | 21
-rw-r--r-- arch/powerpc/platforms/powernv/pci-ioda.c | 217
-rw-r--r-- arch/powerpc/platforms/powernv/pci-p5ioc2.c | 44
-rw-r--r-- arch/powerpc/platforms/powernv/pci.c | 1
-rw-r--r-- arch/powerpc/platforms/powernv/pci.h | 2
-rw-r--r-- arch/powerpc/platforms/powernv/setup.c | 6
-rw-r--r-- arch/powerpc/platforms/powernv/smp.c | 23
-rw-r--r-- arch/powerpc/platforms/ps3/htab.c | 2
-rw-r--r-- arch/powerpc/platforms/ps3/interrupt.c | 2
-rw-r--r-- arch/powerpc/platforms/ps3/setup.c | 9
-rw-r--r-- arch/powerpc/platforms/pseries/dtl.c | 2
-rw-r--r-- arch/powerpc/platforms/pseries/hotplug-memory.c | 21
-rw-r--r-- arch/powerpc/platforms/pseries/hvCall.S | 4
-rw-r--r-- arch/powerpc/platforms/pseries/hvCall_inst.c | 4
-rw-r--r-- arch/powerpc/platforms/pseries/iommu.c | 11
-rw-r--r-- arch/powerpc/platforms/pseries/lpar.c | 10
-rw-r--r-- arch/powerpc/platforms/pseries/nvram.c | 2
-rw-r--r-- arch/powerpc/platforms/pseries/pci.c | 2
-rw-r--r-- arch/powerpc/platforms/pseries/ras.c | 4
-rw-r--r-- arch/powerpc/platforms/pseries/setup.c | 65
-rw-r--r-- arch/powerpc/sysdev/fsl_msi.c | 1
-rw-r--r-- arch/powerpc/sysdev/fsl_pci.c | 3
-rw-r--r-- arch/powerpc/sysdev/fsl_rio.c | 104
-rw-r--r-- arch/powerpc/sysdev/fsl_rio.h | 13
-rw-r--r-- arch/powerpc/sysdev/fsl_soc.c | 5
-rw-r--r-- arch/powerpc/sysdev/ipic.c | 1
-rw-r--r-- arch/powerpc/sysdev/mpc5xxx_clocks.c | 3
-rw-r--r-- arch/powerpc/sysdev/mpic.c | 1
-rw-r--r-- arch/powerpc/sysdev/mpic_pasemi_msi.c | 1
-rw-r--r-- arch/powerpc/sysdev/mpic_u3msi.c | 1
-rw-r--r-- arch/powerpc/sysdev/ppc4xx_cpm.c | 8
-rw-r--r-- arch/powerpc/sysdev/ppc4xx_msi.c | 1
-rw-r--r-- arch/powerpc/sysdev/ppc4xx_pci.c | 1
-rw-r--r-- arch/powerpc/sysdev/qe_lib/qe.c | 1
-rw-r--r-- arch/powerpc/sysdev/qe_lib/qe_ic.c | 1
-rw-r--r-- arch/powerpc/sysdev/uic.c | 1
-rw-r--r-- arch/powerpc/sysdev/xics/xics-common.c | 2
-rw-r--r-- arch/powerpc/xmon/xmon.c | 82
-rw-r--r-- arch/s390/include/asm/cmpxchg.h | 240
-rw-r--r-- arch/s390/include/asm/cputime.h | 46
-rw-r--r-- arch/s390/include/asm/debug.h | 29
-rw-r--r-- arch/s390/include/asm/ftrace.h | 54
-rw-r--r-- arch/s390/include/asm/idle.h | 3
-rw-r--r-- arch/s390/include/asm/io.h | 9
-rw-r--r-- arch/s390/include/asm/irq.h | 11
-rw-r--r-- arch/s390/include/asm/kprobes.h | 1
-rw-r--r-- arch/s390/include/asm/lowcore.h | 4
-rw-r--r-- arch/s390/include/asm/pci.h | 5
-rw-r--r-- arch/s390/include/asm/pci_io.h | 6
-rw-r--r-- arch/s390/include/asm/pgalloc.h | 2
-rw-r--r-- arch/s390/include/asm/pgtable.h | 33
-rw-r--r-- arch/s390/include/asm/processor.h | 2
-rw-r--r-- arch/s390/include/asm/spinlock.h | 9
-rw-r--r-- arch/s390/include/asm/tlb.h | 1
-rw-r--r-- arch/s390/include/uapi/asm/unistd.h | 4
-rw-r--r-- arch/s390/kernel/asm-offsets.c | 5
-rw-r--r-- arch/s390/kernel/compat_signal.c | 2
-rw-r--r-- arch/s390/kernel/compat_wrapper.c | 2
-rw-r--r-- arch/s390/kernel/debug.c | 12
-rw-r--r-- arch/s390/kernel/dumpstack.c | 3
-rw-r--r-- arch/s390/kernel/early.c | 4
-rw-r--r-- arch/s390/kernel/entry.S | 424
-rw-r--r-- arch/s390/kernel/entry.h | 2
-rw-r--r-- arch/s390/kernel/entry64.S | 372
-rw-r--r-- arch/s390/kernel/ftrace.c | 136
-rw-r--r-- arch/s390/kernel/idle.c | 29
-rw-r--r-- arch/s390/kernel/irq.c | 5
-rw-r--r-- arch/s390/kernel/kprobes.c | 178
-rw-r--r-- arch/s390/kernel/mcount.S | 1
-rw-r--r-- arch/s390/kernel/perf_cpum_sf.c | 1
-rw-r--r-- arch/s390/kernel/process.c | 3
-rw-r--r-- arch/s390/kernel/ptrace.c | 115
-rw-r--r-- arch/s390/kernel/setup.c | 2
-rw-r--r-- arch/s390/kernel/signal.c | 2
-rw-r--r-- arch/s390/kernel/smp.c | 1
-rw-r--r-- arch/s390/kernel/syscalls.S | 2
-rw-r--r-- arch/s390/kernel/time.c | 3
-rw-r--r-- arch/s390/kernel/traps.c | 25
-rw-r--r-- arch/s390/kvm/kvm-s390.c | 2
-rw-r--r-- arch/s390/kvm/priv.c | 17
-rw-r--r-- arch/s390/mm/fault.c | 10
-rw-r--r-- arch/s390/mm/pgtable.c | 185
-rw-r--r-- arch/s390/pci/Makefile | 2
-rw-r--r-- arch/s390/pci/pci.c | 9
-rw-r--r-- arch/s390/pci/pci_clp.c | 1
-rw-r--r-- arch/s390/pci/pci_debug.c | 7
-rw-r--r-- arch/s390/pci/pci_mmio.c | 115
-rw-r--r-- arch/x86/include/asm/xen/cpuid.h | 91
-rw-r--r-- arch/x86/include/asm/xen/page-coherent.h | 4
-rw-r--r-- arch/x86/include/asm/xen/page.h | 7
-rw-r--r-- arch/x86/pci/xen.c | 31
-rw-r--r-- drivers/acpi/pci_root.c | 16
-rw-r--r-- drivers/amba/bus.c | 15
-rw-r--r-- drivers/ata/ahci_sunxi.c | 16
-rw-r--r-- drivers/ata/libata-core.c | 20
-rw-r--r-- drivers/ata/libata-eh.c | 7
-rw-r--r-- drivers/ata/libata-scsi.c | 31
-rw-r--r-- drivers/ata/libata-transport.c | 1
-rw-r--r-- drivers/bcma/driver_mips.c | 13
-rw-r--r-- drivers/char/ipmi/Kconfig | 14
-rw-r--r-- drivers/char/ipmi/Makefile | 2
-rw-r--r-- drivers/char/ipmi/ipmi_msghandler.c | 560
-rw-r--r-- drivers/char/ipmi/ipmi_powernv.c | 310
-rw-r--r-- drivers/char/ipmi/ipmi_si_intf.c | 497
-rw-r--r-- drivers/char/ipmi/ipmi_ssif.c | 1870
-rw-r--r-- drivers/clk/Kconfig | 2
-rw-r--r-- drivers/clk/clk-ls1x.c | 109
-rw-r--r-- drivers/clocksource/Kconfig | 5
-rw-r--r-- drivers/clocksource/Makefile | 1
-rw-r--r-- drivers/clocksource/mips-gic-timer.c | 166
-rw-r--r-- drivers/cpufreq/cpufreq-dt.c | 2
-rw-r--r-- drivers/dma-buf/fence.c | 2
-rw-r--r-- drivers/dma/Kconfig | 11
-rw-r--r-- drivers/dma/Makefile | 1
-rw-r--r-- drivers/dma/amba-pl08x.c | 2
-rw-r--r-- drivers/dma/at_xdmac.c | 1524
-rw-r--r-- drivers/dma/bcm2835-dma.c | 3
-rw-r--r-- drivers/dma/cppi41.c | 12
-rw-r--r-- drivers/dma/dma-jz4740.c | 4
-rw-r--r-- drivers/dma/dmaengine.c | 3
-rw-r--r-- drivers/dma/fsl-edma.c | 191
-rw-r--r-- drivers/dma/fsldma.c | 1
-rw-r--r-- drivers/dma/imx-sdma.c | 6
-rw-r--r-- drivers/dma/ioat/dma_v3.c | 35
-rw-r--r-- drivers/dma/iop-adma.c | 1
-rw-r--r-- drivers/dma/k3dma.c | 4
-rw-r--r-- drivers/dma/mmp_pdma.c | 1
-rw-r--r-- drivers/dma/mmp_tdma.c | 1
-rw-r--r-- drivers/dma/mpc512x_dma.c | 13
-rw-r--r-- drivers/dma/nbpfaxi.c | 1
-rw-r--r-- drivers/dma/omap-dma.c | 2
-rw-r--r-- drivers/dma/pch_dma.c | 2
-rw-r--r-- drivers/dma/pl330.c | 106
-rw-r--r-- drivers/dma/qcom_bam_dma.c | 231
-rw-r--r-- drivers/dma/s3c24xx-dma.c | 1
-rw-r--r-- drivers/dma/sa11x0-dma.c | 3
-rw-r--r-- drivers/dma/sh/rcar-audmapp.c | 3
-rw-r--r-- drivers/dma/sh/rcar-hpbdma.c | 3
-rw-r--r-- drivers/dma/sh/shdma-base.c | 4
-rw-r--r-- drivers/dma/sh/shdma-of.c | 1
-rw-r--r-- drivers/dma/sh/shdmac.c | 2
-rw-r--r-- drivers/dma/sh/sudmac.c | 3
-rw-r--r-- drivers/dma/sirf-dma.c | 5
-rw-r--r-- drivers/dma/ste_dma40.c | 1
-rw-r--r-- drivers/dma/sun6i-dma.c | 122
-rw-r--r-- drivers/dma/tegra20-apb-dma.c | 1
-rw-r--r-- drivers/dma/timb_dma.c | 1
-rw-r--r-- drivers/dma/txx9dmac.c | 2
-rw-r--r-- drivers/dma/txx9dmac.h | 4
-rw-r--r-- drivers/dma/xilinx/xilinx_vdma.c | 13
-rw-r--r-- drivers/gpio/gpio-msm-v1.c | 2
-rw-r--r-- drivers/gpio/gpio-spear-spics.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/mkregtable.c | 24
-rw-r--r-- drivers/hid/Kconfig | 22
-rw-r--r-- drivers/hid/Makefile | 2
-rw-r--r-- drivers/hid/hid-core.c | 36
-rw-r--r-- drivers/hid/hid-ids.h | 12
-rw-r--r-- drivers/hid/hid-input.c | 4
-rw-r--r-- drivers/hid/hid-lenovo.c | 13
-rw-r--r-- drivers/hid/hid-logitech-dj.c | 397
-rw-r--r-- drivers/hid/hid-logitech-dj.h | 125
-rw-r--r-- drivers/hid/hid-logitech-hidpp.c | 1241
-rw-r--r-- drivers/hid/hid-microsoft.c | 2
-rw-r--r-- drivers/hid/hid-multitouch.c | 27
-rw-r--r-- drivers/hid/hid-plantronics.c | 55
-rw-r--r-- drivers/hid/hid-rmi.c | 83
-rw-r--r-- drivers/hid/hid-roccat-kone.c | 9
-rw-r--r-- drivers/hid/hid-saitek.c | 4
-rw-r--r-- drivers/hid/hid-sony.c | 150
-rw-r--r-- drivers/hid/i2c-hid/i2c-hid.c | 16
-rw-r--r-- drivers/hid/usbhid/hid-core.c | 39
-rw-r--r-- drivers/hid/usbhid/hid-quirks.c | 2
-rw-r--r-- drivers/hid/usbhid/usbhid.h | 1
-rw-r--r-- drivers/hid/wacom.h | 2
-rw-r--r-- drivers/hid/wacom_sys.c | 68
-rw-r--r-- drivers/hid/wacom_wac.c | 216
-rw-r--r-- drivers/hid/wacom_wac.h | 3
-rw-r--r-- drivers/iommu/Kconfig | 25
-rw-r--r-- drivers/iommu/Makefile | 2
-rw-r--r-- drivers/iommu/amd_iommu.c | 2
-rw-r--r-- drivers/iommu/amd_iommu_v2.c | 8
-rw-r--r-- drivers/iommu/arm-smmu.c | 135
-rw-r--r-- drivers/iommu/dmar.c | 532
-rw-r--r-- drivers/iommu/intel-iommu.c | 307
-rw-r--r-- drivers/iommu/intel_irq_remapping.c | 249
-rw-r--r-- drivers/iommu/iommu.c | 20
-rw-r--r-- drivers/iommu/ipmmu-vmsa.c | 2
-rw-r--r-- drivers/iommu/msm_iommu.c | 3
-rw-r--r-- drivers/iommu/msm_iommu_dev.c | 10
-rw-r--r-- drivers/iommu/omap-iommu-debug.c | 242
-rw-r--r-- drivers/iommu/omap-iommu.c | 312
-rw-r--r-- drivers/iommu/omap-iommu.h | 98
-rw-r--r-- drivers/iommu/omap-iommu2.c | 337
-rw-r--r-- drivers/iommu/rockchip-iommu.c | 1038
-rw-r--r-- drivers/irqchip/Kconfig | 4
-rw-r--r-- drivers/irqchip/Makefile | 1
-rw-r--r-- drivers/irqchip/irq-mips-gic.c | 789
-rw-r--r-- drivers/isdn/mISDN/l1oip_core.c | 2
-rw-r--r-- drivers/media/pci/cx18/cx18-driver.h | 2
-rw-r--r-- drivers/media/pci/ttpci/budget-patch.c | 2
-rw-r--r-- drivers/misc/cxl/cxl.h | 15
-rw-r--r-- drivers/misc/cxl/fault.c | 8
-rw-r--r-- drivers/misc/cxl/irq.c | 144
-rw-r--r-- drivers/misc/cxl/native.c | 14
-rw-r--r-- drivers/mtd/ubi/attach.c | 126
-rw-r--r-- drivers/mtd/ubi/block.c | 41
-rw-r--r-- drivers/mtd/ubi/build.c | 126
-rw-r--r-- drivers/mtd/ubi/cdev.c | 36
-rw-r--r-- drivers/mtd/ubi/debug.c | 10
-rw-r--r-- drivers/mtd/ubi/eba.c | 53
-rw-r--r-- drivers/mtd/ubi/fastmap.c | 96
-rw-r--r-- drivers/mtd/ubi/io.c | 150
-rw-r--r-- drivers/mtd/ubi/kapi.c | 6
-rw-r--r-- drivers/mtd/ubi/misc.c | 4
-rw-r--r-- drivers/mtd/ubi/ubi.h | 13
-rw-r--r-- drivers/mtd/ubi/upd.c | 10
-rw-r--r-- drivers/mtd/ubi/vmt.c | 69
-rw-r--r-- drivers/mtd/ubi/vtbl.c | 71
-rw-r--r-- drivers/mtd/ubi/wl.c | 80
-rw-r--r-- drivers/net/ethernet/atheros/atl1c/atl1c_main.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath5k/Kconfig | 14
-rw-r--r-- drivers/net/wireless/ath/ath5k/Makefile | 1
-rw-r--r-- drivers/net/wireless/ath/ath5k/ahb.c | 234
-rw-r--r-- drivers/net/wireless/ath/ath5k/ath5k.h | 28
-rw-r--r-- drivers/net/wireless/ath/ath5k/base.c | 14
-rw-r--r-- drivers/net/wireless/ath/ath5k/led.c | 6
-rw-r--r-- drivers/pci/pci.c | 5
-rw-r--r-- drivers/pcmcia/Kconfig | 2
-rw-r--r-- drivers/pcmcia/sa1100_generic.c | 1
-rw-r--r-- drivers/pcmcia/sa1111_generic.c | 7
-rw-r--r-- drivers/pcmcia/sa11xx_base.c | 14
-rw-r--r-- drivers/pcmcia/soc_common.c | 4
-rw-r--r-- drivers/pinctrl/spear/pinctrl-plgpio.c | 2
-rw-r--r-- drivers/platform/x86/intel_ips.c | 2
-rw-r--r-- drivers/rtc/Kconfig | 11
-rw-r--r-- drivers/rtc/Makefile | 1
-rw-r--r-- drivers/rtc/rtc-opal.c | 261
-rw-r--r-- drivers/s390/block/dasd.c | 31
-rw-r--r-- drivers/s390/block/dasd_genhd.c | 26
-rw-r--r-- drivers/s390/block/scm_blk.c | 222
-rw-r--r-- drivers/s390/block/scm_blk.h | 6
-rw-r--r-- drivers/s390/block/scm_blk_cluster.c | 69
-rw-r--r-- drivers/s390/char/Kconfig | 10
-rw-r--r-- drivers/s390/char/sclp_async.c | 3
-rw-r--r-- drivers/s390/char/tape_3590.c | 4
-rw-r--r-- drivers/s390/cio/eadm_sch.c | 2
-rw-r--r-- drivers/scsi/aic94xx/aic94xx_task.c | 10
-rw-r--r-- drivers/scsi/isci/request.c | 4
-rw-r--r-- drivers/scsi/isci/task.c | 2
-rw-r--r-- drivers/scsi/libsas/sas_ata.c | 66
-rw-r--r-- drivers/scsi/mac_scsi.c | 2
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_base.c | 2
-rw-r--r-- drivers/scsi/mpt2sas/mpt2sas_ctl.h | 2
-rw-r--r-- drivers/scsi/mvsas/mv_sas.c | 4
-rw-r--r-- drivers/scsi/pm8001/pm8001_hwi.c | 2
-rw-r--r-- drivers/scsi/pm8001/pm80xx_hwi.c | 2
-rw-r--r-- drivers/scsi/ps3rom.c | 1
-rw-r--r-- drivers/ssb/driver_mipscore.c | 14
-rw-r--r-- drivers/target/target_core_xcopy.c | 2
-rw-r--r-- drivers/tc/tc.c | 36
-rw-r--r-- drivers/tty/serial/Kconfig | 2
-rw-r--r-- drivers/usb/core/quirks.c | 3
-rw-r--r-- drivers/usb/gadget/function/f_hid.c | 2
-rw-r--r-- drivers/usb/gadget/legacy/zero.c | 2
-rw-r--r-- drivers/usb/host/ehci-sysfs.c | 2
-rw-r--r-- drivers/usb/host/fotg210-hcd.c | 2
-rw-r--r-- drivers/usb/host/fusbh200-hcd.c | 2
-rw-r--r-- drivers/usb/serial/usb-serial-simple.c | 2
-rw-r--r-- drivers/video/fbdev/exynos/exynos_mipi_dsi.c | 2
-rw-r--r-- drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c | 2
-rw-r--r-- drivers/video/fbdev/sa1100fb.c | 30
-rw-r--r-- drivers/video/fbdev/sa1100fb.h | 1
-rw-r--r-- drivers/xen/swiotlb-xen.c | 19
-rw-r--r-- drivers/xen/xen-pciback/passthrough.c | 14
-rw-r--r-- drivers/xen/xen-pciback/pci_stub.c | 112
-rw-r--r-- drivers/xen/xen-pciback/pciback.h | 7
-rw-r--r-- drivers/xen/xen-pciback/vpci.c | 14
-rw-r--r-- drivers/xen/xen-pciback/xenbus.c | 4
-rw-r--r-- fs/Kconfig.binfmt | 3
-rw-r--r-- fs/binfmt_elf.c | 238
-rw-r--r-- fs/btrfs/check-integrity.c | 163
-rw-r--r-- fs/btrfs/compression.c | 18
-rw-r--r-- fs/btrfs/ctree.c | 2
-rw-r--r-- fs/btrfs/ctree.h | 85
-rw-r--r-- fs/btrfs/dev-replace.c | 32
-rw-r--r-- fs/btrfs/dir-item.c | 10
-rw-r--r-- fs/btrfs/disk-io.c | 49
-rw-r--r-- fs/btrfs/extent-tree.c | 211
-rw-r--r-- fs/btrfs/extent_io.c | 41
-rw-r--r-- fs/btrfs/extent_io.h | 1
-rw-r--r-- fs/btrfs/extent_map.c | 2
-rw-r--r-- fs/btrfs/file.c | 51
-rw-r--r-- fs/btrfs/free-space-cache.c | 117
-rw-r--r-- fs/btrfs/free-space-cache.h | 2
-rw-r--r-- fs/btrfs/inode-map.c | 4
-rw-r--r-- fs/btrfs/inode.c | 152
-rw-r--r-- fs/btrfs/ioctl.c | 36
-rw-r--r-- fs/btrfs/ordered-data.c | 49
-rw-r--r-- fs/btrfs/ordered-data.h | 12
-rw-r--r-- fs/btrfs/raid56.c | 763
-rw-r--r-- fs/btrfs/raid56.h | 16
-rw-r--r-- fs/btrfs/scrub.c | 893
-rw-r--r-- fs/btrfs/send.c | 49
-rw-r--r-- fs/btrfs/super.c | 94
-rw-r--r-- fs/btrfs/sysfs.c | 34
-rw-r--r-- fs/btrfs/transaction.c | 166
-rw-r--r-- fs/btrfs/transaction.h | 6
-rw-r--r-- fs/btrfs/tree-log.c | 50
-rw-r--r-- fs/btrfs/volumes.c | 90
-rw-r--r-- fs/btrfs/volumes.h | 32
-rw-r--r-- fs/btrfs/xattr.c | 150
-rw-r--r-- fs/ext4/ext4.h | 41
-rw-r--r-- fs/ext4/extents.c | 223
-rw-r--r-- fs/ext4/extents_status.c | 321
-rw-r--r-- fs/ext4/extents_status.h | 82
-rw-r--r-- fs/ext4/file.c | 220
-rw-r--r-- fs/ext4/inline.c | 35
-rw-r--r-- fs/ext4/inode.c | 37
-rw-r--r-- fs/ext4/ioctl.c | 2
-rw-r--r-- fs/ext4/mballoc.c | 15
-rw-r--r-- fs/ext4/migrate.c | 2
-rw-r--r-- fs/ext4/move_extent.c | 8
-rw-r--r-- fs/ext4/namei.c | 1
-rw-r--r-- fs/ext4/resize.c | 6
-rw-r--r-- fs/ext4/super.c | 51
-rw-r--r-- fs/jbd2/journal.c | 3
-rw-r--r-- fs/pstore/ram.c | 13
-rw-r--r-- fs/pstore/ram_core.c | 31
-rw-r--r-- fs/reiserfs/journal.c | 2
-rw-r--r-- fs/ubifs/file.c | 1
-rw-r--r-- fs/ubifs/journal.c | 7
-rw-r--r-- fs/xfs/libxfs/xfs_ag.h | 281
-rw-r--r-- fs/xfs/libxfs/xfs_alloc.c | 1
-rw-r--r-- fs/xfs/libxfs/xfs_alloc.h | 3
-rw-r--r-- fs/xfs/libxfs/xfs_alloc_btree.c | 1
-rw-r--r-- fs/xfs/libxfs/xfs_attr.c | 3
-rw-r--r-- fs/xfs/libxfs/xfs_attr_leaf.c | 2
-rw-r--r-- fs/xfs/libxfs/xfs_attr_remote.c | 2
-rw-r--r-- fs/xfs/libxfs/xfs_bmap.c | 77
-rw-r--r-- fs/xfs/libxfs/xfs_bmap_btree.c | 3
-rw-r--r-- fs/xfs/libxfs/xfs_btree.c | 2
-rw-r--r-- fs/xfs/libxfs/xfs_da_btree.c | 6
-rw-r--r-- fs/xfs/libxfs/xfs_da_format.c | 2
-rw-r--r-- fs/xfs/libxfs/xfs_dinode.h | 243
-rw-r--r-- fs/xfs/libxfs/xfs_dir2.c | 20
-rw-r--r-- fs/xfs/libxfs/xfs_dir2.h | 140
-rw-r--r-- fs/xfs/libxfs/xfs_dir2_block.c | 11
-rw-r--r-- fs/xfs/libxfs/xfs_dir2_data.c | 2
-rw-r--r-- fs/xfs/libxfs/xfs_dir2_leaf.c | 12
-rw-r--r-- fs/xfs/libxfs/xfs_dir2_node.c | 14
-rw-r--r-- fs/xfs/libxfs/xfs_dir2_priv.h | 140
-rw-r--r-- fs/xfs/libxfs/xfs_dir2_sf.c | 13
-rw-r--r-- fs/xfs/libxfs/xfs_dquot_buf.c | 2
-rw-r--r-- fs/xfs/libxfs/xfs_format.h | 1107
-rw-r--r-- fs/xfs/libxfs/xfs_ialloc.c | 43
-rw-r--r-- fs/xfs/libxfs/xfs_ialloc.h | 4
-rw-r--r-- fs/xfs/libxfs/xfs_ialloc_btree.c | 2
-rw-r--r-- fs/xfs/libxfs/xfs_inode_buf.c | 3
-rw-r--r-- fs/xfs/libxfs/xfs_inode_fork.c | 4
-rw-r--r-- fs/xfs/libxfs/xfs_inum.h | 60
-rw-r--r-- fs/xfs/libxfs/xfs_log_format.h | 2
-rw-r--r-- fs/xfs/libxfs/xfs_log_rlimit.c | 2
-rw-r--r-- fs/xfs/libxfs/xfs_rtbitmap.c | 3
-rw-r--r-- fs/xfs/libxfs/xfs_sb.c | 2
-rw-r--r-- fs/xfs/libxfs/xfs_sb.h | 584
-rw-r--r-- fs/xfs/libxfs/xfs_symlink_remote.c | 2
-rw-r--r-- fs/xfs/libxfs/xfs_trans_resv.c | 2
-rw-r--r-- fs/xfs/xfs_acl.c | 2
-rw-r--r-- fs/xfs/xfs_acl.h | 36
-rw-r--r-- fs/xfs/xfs_aops.c | 3
-rw-r--r-- fs/xfs/xfs_attr_inactive.c | 3
-rw-r--r-- fs/xfs/xfs_attr_list.c | 3
-rw-r--r-- fs/xfs/xfs_bmap_util.c | 3
-rw-r--r-- fs/xfs/xfs_buf.c | 27
-rw-r--r-- fs/xfs/xfs_buf.h | 3
-rw-r--r-- fs/xfs/xfs_buf_item.c | 2
-rw-r--r-- fs/xfs/xfs_dir2_readdir.c | 21
-rw-r--r-- fs/xfs/xfs_discard.c | 1
-rw-r--r-- fs/xfs/xfs_dquot.c | 2
-rw-r--r-- fs/xfs/xfs_dquot_item.c | 2
-rw-r--r-- fs/xfs/xfs_error.c | 2
-rw-r--r-- fs/xfs/xfs_export.c | 3
-rw-r--r-- fs/xfs/xfs_extent_busy.c | 1
-rw-r--r-- fs/xfs/xfs_extfree_item.c | 3
-rw-r--r-- fs/xfs/xfs_file.c | 9
-rw-r--r-- fs/xfs/xfs_filestream.c | 3
-rw-r--r-- fs/xfs/xfs_fsops.c | 2
-rw-r--r-- fs/xfs/xfs_icache.c | 4
-rw-r--r-- fs/xfs/xfs_icache.h | 8
-rw-r--r-- fs/xfs/xfs_icreate_item.c | 3
-rw-r--r-- fs/xfs/xfs_inode.c | 29
-rw-r--r-- fs/xfs/xfs_inode.h | 2
-rw-r--r-- fs/xfs/xfs_inode_item.c | 3
-rw-r--r-- fs/xfs/xfs_ioctl.c | 3
-rw-r--r-- fs/xfs/xfs_ioctl32.c | 2
-rw-r--r-- fs/xfs/xfs_iomap.c | 18
-rw-r--r-- fs/xfs/xfs_iops.c | 5
-rw-r--r-- fs/xfs/xfs_itable.c | 6
-rw-r--r-- fs/xfs/xfs_linux.h | 6
-rw-r--r-- fs/xfs/xfs_log.c | 8
-rw-r--r-- fs/xfs/xfs_log_cil.c | 3
-rw-r--r-- fs/xfs/xfs_log_recover.c | 4
-rw-r--r-- fs/xfs/xfs_message.c | 3
-rw-r--r-- fs/xfs/xfs_mount.c | 33
-rw-r--r-- fs/xfs/xfs_mount.h | 8
-rw-r--r-- fs/xfs/xfs_qm.c | 14
-rw-r--r-- fs/xfs/xfs_qm_bhv.c | 2
-rw-r--r-- fs/xfs/xfs_qm_syscalls.c | 27
-rw-r--r-- fs/xfs/xfs_quotaops.c | 2
-rw-r--r-- fs/xfs/xfs_rtalloc.c | 3
-rw-r--r-- fs/xfs/xfs_super.c | 19
-rw-r--r-- fs/xfs/xfs_symlink.c | 3
-rw-r--r-- fs/xfs/xfs_trace.c | 2
-rw-r--r-- fs/xfs/xfs_trans.c | 2
-rw-r--r-- fs/xfs/xfs_trans_ail.c | 3
-rw-r--r-- fs/xfs/xfs_trans_buf.c | 137
-rw-r--r-- fs/xfs/xfs_trans_dquot.c | 2
-rw-r--r-- fs/xfs/xfs_trans_extfree.c | 3
-rw-r--r-- fs/xfs/xfs_trans_inode.c | 2
-rw-r--r-- fs/xfs/xfs_xattr.c | 2
-rw-r--r-- include/asm-generic/pgtable.h | 11
-rw-r--r-- include/dt-bindings/dma/at91.h | 25
-rw-r--r-- include/dt-bindings/interrupt-controller/mips-gic.h | 9
-rw-r--r-- include/linux/amba/bus.h | 10
-rw-r--r-- include/linux/cgroup.h | 4
-rw-r--r-- include/linux/cpuset.h | 37
-rw-r--r-- include/linux/device.h | 5
-rw-r--r-- include/linux/dmaengine.h | 3
-rw-r--r-- include/linux/dmar.h | 50
-rw-r--r-- include/linux/elf.h | 5
-rw-r--r-- include/linux/fence.h | 4
-rw-r--r-- include/linux/hid.h | 43
-rw-r--r-- include/linux/hugetlb.h | 46
-rw-r--r-- include/linux/i2c.h | 2
-rw-r--r-- include/linux/iommu.h | 3
-rw-r--r-- include/linux/ipmi.h | 6
-rw-r--r-- include/linux/ipmi_smi.h | 10
-rw-r--r-- include/linux/irqchip/mips-gic.h (renamed from arch/mips/include/asm/gic.h) | 267
-rw-r--r-- include/linux/kprobes.h | 1
-rw-r--r-- include/linux/libata.h | 6
-rw-r--r-- include/linux/list.h | 34
-rw-r--r-- include/linux/mm.h | 11
-rw-r--r-- include/linux/mod_devicetable.h | 2
-rw-r--r-- include/linux/pci.h | 2
-rw-r--r-- include/linux/percpu-refcount.h | 4
-rw-r--r-- include/linux/platform_data/dma-imx.h | 1
-rw-r--r-- include/linux/plist.h | 10
-rw-r--r-- include/linux/pm_runtime.h | 6
-rw-r--r-- include/linux/pstore_ram.h | 4
-rw-r--r-- include/linux/rculist.h | 8
-rw-r--r-- include/scsi/libsas.h | 11
-rw-r--r-- include/scsi/scsi_host.h | 2
-rw-r--r-- include/trace/events/ext4.h | 17
-rw-r--r-- include/uapi/linux/btrfs.h | 1
-rw-r--r-- include/xen/interface/features.h | 3
-rw-r--r-- include/xen/interface/grant_table.h | 19
-rw-r--r-- kernel/cgroup.c | 175
-rw-r--r-- kernel/cpuset.c | 162
-rw-r--r-- kernel/irq_work.c | 4
-rw-r--r-- kernel/kprobes.c | 18
-rw-r--r-- kernel/sys_ni.c | 2
-rw-r--r-- kernel/time/clocksource.c | 2
-rw-r--r-- kernel/time/tick-sched.c | 2
-rw-r--r-- kernel/trace/trace.c | 2
-rw-r--r-- kernel/workqueue.c | 30
-rw-r--r-- mm/gup.c | 81
-rw-r--r-- mm/huge_memory.c | 5
-rw-r--r-- mm/hugetlb.c | 2
-rw-r--r-- mm/memory.c | 2
-rw-r--r-- mm/oom_kill.c | 2
-rw-r--r-- mm/page_alloc.c | 6
-rw-r--r-- mm/percpu.c | 2
-rw-r--r-- mm/slab.c | 2
-rw-r--r-- mm/slub.c | 3
-rw-r--r-- mm/vmscan.c | 5
-rw-r--r-- mm/zbud.c | 2
-rw-r--r-- mm/zswap.c | 2
-rw-r--r-- scripts/kconfig/list.h | 6
-rw-r--r-- scripts/recordmcount.c | 2
-rwxr-xr-x scripts/recordmcount.pl | 2
-rw-r--r-- tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c | 2
-rw-r--r-- tools/usb/usbip/libsrc/list.h | 2
1036 files changed, 32375 insertions, 14409 deletions
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index 3171822c22a5..618a33c940df 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -392,7 +392,12 @@ The goto statement comes in handy when a function exits from multiple
 locations and some common work such as cleanup has to be done. If there is no
 cleanup needed then just return directly.
 
-The rationale is:
+Choose label names which say what the goto does or why the goto exists. An
+example of a good name could be "out_buffer:" if the goto frees "buffer". Avoid
+using GW-BASIC names like "err1:" and "err2:". Also don't name them after the
+goto location like "err_kmalloc_failed:"
+
+The rationale for using gotos is:
 
 - unconditional statements are easier to understand and follow
 - nesting is reduced
@@ -403,9 +408,10 @@ The rationale is:
 int fun(int a)
 {
 	int result = 0;
-	char *buffer = kmalloc(SIZE);
+	char *buffer;
 
-	if (buffer == NULL)
+	buffer = kmalloc(SIZE, GFP_KERNEL);
+	if (!buffer)
 		return -ENOMEM;
 
 	if (condition1) {
@@ -413,14 +419,25 @@ int fun(int a)
 			...
 		}
 		result = 1;
-		goto out;
+		goto out_buffer;
 	}
 	...
-out:
+out_buffer:
 	kfree(buffer);
 	return result;
 }
 
+A common type of bug to be aware of is "one err bugs" which look like this:
+
+err:
+	kfree(foo->bar);
+	kfree(foo);
+	return ret;
+
+The bug in this code is that on some exit paths "foo" is NULL. Normally the
+fix for this is to split it up into two error labels "err_bar:" and "err_foo:".
+
+
 		Chapter 8: Commenting
 
 Comments are good, but there is also a danger of over-commenting. NEVER
@@ -845,6 +862,49 @@ next instruction in the assembly output:
 	: /* outputs */ : /* inputs */ : /* clobbers */);
 
 
+		Chapter 20: Conditional Compilation
+
+Wherever possible, don't use preprocessor conditionals (#if, #ifdef) in .c
+files; doing so makes code harder to read and logic harder to follow. Instead,
+use such conditionals in a header file defining functions for use in those .c
+files, providing no-op stub versions in the #else case, and then call those
+functions unconditionally from .c files. The compiler will avoid generating
+any code for the stub calls, producing identical results, but the logic will
+remain easy to follow.
+
+Prefer to compile out entire functions, rather than portions of functions or
+portions of expressions. Rather than putting an ifdef in an expression, factor
+out part or all of the expression into a separate helper function and apply the
+conditional to that function.
+
+If you have a function or variable which may potentially go unused in a
+particular configuration, and the compiler would warn about its definition
+going unused, mark the definition as __maybe_unused rather than wrapping it in
+a preprocessor conditional. (However, if a function or variable *always* goes
+unused, delete it.)
+
+Within code, where possible, use the IS_ENABLED macro to convert a Kconfig
+symbol into a C boolean expression, and use it in a normal C conditional:
+
+	if (IS_ENABLED(CONFIG_SOMETHING)) {
+		...
+	}
+
+The compiler will constant-fold the conditional away, and include or exclude
+the block of code just as with an #ifdef, so this will not add any runtime
+overhead. However, this approach still allows the C compiler to see the code
+inside the block, and check it for correctness (syntax, types, symbol
+references, etc). Thus, you still have to use an #ifdef if the code inside the
+block references symbols that will not exist if the condition is not met.
+
+At the end of any non-trivial #if or #ifdef block (more than a few lines),
+place a comment after the #endif on the same line, noting the conditional
+expression used. For instance:
+
+#ifdef CONFIG_SOMETHING
+...
+#endif /* CONFIG_SOMETHING */
+
 
 	Appendix I: References
 
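The two-label fix recommended above for "one err bugs" is easier to see in a
complete function. Here is a minimal sketch in kernel C; struct foo,
alloc_foo() and the failing foo_register() step are invented for illustration
and are not part of the patch:

/* needs <linux/slab.h> for kmalloc()/kfree() */

struct foo {
	char *bar;
};

static struct foo *alloc_foo(size_t size)
{
	struct foo *foo;

	foo = kmalloc(sizeof(*foo), GFP_KERNEL);
	if (!foo)
		return NULL;

	foo->bar = kmalloc(size, GFP_KERNEL);
	if (!foo->bar)
		goto err_foo;		/* only "foo" exists yet */

	if (foo_register(foo))		/* hypothetical step that can fail */
		goto err_bar;

	return foo;

err_bar:
	kfree(foo->bar);		/* falls through to free "foo" too */
err_foo:
	kfree(foo);
	return NULL;
}

Each label undoes exactly what had been allocated by the time it is reached,
so no exit path frees or dereferences a NULL "foo".
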
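Chapter 20's stub-header pattern and IS_ENABLED() are likewise clearer as a
sketch. CONFIG_FROB and every identifier below are hypothetical; pr_info()
comes from <linux/printk.h> and IS_ENABLED() from <linux/kconfig.h>:

/* in a header: */
#ifdef CONFIG_FROB
void frob_init(void);
#else
static inline void frob_init(void) { }	/* no-op stub, compiles to nothing */
#endif /* CONFIG_FROB */

/* in a .c file, both uses stay unconditional: */
static int frob_default __maybe_unused = 2;	/* unused in some configs,
						   but warns in none */

void frob_setup(void)
{
	frob_init();

	if (IS_ENABLED(CONFIG_FROB))	/* constant-folded by the compiler */
		pr_info("frob support enabled\n");
}

The dead branch and the stub call generate no object code, yet the compiler
still type-checks everything inside them.
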
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index f13c9132e9f2..653d5d739d7f 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -42,7 +42,13 @@ The driver interface depends on your hardware. If your system
 properly provides the SMBIOS info for IPMI, the driver will detect it
 and just work. If you have a board with a standard interface (These
 will generally be either "KCS", "SMIC", or "BT", consult your hardware
-manual), choose the 'IPMI SI handler' option.
+manual), choose the 'IPMI SI handler' option. A driver also exists
+for direct I2C access to the IPMI management controller. Some boards
+support this, but it is unknown if it will work on every board. For
+this, choose 'IPMI SMBus handler', but be ready to try to do some
+figuring to see if it will work on your system if the SMBIOS/ACPI
+information is wrong or not present. It is fairly safe to have both
+these enabled and let the drivers auto-detect what is present.
 
 You should generally enable ACPI on your system, as systems with IPMI
 can have ACPI tables describing them.
@@ -52,7 +58,8 @@ their job correctly, the IPMI controller should be automatically
 detected (via ACPI or SMBIOS tables) and should just work. Sadly,
 many boards do not have this information. The driver attempts
 standard defaults, but they may not work. If you fall into this
-situation, you need to read the section below named 'The SI Driver'.
+situation, you need to read the section below named 'The SI Driver' or
+"The SMBus Driver" on how to hand-configure your system.
 
 IPMI defines a standard watchdog timer. You can enable this with the
 'IPMI Watchdog Timer' config option. If you compile the driver into
@@ -97,7 +104,12 @@ driver, each open file for this device ties in to the message handler
 as an IPMI user.
 
 ipmi_si - A driver for various system interfaces. This supports KCS,
-SMIC, and BT interfaces.
+SMIC, and BT interfaces. Unless you have an SMBus interface or your
+own custom interface, you probably need to use this.
+
+ipmi_ssif - A driver for accessing BMCs on the SMBus. It uses the
+I2C kernel driver's SMBus interfaces to send and receive IPMI messages
+over the SMBus.
 
 ipmi_watchdog - IPMI requires systems to have a very capable watchdog
 timer. This driver implements the standard Linux watchdog timer
@@ -476,6 +488,62 @@ for specifying an interface. Note that when removing an interface,
 only the first three parameters (si type, address type, and address)
 are used for the comparison. Any options are ignored for removing.
 
+The SMBus Driver (SSIF)
+-----------------------
+
+The SMBus driver allows up to 4 SMBus devices to be configured in the
+system. By default, the driver will only register with something it
+finds in DMI or ACPI tables. You can change this
+at module load time (for a module) with:
+
+  modprobe ipmi_ssif
+	addr=<i2caddr1>[,<i2caddr2>[,...]]
+	adapter=<adapter1>[,<adapter2>[...]]
+	dbg=<flags1>,<flags2>...
+	slave_addrs=<addr1>,<addr2>,...
+	[dbg_probe=1]
+
+The addresses are normal I2C addresses. The adapter is the string
+name of the adapter, as shown in /sys/class/i2c-adapter/i2c-<n>/name.
+It is *NOT* i2c-<n> itself.
+
+The debug flags are bit flags for each BMC found, they are:
+IPMI messages: 1, driver state: 2, timing: 4, I2C probe: 8
+
+Setting dbg_probe to 1 will enable debugging of the probing and
+detection process for BMCs on the SMBusses.
+
+The slave_addrs specifies the IPMI address of the local BMC. This is
+usually 0x20 and the driver defaults to that, but in case it's not, it
+can be specified when the driver starts up.
+
+Discovering the IPMI compliant BMC on the SMBus can cause devices on
+the I2C bus to fail. The SMBus driver writes a "Get Device ID" IPMI
+message as a block write to the I2C bus and waits for a response.
+This action can be detrimental to some I2C devices. It is highly
+recommended that the known I2C address be given to the SMBus driver in
+the addr parameter unless you have DMI or ACPI data to tell the
+driver what to use.
+
+When compiled into the kernel, the addresses can be specified on the
+kernel command line as:
+
+  ipmi_ssif.addr=<i2caddr1>[,<i2caddr2>[...]]
+  ipmi_ssif.adapter=<adapter1>[,<adapter2>[...]]
+  ipmi_ssif.dbg=<flags1>[,<flags2>[...]]
+  ipmi_ssif.dbg_probe=1
+  ipmi_ssif.slave_addrs=<addr1>[,<addr2>[...]]
+
+These are the same options as on the module command line.
+
+The I2C driver does not support non-blocking access or polling, so
+this driver cannot send IPMI panic events, extend the watchdog at panic
+time, or perform other panic-related IPMI functions without special kernel
+patches and driver modifications. You can get those at the openipmi
+web page.
+
+The driver supports hot add and remove of interfaces through the I2C
+sysfs interface.
 
 Other Pieces
 ------------
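Since the dbg flags above are bit flags, one OR'd value is given per BMC
found. A small sketch; these macro names are made up for illustration and are
not the driver's actual identifiers:

#define SSIF_DBG_MSG	1	/* dump IPMI messages */
#define SSIF_DBG_STATE	2	/* driver state changes */
#define SSIF_DBG_TIMING	4	/* timing information */
#define SSIF_DBG_PROBE	8	/* I2C probing */

/* messages + timing for the first BMC: 1 | 4 == 5 */

So "modprobe ipmi_ssif dbg=5" would enable message and timing debugging on
the first interface found.
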
diff --git a/Documentation/arm/memory.txt b/Documentation/arm/memory.txt
index 38dc06d0a791..4178ebda6e66 100644
--- a/Documentation/arm/memory.txt
+++ b/Documentation/arm/memory.txt
@@ -41,7 +41,7 @@ fffe8000 fffeffff DTCM mapping area for platforms with
 fffe0000	fffe7fff	ITCM mapping area for platforms with
 				ITCM mounted inside the CPU.
 
-ffc00000	ffdfffff	Fixmap mapping region. Addresses provided
+ffc00000	ffefffff	Fixmap mapping region. Addresses provided
 				by fix_to_virt() will be located here.
 
 fee00000	feffffff	Mapping of PCI I/O space. This is a static
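The fixmap window this hunk widens is the region fix_to_virt() indexes into.
A short sketch of typical use; the helper name is made up, and FIX_TEXT_POKE0
is one real ARM fixmap slot, used here only as an example index:

#include <linux/types.h>
#include <asm/fixmap.h>

static void *map_page_for_patching(phys_addr_t phys)
{
	/* point the fixed slot at the given physical page... */
	set_fixmap(FIX_TEXT_POKE0, phys);
	/* ...and return its constant virtual address, which lies inside
	   the ffc00000-ffefffff window described above */
	return (void *)fix_to_virt(FIX_TEXT_POKE0);
}

Because each slot's virtual address is fixed at compile time, no allocation
or page-table walk is needed when the mapping is used.
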
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index 10c949b293e4..f935fac1e73b 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -312,10 +312,10 @@ the "cpuset" cgroup subsystem, the steps are something like:
  2) mkdir /sys/fs/cgroup/cpuset
  3) mount -t cgroup -ocpuset cpuset /sys/fs/cgroup/cpuset
  4) Create the new cgroup by doing mkdir's and write's (or echo's) in
-    the /sys/fs/cgroup virtual file system.
+    the /sys/fs/cgroup/cpuset virtual file system.
  5) Start a task that will be the "founding father" of the new job.
  6) Attach that task to the new cgroup by writing its PID to the
-    /sys/fs/cgroup/cpuset/tasks file for that cgroup.
+    /sys/fs/cgroup/cpuset tasks file for that cgroup.
  7) fork, exec or clone the job tasks from this founding father task.
 
 For example, the following sequence of commands will setup a cgroup
diff --git a/Documentation/devicetree/bindings/ata/sata_rcar.txt b/Documentation/devicetree/bindings/ata/sata_rcar.txt
index 80ae87a0784b..2493a5a31655 100644
--- a/Documentation/devicetree/bindings/ata/sata_rcar.txt
+++ b/Documentation/devicetree/bindings/ata/sata_rcar.txt
@@ -3,18 +3,21 @@
 Required properties:
 - compatible		: should contain one of the following:
 			  - "renesas,sata-r8a7779" for R-Car H1
+			    ("renesas,rcar-sata" is deprecated)
 			  - "renesas,sata-r8a7790-es1" for R-Car H2 ES1
 			  - "renesas,sata-r8a7790" for R-Car H2 other than ES1
 			  - "renesas,sata-r8a7791" for R-Car M2-W
 			  - "renesas,sata-r8a7793" for R-Car M2-N
 - reg			: address and length of the SATA registers;
 - interrupts		: must consist of one interrupt specifier.
+- clocks		: must contain a reference to the functional clock.
 
 Example:
 
-sata: sata@fc600000 {
-	compatible = "renesas,sata-r8a7779";
-	reg = <0xfc600000 0x2000>;
+sata0: sata@ee300000 {
+	compatible = "renesas,sata-r8a7791";
+	reg = <0 0xee300000 0 0x2000>;
 	interrupt-parent = <&gic>;
-	interrupts = <0 100 IRQ_TYPE_LEVEL_HIGH>;
+	interrupts = <0 105 IRQ_TYPE_LEVEL_HIGH>;
+	clocks = <&mstp8_clks R8A7791_CLK_SATA0>;
 };
diff --git a/Documentation/devicetree/bindings/clock/qoriq-clock.txt b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
index 5666812fc42b..266ff9d23229 100644
--- a/Documentation/devicetree/bindings/clock/qoriq-clock.txt
+++ b/Documentation/devicetree/bindings/clock/qoriq-clock.txt
@@ -62,6 +62,8 @@ Required properties:
62 It takes parent's clock-frequency as its clock. 62 It takes parent's clock-frequency as its clock.
63 * "fsl,qoriq-sysclk-2.0": for input system clock (v2.0). 63 * "fsl,qoriq-sysclk-2.0": for input system clock (v2.0).
64 It takes parent's clock-frequency as its clock. 64 It takes parent's clock-frequency as its clock.
65 * "fsl,qoriq-platform-pll-1.0" for the platform PLL clock (v1.0)
66 * "fsl,qoriq-platform-pll-2.0" for the platform PLL clock (v2.0)
65- #clock-cells: From common clock binding. The number of cells in a 67- #clock-cells: From common clock binding. The number of cells in a
66 clock-specifier. Should be <0> for "fsl,qoriq-sysclk-[1,2].0" 68 clock-specifier. Should be <0> for "fsl,qoriq-sysclk-[1,2].0"
67 clocks, or <1> for "fsl,qoriq-core-pll-[1,2].0" clocks. 69 clocks, or <1> for "fsl,qoriq-core-pll-[1,2].0" clocks.
@@ -128,8 +130,16 @@ Example for clock block and clock provider:
128 clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2"; 130 clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
129 clock-output-names = "cmux1"; 131 clock-output-names = "cmux1";
130 }; 132 };
133
134 platform-pll: platform-pll@c00 {
135 #clock-cells = <1>;
136 reg = <0xc00 0x4>;
137 compatible = "fsl,qoriq-platform-pll-1.0";
138 clocks = <&sysclk>;
139 clock-output-names = "platform-pll", "platform-pll-div2";
140 };
131 }; 141 };
132 } 142};
133 143
134Example for clock consumer: 144Example for clock consumer:
135 145
@@ -139,4 +149,4 @@ Example for clock consumer:
139 clocks = <&mux0>; 149 clocks = <&mux0>;
140 ... 150 ...
141 }; 151 };
142 } 152};
diff --git a/Documentation/devicetree/bindings/clock/st/st,flexgen.txt b/Documentation/devicetree/bindings/clock/st/st,flexgen.txt
index 1d3ace088172..b7ee5c7e0f75 100644
--- a/Documentation/devicetree/bindings/clock/st/st,flexgen.txt
+++ b/Documentation/devicetree/bindings/clock/st/st,flexgen.txt
@@ -11,7 +11,7 @@ Please find an example below:
11 11
12 Clockgen block diagram 12 Clockgen block diagram
13 ------------------------------------------------------------------- 13 -------------------------------------------------------------------
14 | Flexgen stucture | 14 | Flexgen structure |
15 | --------------------------------------------- | 15 | --------------------------------------------- |
16 | | ------- -------- -------- | | 16 | | ------- -------- -------- | |
17clk_sysin | | | | | | | | | 17clk_sysin | | | | | | | | |
diff --git a/Documentation/devicetree/bindings/dma/atmel-xdma.txt b/Documentation/devicetree/bindings/dma/atmel-xdma.txt
new file mode 100644
index 000000000000..0eb2b3207e08
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/atmel-xdma.txt
@@ -0,0 +1,54 @@
1* Atmel Extensible Direct Memory Access Controller (XDMAC)
2
3* XDMA Controller
4Required properties:
5- compatible: Should be "atmel,<chip>-dma".
6 <chip> compatible description:
7 - sama5d4: first SoC adding the XDMAC
8- reg: Should contain DMA registers location and length.
9- interrupts: Should contain DMA interrupt.
10- #dma-cells: Must be <1>, used to represent the number of integer cells in
11the dmas property of client devices.
12 - The 1st cell specifies the channel configuration register:
13 - bit 13: SIF, source interface identifier, used to get the memory
14 interface identifier,
15 - bit 14: DIF, destination interface identifier, used to get the peripheral
16 interface identifier,
17 - bit 30-24: PERID, peripheral identifier.
18
19Example:
20
21dma1: dma-controller@f0004000 {
22 compatible = "atmel,sama5d4-dma";
23 reg = <0xf0004000 0x200>;
24 interrupts = <50 4 0>;
25 #dma-cells = <1>;
26};
27
28
29* DMA clients
30DMA clients connected to the Atmel XDMA controller must use the format
31described in the dma.txt file, using a one-cell specifier for each channel.
32The two cells in order are:
331. A phandle pointing to the DMA controller.
342. Channel configuration register. Configurable fields are:
35 - bit 13: SIF, source interface identifier, used to get the memory
36 interface identifier,
37 - bit 14: DIF, destination interface identifier, used to get the peripheral
38 interface identifier,
39 - bit 30-24: PERID, peripheral identifier.
40
41Example:
42
43i2c2: i2c@f8024000 {
44 compatible = "atmel,at91sam9x5-i2c";
45 reg = <0xf8024000 0x4000>;
46 interrupts = <34 4 6>;
47 dmas = <&dma1
48 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
49 | AT91_XDMAC_DT_PERID(6))>,
50 <&dma1
51 (AT91_XDMAC_DT_MEM_IF(0) | AT91_XDMAC_DT_PER_IF(1)
52 | AT91_XDMAC_DT_PERID(7))>;
53 dma-names = "tx", "rx";
54};
diff --git a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
index 4659fd952301..dc8d3aac1aa9 100644
--- a/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
+++ b/Documentation/devicetree/bindings/dma/fsl-imx-sdma.txt
@@ -48,6 +48,7 @@ The full ID of peripheral types can be found below.
48 21 ESAI 48 21 ESAI
49 22 SSI Dual FIFO (needs firmware ver >= 2) 49 22 SSI Dual FIFO (needs firmware ver >= 2)
50 23 Shared ASRC 50 23 Shared ASRC
51 24 SAI
51 52
52The third cell specifies the transfer priority as below. 53The third cell specifies the transfer priority as below.
53 54
diff --git a/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt b/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
index d75a9d767022..f8c3311b7153 100644
--- a/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
+++ b/Documentation/devicetree/bindings/dma/qcom_bam_dma.txt
@@ -1,7 +1,9 @@
1QCOM BAM DMA controller 1QCOM BAM DMA controller
2 2
3Required properties: 3Required properties:
4- compatible: must contain "qcom,bam-v1.4.0" for MSM8974 4- compatible: must be one of the following:
5 * "qcom,bam-v1.4.0" for MSM8974, APQ8074 and APQ8084
6 * "qcom,bam-v1.3.0" for APQ8064, IPQ8064 and MSM8960
5- reg: Address range for DMA registers 7- reg: Address range for DMA registers
6- interrupts: Should contain the one interrupt shared by all channels 8- interrupts: Should contain the one interrupt shared by all channels
7- #dma-cells: must be <1>, the cell in the dmas property of the client device 9- #dma-cells: must be <1>, the cell in the dmas property of the client device
diff --git a/Documentation/devicetree/bindings/dma/sun6i-dma.txt b/Documentation/devicetree/bindings/dma/sun6i-dma.txt
index 3e145c1675b1..9cdcba24d7c3 100644
--- a/Documentation/devicetree/bindings/dma/sun6i-dma.txt
+++ b/Documentation/devicetree/bindings/dma/sun6i-dma.txt
@@ -4,7 +4,7 @@ This driver follows the generic DMA bindings defined in dma.txt.
4 4
5Required properties: 5Required properties:
6 6
7- compatible: Must be "allwinner,sun6i-a31-dma" 7- compatible: Must be "allwinner,sun6i-a31-dma" or "allwinner,sun8i-a23-dma"
8- reg: Should contain the registers base address and length 8- reg: Should contain the registers base address and length
9- interrupts: Should contain a reference to the interrupt used by this device 9- interrupts: Should contain a reference to the interrupt used by this device
10- clocks: Should contain a reference to the parent AHB clock 10- clocks: Should contain a reference to the parent AHB clock
diff --git a/Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt b/Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt
new file mode 100644
index 000000000000..5a65478e5d40
--- /dev/null
+++ b/Documentation/devicetree/bindings/interrupt-controller/mips-gic.txt
@@ -0,0 +1,55 @@
1MIPS Global Interrupt Controller (GIC)
2
3The MIPS GIC routes external interrupts to individual VPEs and IRQ pins.
4It also supports local (per-processor) interrupts and software-generated
5interrupts which can be used as IPIs. The GIC also includes a free-running
6global timer, per-CPU count/compare timers, and a watchdog.
7
8Required properties:
9- compatible : Should be "mti,gic".
10- interrupt-controller : Identifies the node as an interrupt controller
11- #interrupt-cells : Specifies the number of cells needed to encode an
12 interrupt specifier. Should be 3.
13 - The first cell is the type of interrupt, local or shared.
14 See <include/dt-bindings/interrupt-controller/mips-gic.h>.
15 - The second cell is the GIC interrupt number.
16 - The third cell encodes the interrupt flags.
17 See <include/dt-bindings/interrupt-controller/irq.h> for a list of valid
18 flags.
19
20Optional properties:
21- reg : Base address and length of the GIC registers. If not present,
22 the base address reported by the hardware GCR_GIC_BASE will be used.
23- mti,reserved-cpu-vectors : Specifies the list of CPU interrupt vectors
24 to which the GIC may not route interrupts. Valid values are 2 - 7.
25 This property is ignored if the CPU is started in EIC mode.
26
27Required properties for timer sub-node:
28- compatible : Should be "mti,gic-timer".
29- interrupts : Interrupt for the GIC local timer.
30- clock-frequency : Clock frequency at which the GIC timers operate.
31
32Example:
33
34 gic: interrupt-controller@1bdc0000 {
35 compatible = "mti,gic";
36 reg = <0x1bdc0000 0x20000>;
37
38 interrupt-controller;
39 #interrupt-cells = <3>;
40
41 mti,reserved-cpu-vectors = <7>;
42
43 timer {
44 compatible = "mti,gic-timer";
45 interrupts = <GIC_LOCAL 1 IRQ_TYPE_NONE>;
46 clock-frequency = <50000000>;
47 };
48 };
49
50 uart@18101400 {
51 ...
52 interrupt-parent = <&gic>;
53 interrupts = <GIC_SHARED 24 IRQ_TYPE_LEVEL_HIGH>;
54 ...
55 };
diff --git a/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt b/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt
new file mode 100644
index 000000000000..9a55ac3735e5
--- /dev/null
+++ b/Documentation/devicetree/bindings/iommu/rockchip,iommu.txt
@@ -0,0 +1,26 @@
1Rockchip IOMMU
2==============
3
4A Rockchip DRM IOMMU translates I/O virtual addresses to physical addresses for
5its master device. Each slave device is bound to a single master device, and
6shares its clocks, power domain and irq.
7
8Required properties:
9- compatible : Should be "rockchip,iommu"
10- reg : Address space for the configuration registers
11- interrupts : Interrupt specifier for the IOMMU instance
12- interrupt-names : Interrupt name for the IOMMU instance
13- #iommu-cells : Should be <0>. This indicates the iommu is a
14 "single-master" device, and needs no additional information
15 to associate with its master device. See:
16 Documentation/devicetree/bindings/iommu/iommu.txt
17
18Example:
19
20 vopl_mmu: iommu@ff940300 {
21 compatible = "rockchip,iommu";
22 reg = <0xff940300 0x100>;
23 interrupts = <GIC_SPI 16 IRQ_TYPE_LEVEL_HIGH>;
24 interrupt-names = "vopl_mmu";
25 #iommu-cells = <0>;
26 };
diff --git a/Documentation/devicetree/bindings/mips/brcm/bcm3384-intc.txt b/Documentation/devicetree/bindings/mips/brcm/bcm3384-intc.txt
new file mode 100644
index 000000000000..d4e0141d3620
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/brcm/bcm3384-intc.txt
@@ -0,0 +1,37 @@
1* Interrupt Controller
2
3Properties:
4- compatible: "brcm,bcm3384-intc"
5
6 Compatibility with BCM3384 and possibly other BCM33xx/BCM63xx SoCs.
7
8- reg: Address/length pairs for each mask/status register set. Length must
9 be 8. If multiple register sets are specified, the first set will
10 handle IRQ offsets 0..31, the second set 32..63, and so on.
11
12- interrupt-controller: This is an interrupt controller.
13
14- #interrupt-cells: Must be <1>. Just a simple IRQ offset; no level/edge
15 or polarity configuration is possible with this controller.
16
17- interrupt-parent: This controller is cascaded from a MIPS CPU HW IRQ, or
18 from another INTC.
19
20- interrupts: The IRQ on the parent controller.
21
22Example:
23 periph_intc: periph_intc@14e00038 {
24 compatible = "brcm,bcm3384-intc";
25
26 /*
27 * IRQs 0..31: mask reg 0x14e00038, status reg 0x14e0003c
28 * IRQs 32..63: mask reg 0x14e00340, status reg 0x14e00344
29 */
30 reg = <0x14e00038 0x8 0x14e00340 0x8>;
31
32 interrupt-controller;
33 #interrupt-cells = <1>;
34
35 interrupt-parent = <&cpu_intc>;
36 interrupts = <4>;
37 };
diff --git a/Documentation/devicetree/bindings/mips/brcm/bmips.txt b/Documentation/devicetree/bindings/mips/brcm/bmips.txt
new file mode 100644
index 000000000000..8ef71b4085ca
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/brcm/bmips.txt
@@ -0,0 +1,8 @@
1* Broadcom MIPS (BMIPS) CPUs
2
3Required properties:
4- compatible: "brcm,bmips3300", "brcm,bmips4350", "brcm,bmips4380",
5 "brcm,bmips5000"
6
7- mips-hpt-frequency: This is common to all CPUs in the system so it lives
8 under the "cpus" node.
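
Example (a minimal sketch; the 163125000 Hz value is a placeholder,
not taken from a real board):

cpus {
	#address-cells = <1>;
	#size-cells = <0>;
	mips-hpt-frequency = <163125000>;

	cpu@0 {
		compatible = "brcm,bmips4350";
		device_type = "cpu";
		reg = <0>;
	};
};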
diff --git a/Documentation/devicetree/bindings/mips/brcm/cm-dsl.txt b/Documentation/devicetree/bindings/mips/brcm/cm-dsl.txt
new file mode 100644
index 000000000000..8a139cb3c0b5
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/brcm/cm-dsl.txt
@@ -0,0 +1,11 @@
1* Broadcom cable/DSL platforms
2
3SoCs:
4
5Required properties:
6- compatible: "brcm,bcm3384", "brcm,bcm33843"
7
8Boards:
9
10Required properties:
11- compatible: "brcm,bcm93384wvg"
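
Example (a sketch only; the model string is illustrative):

/ {
	compatible = "brcm,bcm93384wvg", "brcm,bcm3384";
	model = "Broadcom BCM93384WVG";
};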
diff --git a/Documentation/devicetree/bindings/mips/brcm/usb.txt b/Documentation/devicetree/bindings/mips/brcm/usb.txt
new file mode 100644
index 000000000000..452c45c7bf29
--- /dev/null
+++ b/Documentation/devicetree/bindings/mips/brcm/usb.txt
@@ -0,0 +1,11 @@
1* Broadcom USB controllers
2
3Required properties:
4- compatible: "brcm,bcm3384-ohci", "brcm,bcm3384-ehci"
5
6 These currently use the generic-ohci and generic-ehci drivers. On some
7 systems, special handling may be needed in the following cases:
8
9 - Restoring state after systemwide power save modes
10 - Sharing PHYs with the USBD (UDC) hardware
11 - Figuring out which controllers are disabled on ASIC bondout variants
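
Example (a sketch only; the register address and interrupt number are
hypothetical, and &periph_intc refers to the interrupt controller
example in bcm3384-intc.txt):

ehci@15400300 {
	compatible = "brcm,bcm3384-ehci", "generic-ehci";
	reg = <0x15400300 0x100>;
	interrupt-parent = <&periph_intc>;
	interrupts = <41>;
};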
diff --git a/Documentation/devicetree/bindings/mips/cpu_irq.txt b/Documentation/devicetree/bindings/mips/cpu_irq.txt
index 13aa4b62c62a..fc149f326dae 100644
--- a/Documentation/devicetree/bindings/mips/cpu_irq.txt
+++ b/Documentation/devicetree/bindings/mips/cpu_irq.txt
@@ -1,6 +1,6 @@
1MIPS CPU interrupt controller 1MIPS CPU interrupt controller
2 2
3On MIPS the mips_cpu_intc_init() helper can be used to initialize the 8 CPU 3On MIPS the mips_cpu_irq_of_init() helper can be used to initialize the 8 CPU
4IRQs from a devicetree file and create a irq_domain for IRQ controller. 4IRQs from a devicetree file and create a irq_domain for IRQ controller.
5 5
6With the irq_domain in place we can describe how the 8 IRQs are wired to the 6With the irq_domain in place we can describe how the 8 IRQs are wired to the
@@ -36,7 +36,7 @@ Example devicetree:
36 36
37Example platform irq.c: 37Example platform irq.c:
38static struct of_device_id __initdata of_irq_ids[] = { 38static struct of_device_id __initdata of_irq_ids[] = {
39 { .compatible = "mti,cpu-interrupt-controller", .data = mips_cpu_intc_init }, 39 { .compatible = "mti,cpu-interrupt-controller", .data = mips_cpu_irq_of_init },
40 { .compatible = "ralink,rt2880-intc", .data = intc_of_init }, 40 { .compatible = "ralink,rt2880-intc", .data = intc_of_init },
41 {}, 41 {},
42}; 42};
diff --git a/Documentation/devicetree/bindings/powerpc/fsl/fman.txt b/Documentation/devicetree/bindings/powerpc/fsl/fman.txt
new file mode 100644
index 000000000000..edeea160ca39
--- /dev/null
+++ b/Documentation/devicetree/bindings/powerpc/fsl/fman.txt
@@ -0,0 +1,534 @@
1=============================================================================
2Freescale Frame Manager Device Bindings
3
4CONTENTS
5 - FMan Node
6 - FMan MURAM Node
7 - FMan Port Node
8 - FMan dTSEC/XGEC/mEMAC Node
9 - FMan IEEE 1588 Node
10 - Example
11
12=============================================================================
13FMan Node
14
15DESCRIPTION
16
17Because the FMan is an aggregation of sub-engines (ports, MACs,
18etc.), the FMan node will have child nodes for each of them.
19
20PROPERTIES
21
22- compatible
23 Usage: required
24 Value type: <stringlist>
25 Definition: Must include "fsl,fman"
26 FMan version can be determined via the FM_IP_REV_1 register in the
27 FMan block. The offset is 0xc4 from the beginning of the
28 Frame Processing Manager memory map (0xc3000 from the
29 beginning of the FMan node).
30
31- cell-index
32 Usage: required
33 Value type: <u32>
34 Definition: Specifies the index of the FMan unit.
35
36 The cell-index value may be used by the SoC to identify the
37 FMan unit in the SoC memory map. The table below describes
38 the cell-index use in each SoC:
39
40 - P1023:
41 register[bit] FMan unit cell-index
42 ============================================================
43 DEVDISR[1] 1 0
44
45 - P2041, P3041, P4080, P5020, P5040:
46 register[bit] FMan unit cell-index
47 ============================================================
48 DCFG_DEVDISR2[6] 1 0
49 DCFG_DEVDISR2[14] 2 1
50 (Second FM available only in P4080 and P5040)
51
52 - B4860, T1040, T2080, T4240:
53 register[bit] FMan unit cell-index
54 ============================================================
55 DCFG_CCSR_DEVDISR2[24] 1 0
56 DCFG_CCSR_DEVDISR2[25] 2 1
57 (Second FM available only in T4240)
58
59 DEVDISR, DCFG_DEVDISR2 and DCFG_CCSR_DEVDISR2 are located in
60 the specific SoC "Device Configuration/Pin Control" Memory
61 Map.
62
63- reg
64 Usage: required
65 Value type: <prop-encoded-array>
66 Definition: A standard property. Specifies the offset of the
67 following configuration registers:
68 - BMI configuration registers.
69 - QMI configuration registers.
70 - DMA configuration registers.
71 - FPM configuration registers.
72 - FMan controller configuration registers.
73
74- ranges
75 Usage: required
76 Value type: <prop-encoded-array>
77 Definition: A standard property.
78
79- clocks
80 Usage: required
81 Value type: <prop-encoded-array>
82 Definition: phandle for the fman input clock.
83
84- clock-names
85 usage: required
86 Value type: <stringlist>
87 Definition: "fmanclk" for the fman input clock.
88
89- interrupts
90 Usage: required
91 Value type: <prop-encoded-array>
92 Definition: A pair of IRQs are specified in this property.
93 The first element is associated with the event interrupts and
94 the second element is associated with the error interrupts.
95
96- fsl,qman-channel-range
97 Usage: required
98 Value type: <prop-encoded-array>
99 Definition: Specifies the range of the available dedicated
100 channels in the FMan. The first cell specifies the beginning
101 of the range and the second cell specifies the number of
102 channels.
103 Further information available at:
104 "Work Queue (WQ) Channel Assignments in the QMan" section
105 in DPAA Reference Manual.
106
107- fsl,qman
108- fsl,bman
109 Usage: required
110 Definition: See soc/fsl/qman.txt and soc/fsl/bman.txt
111
112=============================================================================
113FMan MURAM Node
114
115DESCRIPTION
116
117FMan Internal memory - shared between all the FMan modules.
118It contains data structures that are common and written to or read by
119the modules.
120FMan internal memory is split into the following parts:
121 Packet buffering (Tx/Rx FIFOs)
122 Frames internal context
123
124PROPERTIES
125
126- compatible
127 Usage: required
128 Value type: <stringlist>
129 Definition: Must include "fsl,fman-muram"
130
131- ranges
132 Usage: required
133 Value type: <prop-encoded-array>
134 Definition: A standard property.
135 Specifies the multi-user memory offset and the size within
136 the FMan.
137
138EXAMPLE
139
140muram@0 {
141 compatible = "fsl,fman-muram";
142 ranges = <0 0x000000 0x28000>;
143};
144
145=============================================================================
146FMan Port Node
147
148DESCRIPTION
149
150The Frame Manager (FMan) supports several types of hardware ports:
151 Ethernet receiver (RX)
152 Ethernet transmitter (TX)
153 Offline/Host command (O/H)
154
155PROPERTIES
156
157- compatible
158 Usage: required
159 Value type: <stringlist>
160 Definition: A standard property.
161 Must include one of the following:
162 - "fsl,fman-v2-port-oh" for FManV2 OH ports
163 - "fsl,fman-v2-port-rx" for FManV2 RX ports
164 - "fsl,fman-v2-port-tx" for FManV2 TX ports
165 - "fsl,fman-v3-port-oh" for FManV3 OH ports
166 - "fsl,fman-v3-port-rx" for FManV3 RX ports
167 - "fsl,fman-v3-port-tx" for FManV3 TX ports
168
169- cell-index
170 Usage: required
171 Value type: <u32>
172 Definition: Specifies the hardware port id.
173 Each hardware port on the FMan has its own hardware PortID.
174 The superset of all hardware Port IDs is available in the FMan
175 Reference Manual, "FMan Hardware Ports in Freescale Devices" table.
176
177 Each hardware port is assigned a 4KB, port-specific page in
178 the FMan hardware port memory region (which is part of the
179 FMan memory map). The first 4 KB in the FMan hardware ports
180 memory region is used for what are called common registers.
181 The subsequent 63 4KB pages are allocated to the hardware
182 ports.
183 The page of a specific port is determined by the cell-index.
184
185- reg
186 Usage: required
187 Value type: <prop-encoded-array>
188 Definition: There is one reg region describing the port
189 configuration registers.
190
191EXAMPLE
192
193port@a8000 {
194 cell-index = <0x28>;
195 compatible = "fsl,fman-v2-port-tx";
196 reg = <0xa8000 0x1000>;
197};
198
199port@88000 {
200 cell-index = <0x8>;
201 compatible = "fsl,fman-v2-port-rx";
202 reg = <0x88000 0x1000>;
203};
204
205port@81000 {
206 cell-index = <0x1>;
207 compatible = "fsl,fman-v2-port-oh";
208 reg = <0x81000 0x1000>;
209};
210
211=============================================================================
212FMan dTSEC/XGEC/mEMAC Node
213
214DESCRIPTION
215
216mEMAC/dTSEC/XGEC are the Ethernet network interfaces
217
218PROPERTIES
219
220- compatible
221 Usage: required
222 Value type: <stringlist>
223 Definition: A standard property.
224 Must include one of the following:
225 - "fsl,fman-dtsec" for dTSEC MAC
226 - "fsl,fman-xgec" for XGEC MAC
227 - "fsl,fman-memac for mEMAC MAC
228
229- cell-index
230 Usage: required
231 Value type: <u32>
232 Definition: Specifies the MAC id.
233
234 The cell-index value may be used by the FMan or the SoC to
235 identify the MAC unit in the FMan (or SoC) memory map.
236 The two tables below describe the use of cell-index: the first
237 describes its use by the FMan, the second describes its use
238 by the SoC:
239
240 1. FMan Registers
241
242 FManV2:
243 register[bit] MAC cell-index
244 ============================================================
245 FM_EPI[16] XGEC 8
246 FM_EPI[16+n] dTSECn n-1
247 FM_NPI[11+n] dTSECn n-1
248 n = 1,..,5
249
250 FManV3:
251 register[bit] MAC cell-index
252 ============================================================
253 FM_EPI[16+n] mEMACn n-1
254 FM_EPI[25] mEMAC10 9
255
256 FM_NPI[11+n] mEMACn n-1
257 FM_NPI[10] mEMAC10 9
258 FM_NPI[11] mEMAC9 8
259 n = 1,..8
260
261 FM_EPI and FM_NPI are located in the FMan memory map.
262
263 2. SoC registers:
264
265 - P2041, P3041, P4080, P5020, P5040:
266 register[bit] FMan MAC cell
267 Unit index
268 ============================================================
269 DCFG_DEVDISR2[7] 1 XGEC 8
270 DCFG_DEVDISR2[7+n] 1 dTSECn n-1
271 DCFG_DEVDISR2[15] 2 XGEC 8
272 DCFG_DEVDISR2[15+n] 2 dTSECn n-1
273 n = 1,..5
274
275 - T1040, T2080, T4240, B4860:
276 register[bit] FMan MAC cell
277 Unit index
278 ============================================================
279 DCFG_CCSR_DEVDISR2[n-1] 1 mEMACn n-1
280 DCFG_CCSR_DEVDISR2[11+n] 2 mEMACn n-1
281 n = 1,..6,9,10
282
283 DEVDISR, DCFG_DEVDISR2 and DCFG_CCSR_DEVDISR2 are located in
284 the specific SoC "Device Configuration/Pin Control" Memory
285 Map.
286
287- reg
288 Usage: required
289 Value type: <prop-encoded-array>
290 Definition: A standard property.
291
292- fsl,fman-ports
293 Usage: required
294 Value type: <prop-encoded-array>
295 Definition: An array of two phandles - the first references
296 the FMan RX port and the second the TX port used by this
297 MAC.
298
299- ptp-timer
300 Usage: required
301 Value type: <phandle>
302 Definition: A phandle for the IEEE 1588 timer.
303
304EXAMPLE
305
306fman1_tx28: port@a8000 {
307 cell-index = <0x28>;
308 compatible = "fsl,fman-v2-port-tx";
309 reg = <0xa8000 0x1000>;
310};
311
312fman1_rx8: port@88000 {
313 cell-index = <0x8>;
314 compatible = "fsl,fman-v2-port-rx";
315 reg = <0x88000 0x1000>;
316};
317
318ptp-timer: ptp_timer@fe000 {
319 compatible = "fsl,fman-ptp-timer";
320 reg = <0xfe000 0x1000>;
321};
322
323ethernet@e0000 {
324 compatible = "fsl,fman-dtsec";
325 cell-index = <0>;
326 reg = <0xe0000 0x1000>;
327 fsl,fman-ports = <&fman1_rx8 &fman1_tx28>;
328 ptp-timer = <&ptp-timer>;
329};
330
331============================================================================
332FMan IEEE 1588 Node
333
334DESCRIPTION
335
336The FMan interface to support IEEE 1588
337
338
339PROPERTIES
340
341- compatible
342 Usage: required
343 Value type: <stringlist>
344 Definition: A standard property.
345 Must include "fsl,fman-ptp-timer".
346
347- reg
348 Usage: required
349 Value type: <prop-encoded-array>
350 Definition: A standard property.
351
352EXAMPLE
353
354ptp-timer@fe000 {
355 compatible = "fsl,fman-ptp-timer";
356 reg = <0xfe000 0x1000>;
357};
358
359=============================================================================
360Example
361
362fman@400000 {
363 #address-cells = <1>;
364 #size-cells = <1>;
365 cell-index = <1>;
366	compatible = "fsl,fman";
367 ranges = <0 0x400000 0x100000>;
368 reg = <0x400000 0x100000>;
369 clocks = <&fman_clk>;
370 clock-names = "fmanclk";
371 interrupts = <
372 96 2 0 0
373 16 2 1 1>;
374 fsl,qman-channel-range = <0x40 0xc>;
375
376 muram@0 {
377 compatible = "fsl,fman-muram";
378 reg = <0x0 0x28000>;
379 };
380
381 port@81000 {
382 cell-index = <1>;
383 compatible = "fsl,fman-v2-port-oh";
384 reg = <0x81000 0x1000>;
385 };
386
387 port@82000 {
388 cell-index = <2>;
389 compatible = "fsl,fman-v2-port-oh";
390 reg = <0x82000 0x1000>;
391 };
392
393 port@83000 {
394 cell-index = <3>;
395 compatible = "fsl,fman-v2-port-oh";
396 reg = <0x83000 0x1000>;
397 };
398
399 port@84000 {
400 cell-index = <4>;
401 compatible = "fsl,fman-v2-port-oh";
402 reg = <0x84000 0x1000>;
403 };
404
405 port@85000 {
406 cell-index = <5>;
407 compatible = "fsl,fman-v2-port-oh";
408 reg = <0x85000 0x1000>;
409 };
410
411 port@86000 {
412 cell-index = <6>;
413 compatible = "fsl,fman-v2-port-oh";
414 reg = <0x86000 0x1000>;
415 };
416
417 fman1_rx_0x8: port@88000 {
418 cell-index = <0x8>;
419 compatible = "fsl,fman-v2-port-rx";
420 reg = <0x88000 0x1000>;
421 };
422
423 fman1_rx_0x9: port@89000 {
424 cell-index = <0x9>;
425 compatible = "fsl,fman-v2-port-rx";
426 reg = <0x89000 0x1000>;
427 };
428
429 fman1_rx_0xa: port@8a000 {
430 cell-index = <0xa>;
431 compatible = "fsl,fman-v2-port-rx";
432 reg = <0x8a000 0x1000>;
433 };
434
435 fman1_rx_0xb: port@8b000 {
436 cell-index = <0xb>;
437 compatible = "fsl,fman-v2-port-rx";
438 reg = <0x8b000 0x1000>;
439 };
440
441 fman1_rx_0xc: port@8c000 {
442 cell-index = <0xc>;
443 compatible = "fsl,fman-v2-port-rx";
444 reg = <0x8c000 0x1000>;
445 };
446
447 fman1_rx_0x10: port@90000 {
448 cell-index = <0x10>;
449 compatible = "fsl,fman-v2-port-rx";
450 reg = <0x90000 0x1000>;
451 };
452
453 fman1_tx_0x28: port@a8000 {
454 cell-index = <0x28>;
455 compatible = "fsl,fman-v2-port-tx";
456 reg = <0xa8000 0x1000>;
457 };
458
459 fman1_tx_0x29: port@a9000 {
460 cell-index = <0x29>;
461 compatible = "fsl,fman-v2-port-tx";
462 reg = <0xa9000 0x1000>;
463 };
464
465 fman1_tx_0x2a: port@aa000 {
466 cell-index = <0x2a>;
467 compatible = "fsl,fman-v2-port-tx";
468 reg = <0xaa000 0x1000>;
469 };
470
471 fman1_tx_0x2b: port@ab000 {
472 cell-index = <0x2b>;
473 compatible = "fsl,fman-v2-port-tx";
474 reg = <0xab000 0x1000>;
475 };
476
477	fman1_tx_0x2c: port@ac000 {
478 cell-index = <0x2c>;
479 compatible = "fsl,fman-v2-port-tx";
480 reg = <0xac000 0x1000>;
481 };
482
483 fman1_tx_0x30: port@b0000 {
484 cell-index = <0x30>;
485 compatible = "fsl,fman-v2-port-tx";
486 reg = <0xb0000 0x1000>;
487 };
488
489 ethernet@e0000 {
490 compatible = "fsl,fman-dtsec";
491 cell-index = <0>;
492 reg = <0xe0000 0x1000>;
493 fsl,fman-ports = <&fman1_rx_0x8 &fman1_tx_0x28>;
494 };
495
496 ethernet@e2000 {
497 compatible = "fsl,fman-dtsec";
498 cell-index = <1>;
499 reg = <0xe2000 0x1000>;
500 fsl,fman-ports = <&fman1_rx_0x9 &fman1_tx_0x29>;
501 };
502
503 ethernet@e4000 {
504 compatible = "fsl,fman-dtsec";
505 cell-index = <2>;
506 reg = <0xe4000 0x1000>;
507 fsl,fman-ports = <&fman1_rx_0xa &fman1_tx_0x2a>;
508 };
509
510 ethernet@e6000 {
511 compatible = "fsl,fman-dtsec";
512 cell-index = <3>;
513 reg = <0xe6000 0x1000>;
514 fsl,fman-ports = <&fman1_rx_0xb &fman1_tx_0x2b>;
515 };
516
517 ethernet@e8000 {
518 compatible = "fsl,fman-dtsec";
519 cell-index = <4>;
520		reg = <0xe8000 0x1000>;
521 fsl,fman-ports = <&fman1_rx_0xc &fman1_tx_0x2c>;
522	};

523 ethernet@f0000 {
524 cell-index = <8>;
525 compatible = "fsl,fman-xgec";
526 reg = <0xf0000 0x1000>;
527 fsl,fman-ports = <&fman1_rx_0x10 &fman1_tx_0x30>;
528 };
529
530 ptp-timer@fe000 {
531 compatible = "fsl,fman-ptp-timer";
532 reg = <0xfe000 0x1000>;
533 };
534};
diff --git a/Documentation/devicetree/bindings/rtc/rtc-opal.txt b/Documentation/devicetree/bindings/rtc/rtc-opal.txt
new file mode 100644
index 000000000000..af87e5ecac54
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/rtc-opal.txt
@@ -0,0 +1,16 @@
1IBM OPAL real-time clock
2------------------------
3
4Required properties:
5- compatible: Should be "ibm,opal-rtc"
6
7Optional properties:
8- has-tpo: Indicates whether timed power-on (wakeup) is supported.
9
10Example:
11 rtc {
12 compatible = "ibm,opal-rtc";
13 has-tpo;
14 phandle = <0x10000029>;
15 linux,phandle = <0x10000029>;
16 };
diff --git a/Documentation/devicetree/bindings/soc/fsl/bman-portals.txt b/Documentation/devicetree/bindings/soc/fsl/bman-portals.txt
new file mode 100644
index 000000000000..2a00e14e11e0
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/fsl/bman-portals.txt
@@ -0,0 +1,56 @@
1QorIQ DPAA Buffer Manager Portals Device Tree Binding
2
3Copyright (C) 2008 - 2014 Freescale Semiconductor Inc.
4
5CONTENTS
6
7 - BMan Portal
8 - Example
9
10BMan Portal Node
11
12Portals are memory-mapped interfaces to BMan that allow software running on
13processor cores, accelerators and network interfaces to interact with BMan
14with low latency and without locks.
15
16PROPERTIES
17
18- compatible
19 Usage: Required
20 Value type: <stringlist>
21 Definition: Must include "fsl,bman-portal-<hardware revision>"
22 May include "fsl,<SoC>-bman-portal" or "fsl,bman-portal"
23
24- reg
25 Usage: Required
26 Value type: <prop-encoded-array>
27 Definition: Two regions. The first is the cache-enabled region of
28 the portal. The second is the cache-inhibited region of
29 the portal
30
31- interrupts
32 Usage: Required
33 Value type: <prop-encoded-array>
34 Definition: Standard property
35
36EXAMPLE
37
38The example below shows a (P4080) BMan portals container/bus node with two portals
39
40 bman-portals@ff4000000 {
41 #address-cells = <1>;
42 #size-cells = <1>;
43 compatible = "simple-bus";
44 ranges = <0 0xf 0xf4000000 0x200000>;
45
46 bman-portal@0 {
47 compatible = "fsl,bman-portal-1.0.0", "fsl,bman-portal";
48 reg = <0x0 0x4000>, <0x100000 0x1000>;
49 interrupts = <105 2 0 0>;
50 };
51 bman-portal@4000 {
52 compatible = "fsl,bman-portal-1.0.0", "fsl,bman-portal";
53 reg = <0x4000 0x4000>, <0x101000 0x1000>;
54 interrupts = <107 2 0 0>;
55 };
56 };
diff --git a/Documentation/devicetree/bindings/soc/fsl/bman.txt b/Documentation/devicetree/bindings/soc/fsl/bman.txt
new file mode 100644
index 000000000000..9f80bf8709ac
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/fsl/bman.txt
@@ -0,0 +1,125 @@
1QorIQ DPAA Buffer Manager Device Tree Bindings
2
3Copyright (C) 2008 - 2014 Freescale Semiconductor Inc.
4
5CONTENTS
6
7 - BMan Node
8 - BMan Private Memory Node
9 - Example
10
11BMan Node
12
13The Buffer Manager is part of the Data-Path Acceleration Architecture (DPAA).
14BMan supports hardware allocation and deallocation of buffers belonging to pools
15originally created by software with configurable depletion thresholds. This
16binding covers the CCSR space programming model
17
18PROPERTIES
19
20- compatible
21 Usage: Required
22 Value type: <stringlist>
23 Definition: Must include "fsl,bman"
24 May include "fsl,<SoC>-bman"
25
26- reg
27 Usage: Required
28 Value type: <prop-encoded-array>
29 Definition: Registers region within the CCSR address space
30
31The BMan revision information is located in the BMAN_IP_REV_1/2 registers which
32are located at offsets 0xbf8 and 0xbfc
33
34- interrupts
35 Usage: Required
36 Value type: <prop-encoded-array>
37 Definition: Standard property. The error interrupt
38
39- fsl,liodn
40 Usage: See pamu.txt
41 Value type: <prop-encoded-array>
42 Definition: PAMU property used for static LIODN assignment
43
44- fsl,iommu-parent
45 Usage: See pamu.txt
46 Value type: <phandle>
47 Definition: PAMU property used for dynamic LIODN assignment
48
49 For additional details about the PAMU/LIODN binding(s) see pamu.txt
50
51Devices connected to a BMan instance via Direct Connect Portals (DCP) must link
52to the respective BMan instance
53
54- fsl,bman
55 Usage: Required
56 Value type: <prop-encoded-array>
57 Description: List of phandle and DCP index pairs, to the BMan instance
58 to which this device is connected via the DCP
59
60BMan Private Memory Node
61
62BMan requires a contiguous range of physical memory used as the backing store
63for BMan Free Buffer Proxy Records (FBPR). This memory is reserved/allocated as a
64node under the /reserved-memory node.
65
66The BMan FBPR memory node must be named "bman-fbpr"
67
68PROPERTIES
69
70- compatible
71 Usage: required
72 Value type: <stringlist>
73 Definition: Must include "fsl,bman-fbpr"
74
75The following constraints are relevant to the FBPR private memory:
76 - The size must be 2^(size + 1) bytes, with size = 11..33. That is 4 KiB to
77 16 GiB
78 - The alignment must be a multiple of the memory size
79
80The size of the FBPR must be chosen by observing the hardware features configured
81via the Reset Configuration Word (RCW) and that are relevant to a specific board
82(e.g. number of MAC(s) pinned-out, number of offline/host command FMan ports,
83etc.). The size configured in the DT must reflect the hardware capabilities and
84not the specific needs of an application
85
86For additional details about reserved memory regions see reserved-memory.txt
87
88EXAMPLE
89
90The example below shows a BMan FBPR dynamic allocation memory node
91
92 reserved-memory {
93 #address-cells = <2>;
94 #size-cells = <2>;
95 ranges;
96
97 bman_fbpr: bman-fbpr {
98 compatible = "fsl,bman-fbpr";
99 alloc-ranges = <0 0 0xf 0xffffffff>;
100 size = <0 0x1000000>;
101 alignment = <0 0x1000000>;
102 };
103 };
104
105The example below shows a (P4080) BMan CCSR-space node
106
107 crypto@300000 {
108 ...
109		fsl,bman = <&bman 2>;
110 ...
111 };
112
113 bman: bman@31a000 {
114 compatible = "fsl,bman";
115 reg = <0x31a000 0x1000>;
116 interrupts = <16 2 1 2>;
117 fsl,liodn = <0x17>;
118 memory-region = <&bman_fbpr>;
119 };
120
121 fman@400000 {
122 ...
123		fsl,bman = <&bman 0>;
124 ...
125 };
diff --git a/Documentation/devicetree/bindings/soc/fsl/qman-portals.txt b/Documentation/devicetree/bindings/soc/fsl/qman-portals.txt
new file mode 100644
index 000000000000..48c4dae5d6f9
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/fsl/qman-portals.txt
@@ -0,0 +1,154 @@
1QorIQ DPAA Queue Manager Portals Device Tree Binding
2
3Copyright (C) 2008 - 2014 Freescale Semiconductor Inc.
4
5CONTENTS
6
7 - QMan Portal
8 - QMan Pool Channel
9 - Example
10
11QMan Portal Node
12
13Portals are memory-mapped interfaces to QMan that allow software running on
14processor cores, accelerators and network interfaces to interact with QMan
15with low latency and without locks.
16
17PROPERTIES
18
19- compatible
20 Usage: Required
21 Value type: <stringlist>
22 Definition: Must include "fsl,qman-portal-<hardware revision>"
23 May include "fsl,<SoC>-qman-portal" or "fsl,qman-portal"
24
25- reg
26 Usage: Required
27 Value type: <prop-encoded-array>
28 Definition: Two regions. The first is the cache-enabled region of
29 the portal. The second is the cache-inhibited region of
30 the portal
31
32- interrupts
33 Usage: Required
34 Value type: <prop-encoded-array>
35 Definition: Standard property
36
37- fsl,liodn
38 Usage: See pamu.txt
39 Value type: <prop-encoded-array>
40 Definition: Two LIODN(s). DQRR LIODN (DLIODN) and Frame LIODN
41 (FLIODN)
42
43- fsl,iommu-parent
44 Usage: See pamu.txt
45 Value type: <phandle>
46 Definition: PAMU property used for dynamic LIODN assignment
47
48 For additional details about the PAMU/LIODN binding(s) see pamu.txt
49
50- fsl,qman-channel-id
51 Usage: Required
52 Value type: <u32>
53 Definition: The hardware index of the channel. This can also be
54 determined by dividing any of the channel's 8 work queue
55 IDs by 8
56
57In addition to these properties the qman-portals should have sub-nodes to
58represent the HW devices/portals that are connected to the software portal
59described here
60
61The currently supported sub-nodes are:
62 * fman0
63 * fman1
64 * pme
65 * crypto
66
67These subnodes should have the following properties:
68
69- fsl,liodn
70 Usage: See pamu.txt
71 Value type: <prop-encoded-array>
72 Definition: PAMU property used for static LIODN assignment
73
74- fsl,iommu-parent
75 Usage: See pamu.txt
76 Value type: <phandle>
77 Definition: PAMU property used for dynamic LIODN assignment
78
79- dev-handle
80 Usage: Required
81 Value type: <phandle>
82 Definition: The phandle to the particular hardware device that this
83 portal is connected to.
84
85DPAA QMan Pool Channel Nodes
86
87Pool Channels are defined with the following properties.
88
89PROPERTIES
90
91- compatible
92 Usage: Required
93 Value type: <stringlist>
94 Definition: Must include "fsl,qman-pool-channel"
95 May include "fsl,<SoC>-qman-pool-channel"
96
97- fsl,qman-channel-id
98 Usage: Required
99 Value type: <u32>
100 Definition: The hardware index of the channel. This can also be
101 determined by dividing any of the channel's 8 work queue
102 IDs by 8
103
104EXAMPLE
105
106The example below shows a (P4080) QMan portals container/bus node with two portals
107
108 qman-portals@ff4200000 {
109 #address-cells = <1>;
110 #size-cells = <1>;
111 compatible = "simple-bus";
112 ranges = <0 0xf 0xf4200000 0x200000>;
113
114 qman-portal@0 {
115 compatible = "fsl,qman-portal-1.2.0", "fsl,qman-portal";
116 reg = <0 0x4000>, <0x100000 0x1000>;
117 interrupts = <104 2 0 0>;
118 fsl,liodn = <1 2>;
119 fsl,qman-channel-id = <0>;
120
121 fman0 {
122 fsl,liodn = <0x21>;
123 dev-handle = <&fman0>;
124 };
125 fman1 {
126 fsl,liodn = <0xa1>;
127 dev-handle = <&fman1>;
128 };
129 crypto {
130 fsl,liodn = <0x41 0x66>;
131 dev-handle = <&crypto>;
132 };
133 };
134 qman-portal@4000 {
135 compatible = "fsl,qman-portal-1.2.0", "fsl,qman-portal";
136 reg = <0x4000 0x4000>, <0x101000 0x1000>;
137 interrupts = <106 2 0 0>;
138 fsl,liodn = <3 4>;
139 fsl,qman-channel-id = <1>;
140
141 fman0 {
142 fsl,liodn = <0x22>;
143 dev-handle = <&fman0>;
144 };
145 fman1 {
146 fsl,liodn = <0xa2>;
147 dev-handle = <&fman1>;
148 };
149 crypto {
150 fsl,liodn = <0x42 0x67>;
151 dev-handle = <&crypto>;
152 };
153 };
154 };
diff --git a/Documentation/devicetree/bindings/soc/fsl/qman.txt b/Documentation/devicetree/bindings/soc/fsl/qman.txt
new file mode 100644
index 000000000000..063e3a0b9d04
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/fsl/qman.txt
@@ -0,0 +1,165 @@
1QorIQ DPAA Queue Manager Device Tree Binding
2
3Copyright (C) 2008 - 2014 Freescale Semiconductor Inc.
4
5CONTENTS
6
7 - QMan Node
8 - QMan Private Memory Nodes
9 - Example
10
11QMan Node
12
13The Queue Manager is part of the Data-Path Acceleration Architecture (DPAA). QMan
14supports queuing and QoS scheduling of frames to CPUs, network interfaces and
15DPAA logic modules, and maintains packet ordering within flows. Besides providing
16flow-level queuing, it is also responsible for congestion management functions such
17as RED/WRED, congestion notifications and tail discards. This binding covers the
18CCSR space programming model.
19
20PROPERTIES
21
22- compatible
23 Usage: Required
24 Value type: <stringlist>
25 Definition: Must include "fsl,qman"
26 May include "fsl,<SoC>-qman"
27
28- reg
29 Usage: Required
30 Value type: <prop-encoded-array>
31 Definition: Registers region within the CCSR address space
32
33The QMan revision information is located in the QMAN_IP_REV_1/2 registers which
34are located at offsets 0xbf8 and 0xbfc
35
36- interrupts
37 Usage: Required
38 Value type: <prop-encoded-array>
39 Definition: Standard property. The error interrupt
40
41- fsl,liodn
42 Usage: See pamu.txt
43 Value type: <prop-encoded-array>
44 Definition: PAMU property used for static LIODN assignment
45
46- fsl,iommu-parent
47 Usage: See pamu.txt
48 Value type: <phandle>
49 Definition: PAMU property used for dynamic LIODN assignment
50
51 For additional details about the PAMU/LIODN binding(s) see pamu.txt
52
53- clocks
54 Usage: See clock-bindings.txt and qoriq-clock.txt
55 Value type: <prop-encoded-array>
56 Definition: Reference input clock. Its frequency is half of the
57 platform clock
58
59Devices connected to a QMan instance via Direct Connect Portals (DCP) must link
60to the respective QMan instance
61
62- fsl,qman
63 Usage: Required
64 Value type: <prop-encoded-array>
65 Description: List of phandle and DCP index pairs, to the QMan instance
66 to which this device is connected via the DCP
67
68QMan Private Memory Nodes
69
70QMan requires two contiguous ranges of physical memory used as the backing store
71for QMan Frame Queue Descriptors (FQD) and Packed Frame Descriptor Records (PFDR).
72This memory is reserved/allocated as nodes under the /reserved-memory node.
73
74The QMan FQD memory node must be named "qman-fqd"
75
76PROPERTIES
77
78- compatible
79 Usage: required
80 Value type: <stringlist>
81 Definition: Must include "fsl,qman-fqd"
82
83The QMan PFDR memory node must be named "qman-pfdr"
84
85PROPERTIES
86
87- compatible
88 Usage: required
89 Value type: <stringlist>
90 Definition: Must include "fsl,qman-pfdr"
91
92The following constraints are relevant to the FQD and PFDR private memory:
93 - The size must be 2^(size + 1) bytes, with size = 11..29. That is 4 KiB to
94 1 GiB
95 - The alignment must be a multiple of the memory size
96
97The size of the FQD and PFDR must be chosen by observing the hardware features
98configured via the Reset Configuration Word (RCW) and that are relevant to a
99specific board (e.g. number of MAC(s) pinned-out, number of offline/host command
100FMan ports, etc.). The size configured in the DT must reflect the hardware
101capabilities and not the specific needs of an application
102
103For additional details about reserved memory regions see reserved-memory.txt
104
105EXAMPLE
106
107The example below shows a QMan FQD and a PFDR dynamic allocation memory nodes
108
109 reserved-memory {
110 #address-cells = <2>;
111 #size-cells = <2>;
112 ranges;
113
114 qman_fqd: qman-fqd {
115 compatible = "fsl,qman-fqd";
116 alloc-ranges = <0 0 0xf 0xffffffff>;
117 size = <0 0x400000>;
118 alignment = <0 0x400000>;
119 };
120 qman_pfdr: qman-pfdr {
121 compatible = "fsl,qman-pfdr";
122 alloc-ranges = <0 0 0xf 0xffffffff>;
123 size = <0 0x2000000>;
124 alignment = <0 0x2000000>;
125 };
126 };
127
128The example below shows a (P4080) QMan CCSR-space node
129
130 clockgen: global-utilities@e1000 {
131 ...
132 sysclk: sysclk {
133 ...
134 };
135 ...
136 platform_pll: platform-pll@c00 {
137 #clock-cells = <1>;
138 reg = <0xc00 0x4>;
139 compatible = "fsl,qoriq-platform-pll-1.0";
140 clocks = <&sysclk>;
141 clock-output-names = "platform-pll", "platform-pll-div2";
142 };
143 ...
144 };
145
146 crypto@300000 {
147 ...
148		fsl,qman = <&qman 2>;
149 ...
150 };
151
152 qman: qman@318000 {
153 compatible = "fsl,qman";
154 reg = <0x318000 0x1000>;
155		interrupts = <16 2 1 3>;
156 fsl,liodn = <0x16>;
157 memory-region = <&qman_fqd &qman_pfdr>;
158 clocks = <&platform_pll 1>;
159 };
160
161 fman@400000 {
162 ...
163		fsl,qman = <&qman 0>;
164 ...
165 };
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 2417cb0b493b..cc6151c431c8 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -101,6 +101,7 @@ mitsubishi Mitsubishi Electric Corporation
101mosaixtech Mosaix Technologies, Inc. 101mosaixtech Mosaix Technologies, Inc.
102moxa Moxa 102moxa Moxa
103mpl MPL AG 103mpl MPL AG
104mti Imagination Technologies Ltd. (formerly MIPS Technologies Inc.)
104mundoreader Mundo Reader S.L. 105mundoreader Mundo Reader S.L.
105murata Murata Manufacturing Co., Ltd. 106murata Murata Manufacturing Co., Ltd.
106mxicy Macronix International Co., Ltd. 107mxicy Macronix International Co., Ltd.
diff --git a/Documentation/dmaengine.txt b/Documentation/dmaengine/client.txt
index 11fb87ff6cd0..11fb87ff6cd0 100644
--- a/Documentation/dmaengine.txt
+++ b/Documentation/dmaengine/client.txt
diff --git a/Documentation/dmatest.txt b/Documentation/dmaengine/dmatest.txt
index dd77a81bdb80..dd77a81bdb80 100644
--- a/Documentation/dmatest.txt
+++ b/Documentation/dmaengine/dmatest.txt
diff --git a/Documentation/dmaengine/provider.txt b/Documentation/dmaengine/provider.txt
new file mode 100644
index 000000000000..766658ccf235
--- /dev/null
+++ b/Documentation/dmaengine/provider.txt
@@ -0,0 +1,366 @@
1DMAengine controller documentation
2==================================
3
4Hardware Introduction
5+++++++++++++++++++++
6
7Most of the Slave DMA controllers have the same general principles of
8operation.
9
10They have a given number of channels to use for the DMA transfers, and
11a given number of request lines.
12
13Requests and channels are pretty much orthogonal: a given channel
14can usually serve any of the requests. To simplify, channels are the
15entities that will be doing the copy, and requests what endpoints are
16involved.
17
18The request lines actually correspond to physical lines going from the
19DMA-eligible devices to the controller itself. Whenever the device
20wants to start a transfer, it asserts a DMA request (DRQ) on
21that request line.
22
23A very simple DMA controller would only take into account a single
24parameter: the transfer size. At each clock cycle, it would transfer a
25byte of data from one buffer to another, until the transfer size has
26been reached.
27
28That wouldn't work well in the real world, since slave devices might
29require a specific number of bits to be transferred in a single
30cycle. For example, we may want to transfer as much data as the
31physical bus allows to maximize performance when doing a simple
32memory copy operation, but our audio device could have a narrower FIFO
33that requires data to be written exactly 16 or 24 bits at a time. This
34is why most if not all of the DMA controllers can adjust this, using a
35parameter called the transfer width.
36
37Moreover, some DMA controllers, whenever the RAM is used as a source
38or destination, can group the reads or writes in memory into a buffer,
39so instead of having a lot of small memory accesses, which is not
40really efficient, you'll get several bigger transfers. This is done
41using a parameter called the burst size, that defines how many single
42reads/writes it's allowed to do without the controller splitting the
43transfer into smaller sub-transfers.
44
45Our theoretical DMA controller would then only be able to do transfers
46that involve a single contiguous block of data. However, some of the
47transfers we usually need are not: we may want to copy data from
48non-contiguous buffers to a contiguous buffer, which is called
49scatter-gather.
50
51DMAEngine, at least for mem2dev transfers, requires support for
52scatter-gather. So we're left with two cases here: either we have a
53quite simple DMA controller that doesn't support it, and we'll have to
54implement it in software, or we have a more advanced DMA controller
55that implements scatter-gather in hardware.
56
57The latter are usually programmed using a collection of chunks to
58transfer, and whenever the transfer is started, the controller will go
59over that collection, doing whatever we programmed there.
60
61This collection is usually either a table or a linked list. You will
62then push either the address of the table and its number of elements,
63or the first item of the list to one channel of the DMA controller,
64and whenever a DRQ is asserted, it will go through the collection
65to know where to fetch the data from.
66
67Either way, the format of this collection is completely dependent on
68your hardware. Each DMA controller will require a different structure,
69but all of them will require, for every chunk, at least the source and
70destination addresses, whether it should increment these addresses or
71not and the three parameters we saw earlier: the burst size, the
72transfer width and the transfer size.
73
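As an illustration only, one chunk of such a collection could look
like the following made-up C layout; every field name and width here
is invented, since each controller defines its own:

	struct foo_hw_desc {
		u32 src;	/* source bus address */
		u32 dst;	/* destination bus address */
		u32 cfg;	/* transfer width, burst size and
				 * address-increment flags */
		u32 len;	/* transfer size of this chunk */
		u32 next;	/* bus address of the next item,
				 * 0 when this is the last one */
	};
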
74One last thing: usually, slave devices won't issue a DRQ by
75default, and you have to enable this in your slave device driver first
76whenever you want to use DMA.
77
78These were just the general memory-to-memory (also called mem2mem) or
79memory-to-device (mem2dev) kinds of transfers. Most devices also
80support other kinds of transfers or memory operations that dmaengine
81supports; these will be detailed later in this document.
82
83DMA Support in Linux
84++++++++++++++++++++
85
86Historically, DMA controller drivers have been implemented using the
87async TX API, to offload operations such as memory copy, XOR,
88cryptography, etc., basically any memory to memory operation.
89
90Over time, the need for memory to device transfers arose, and
91dmaengine was extended. Nowadays, the async TX API is written as a
92layer on top of dmaengine, and acts as a client. Still, dmaengine
93accommodates that API in some cases, and made some design choices to
94ensure that it stayed compatible.
95
96For more information on the Async TX API, please refer to the relevant
97documentation file in Documentation/crypto/async-tx-api.txt.
98
99DMAEngine Registration
100++++++++++++++++++++++
101
102struct dma_device Initialization
103--------------------------------
104
105Just like any other kernel framework, the whole DMAEngine registration
106relies on the driver filling a structure and registering against the
107framework. In our case, that structure is dma_device.
108
109The first thing you need to do in your driver is to allocate this
110structure. Any of the usual memory allocators will do, but you'll also
111need to initialize a few fields in there:
112
113 * channels: should be initialized as a list using the
114 INIT_LIST_HEAD macro for example
115
116 * dev: should hold the pointer to the struct device associated
117 to your current driver instance.
118
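A minimal sketch of this, assuming a platform device driver (all the
foo_* names are hypothetical):

	#include <linux/dmaengine.h>
	#include <linux/platform_device.h>

	struct foo_dma_dev {
		struct dma_device ddev;
		/* controller-private state would live here */
	};

	static int foo_probe(struct platform_device *pdev)
	{
		struct foo_dma_dev *fdev;

		fdev = devm_kzalloc(&pdev->dev, sizeof(*fdev), GFP_KERNEL);
		if (!fdev)
			return -ENOMEM;

		/* The two fields described above */
		INIT_LIST_HEAD(&fdev->ddev.channels);
		fdev->ddev.dev = &pdev->dev;

		/* capabilities and operations are filled in next ... */
		return 0;
	}
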
119Supported transaction types
120---------------------------
121
122The next thing you need is to set which transaction types your device
123(and driver) supports.
124
125Our dma_device structure has a field called cap_mask that holds the
126various types of transaction supported, and you need to modify this
127mask using the dma_cap_set function, passing as arguments the flags
128for the transaction types you support.
129
130All those capabilities are defined in the dma_transaction_type enum,
131in include/linux/dmaengine.h
132
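Continuing the hypothetical foo driver above, a slave-only controller
that also handles cyclic transfers might do:

	dma_cap_zero(fdev->ddev.cap_mask);
	dma_cap_set(DMA_SLAVE, fdev->ddev.cap_mask);
	dma_cap_set(DMA_CYCLIC, fdev->ddev.cap_mask);
	dma_cap_set(DMA_PRIVATE, fdev->ddev.cap_mask);
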
133Currently, the types available are:
134 * DMA_MEMCPY
135 - The device is able to do memory to memory copies
136
137 * DMA_XOR
138 - The device is able to perform XOR operations on memory areas
139 - Used to accelerate XOR intensive tasks, such as RAID5
140
141 * DMA_XOR_VAL
142 - The device is able to perform parity check using the XOR
143 algorithm against a memory buffer.
144
145 * DMA_PQ
146 - The device is able to perform RAID6 P+Q computations, P being a
147 simple XOR, and Q being a Reed-Solomon algorithm.
148
149 * DMA_PQ_VAL
150 - The device is able to perform parity check using RAID6 P+Q
151 algorithm against a memory buffer.
152
153 * DMA_INTERRUPT
154 - The device is able to trigger a dummy transfer that will
155 generate periodic interrupts
156 - Used by the client drivers to register a callback that will be
157 called on a regular basis through the DMA controller interrupt
158
159 * DMA_SG
160 - The device supports memory to memory scatter-gather
161 transfers.
162 - Even though a plain memcpy can look like a particular case of a
163 scatter-gather transfer, with a single chunk to transfer, it's a
164 distinct transaction type in the mem2mem transfers case
165
166 * DMA_PRIVATE
167 - The device only supports slave transfers, and as such isn't
168 available for async transfers.
169
170 * DMA_ASYNC_TX
171 - Must not be set by the device, and will be set by the framework
172 if needed
173 - /* TODO: What is it about? */
174
175 * DMA_SLAVE
176 - The device can handle device to memory transfers, including
177 scatter-gather transfers.
178 - While in the mem2mem case we were having two distinct types to
179 deal with a single chunk to copy or a collection of them, here,
180 we just have a single transaction type that is supposed to
181 handle both.
182 - If you want to transfer a single contiguous memory buffer,
183 simply build a scatter list with only one item.
184
185 * DMA_CYCLIC
186 - The device can handle cyclic transfers.
187 - A cyclic transfer is a transfer where the chunk collection will
188 loop over itself, with the last item pointing to the first.
189 - It's usually used for audio transfers, where you want to operate
190 on a single ring buffer that you will fill with your audio data.
191
192 * DMA_INTERLEAVE
193 - The device supports interleaved transfer.
194 - These transfers can transfer data from a non-contiguous buffer
195 to a non-contiguous buffer, as opposed to DMA_SLAVE, which can
196 transfer data from a non-contiguous data set to a contiguous
197 destination buffer.
198 - It's usually used for 2D content transfers, in which case you
199 want to transfer a portion of uncompressed data directly to the
200 display for rendering.
201
202These various types will also affect how the source and destination
203addresses change over time.
204
205Addresses pointing to RAM are typically incremented (or decremented)
206after each transfer. In case of a ring buffer, they may loop
207(DMA_CYCLIC). Addresses pointing to a device's register (e.g. a FIFO)
208are typically fixed.
209
210Device operations
211-----------------
212
213Our dma_device structure also requires a few function pointers in
214order to implement the actual logic, now that we have described the
215operations we are able to perform.
216
217The functions that we have to fill in there, and hence have to
218implement, obviously depend on the transaction types you reported as
219supported.
220
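Before going through the individual operations, here is a sketch for
the hypothetical foo driver that illustrates several of the
requirements detailed in the list below (the foo_* callbacks that are
not shown are assumed to be implemented elsewhere in the driver):

	struct foo_desc {
		struct dma_async_tx_descriptor tx;
		/* the hardware descriptor chain would hang off here */
	};

	static struct dma_async_tx_descriptor *
	foo_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			  unsigned int sg_len,
			  enum dma_transfer_direction dir,
			  unsigned long flags, void *context)
	{
		/* May run in interrupt context, hence GFP_NOWAIT */
		struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);

		if (!d)
			return NULL;
		/* ... translate sgl into hardware descriptors here ... */
		dma_async_tx_descriptor_init(&d->tx, chan);
		d->tx.flags = flags;
		d->tx.tx_submit = foo_tx_submit;
		return &d->tx;
	}

	/* in foo_probe(), continuing from the snippets above */
	fdev->ddev.device_alloc_chan_resources = foo_alloc_chan_resources;
	fdev->ddev.device_free_chan_resources = foo_free_chan_resources;
	fdev->ddev.device_prep_slave_sg = foo_prep_slave_sg;
	fdev->ddev.device_issue_pending = foo_issue_pending;
	fdev->ddev.device_tx_status = foo_tx_status;
	fdev->ddev.device_control = foo_control;
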
221 * device_alloc_chan_resources
222 * device_free_chan_resources
223 - These functions will be called whenever a driver calls
224 dma_request_channel or dma_release_channel for the first/last
225 time on the channel associated to that driver.
226 - They are in charge of allocating/freeing all the needed
227 resources in order for that channel to be useful for your
228 driver.
229 - These functions can sleep.
230
231 * device_prep_dma_*
232 - These functions match the capabilities you registered
233 previously.
234 - These functions all take the buffer or the scatterlist relevant
235 for the transfer being prepared, and should create a hardware
236 descriptor or a list of hardware descriptors from it
237 - These functions can be called from an interrupt context
238 - Any allocation you might do should be using the GFP_NOWAIT
239 flag, in order not to potentially sleep, but without depleting
240 the emergency pool either.
241 - Drivers should try to pre-allocate any memory they might need
242 during the transfer setup at probe time to avoid putting too
243 much pressure on the nowait allocator.
244
245 - It should return a unique instance of the
246 dma_async_tx_descriptor structure, which represents this
247 particular transfer.
248
249 - This structure can be initialized using the function
250 dma_async_tx_descriptor_init.
251 - You'll also need to set two fields in this structure:
252 + flags:
253 TODO: Can it be modified by the driver itself, or
254 should it be always the flags passed in the arguments
255
256 + tx_submit: A pointer to a function you have to implement,
257 that is supposed to push the current
258 transaction descriptor to a pending queue, waiting
259 for issue_pending to be called.
260
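A skeleton of such a preparation callback for the slave case could
look like this; foo_desc and foo_tx_submit are hypothetical, and the
construction of the hardware descriptors is elided:

    static struct dma_async_tx_descriptor *
    foo_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                      unsigned int sg_len,
                      enum dma_transfer_direction dir,
                      unsigned long flags, void *context)
    {
        struct foo_desc *d;

        /* May run in interrupt context: no sleeping allocations */
        d = kzalloc(sizeof(*d), GFP_NOWAIT);
        if (!d)
            return NULL;

        /* ... build the hardware descriptor(s) from sgl ... */

        dma_async_tx_descriptor_init(&d->tx, chan);
        d->tx.flags = flags;
        d->tx.tx_submit = foo_tx_submit;

        return &d->tx;
    }
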
261 * device_issue_pending
262 - Takes the first transaction descriptor in the pending queue,
263 and starts the transfer. Whenever that transfer is done, it
264 should move to the next transaction in the list.
265 - This function can be called in an interrupt context
266
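A sketch of that logic, assuming a driver-private channel structure
with a lock and a pending list, and a hypothetical foo_start() helper
that programs the hardware with the next descriptor:

    static void foo_issue_pending(struct dma_chan *chan)
    {
        struct foo_chan *fc = to_foo_chan(chan);
        unsigned long flags;

        /* May be called in interrupt context: spinlock only */
        spin_lock_irqsave(&fc->lock, flags);
        if (!fc->busy && !list_empty(&fc->pending))
            foo_start(fc);
        spin_unlock_irqrestore(&fc->lock, flags);
    }
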
267 * device_tx_status
268 - Should report the number of bytes left to transfer on the given channel
269 - Should only care about the transaction descriptor passed as
270 argument, not the currently active one on a given channel
271 - The tx_state argument might be NULL
272 - Should use dma_set_residue to report it
273 - In the case of a cyclic transfer, it should only take into
274 account the current period.
275 - This function can be called in an interrupt context.
276
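Most drivers build this on top of the dma_cookie_status() helper from
drivers/dma/dmaengine.h; the residue computation below is left to a
hypothetical foo_get_bytes_left() helper:

    static enum dma_status foo_tx_status(struct dma_chan *chan,
                                         dma_cookie_t cookie,
                                         struct dma_tx_state *txstate)
    {
        enum dma_status ret;

        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE || !txstate)
            return ret;

        dma_set_residue(txstate, foo_get_bytes_left(chan, cookie));
        return ret;
    }
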
277 * device_control
278 - Used by client drivers to control and configure the channel
279 they have a handle on.
280 - Called with a command and an argument
281 + The command is one of the values listed by the enum
282 dma_ctrl_cmd. The valid commands are:
283 + DMA_PAUSE
284 + Pauses a transfer on the channel
285 + This command should operate synchronously on the channel,
286 pausing right away the work of the given channel
287 + DMA_RESUME
288 + Restarts a transfer on the channel
289 + This command should operate synchronously on the channel,
290 resuming right away the work of the given channel
291 + DMA_TERMINATE_ALL
292 + Aborts all the pending and ongoing transfers on the
293 channel
294 + This command should operate synchronously on the channel,
295 terminating right away all the transfers on the given channel
296 + DMA_SLAVE_CONFIG
297 + Reconfigures the channel with passed configuration
298 + This command should NOT perform synchronously, or on any
299 currently queued transfers, but only on subsequent ones
300 + In this case, the function will receive a
301 dma_slave_config structure pointer as an argument, which
302 details the configuration to use.
303 + Even though that structure contains a direction field,
304 this field is deprecated in favor of the direction
305 argument given to the prep_* functions
306 + FSLDMA_EXTERNAL_START
307 + TODO: Why does that even exist?
308 + The argument is an opaque unsigned long. This actually is a
309 pointer to a struct dma_slave_config that should be used only
310 in the DMA_SLAVE_CONFIG case.
311
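The usual shape of this callback is a switch on the command, with
DMA_SLAVE_CONFIG stashing the configuration for later prep_* calls. A
sketch, with the foo_chan state and the termination logic left out:

    static int foo_control(struct dma_chan *chan,
                           enum dma_ctrl_cmd cmd, unsigned long arg)
    {
        struct foo_chan *fc = to_foo_chan(chan);

        switch (cmd) {
        case DMA_SLAVE_CONFIG:
            /* Only affects subsequently prepared transfers */
            fc->cfg = *(struct dma_slave_config *)arg;
            return 0;
        case DMA_TERMINATE_ALL:
            /* ... abort all pending and ongoing transfers ... */
            return 0;
        default:
            return -ENXIO;
        }
    }
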
312 * device_slave_caps
313 - Called through the framework by client drivers in order to have
314 an idea of the properties of the channel allocated to
315 them.
316 - Such properties are the buswidth, available directions, etc.
317 - Required for every generic layer doing DMA transfers, such as
318 ASoC.
319
320Misc notes (stuff that should be documented, but we don't really know
321where to put it)
322------------------------------------------------------------------
323 * dma_run_dependencies
324 - Should be called at the end of an async TX transfer, and can be
325 ignored in the slave transfers case.
326 - Makes sure that dependent operations are run before marking it
327 as complete.
328
329 * dma_cookie_t
330 - It's a DMA transaction ID that will increment over time.
331 - Not really relevant any more since the introduction of virt-dma
332 that abstracts it away.
333
334 * DMA_CTRL_ACK
335 - Undocumented feature
336 - No one really has an idea of what it's about, besides being
337 related to reusing the DMA transaction descriptors or having
338 additional transactions added to it in the async-tx API
339 - Useless in the case of the slave API
340
341General Design Notes
342--------------------
343
344Most of the DMAEngine drivers you'll see are based on a similar design
345that handles the end-of-transfer interrupts in the handler, but defers
346most work to a tasklet, including the start of a new transfer whenever
347the previous transfer ends.
348
349This is a rather inefficient design though, because the inter-transfer
350latency is not only the interrupt latency, but also the scheduling
351latency of the tasklet. This leaves the channel idle in between, which
352slows down the global transfer rate.
353
354You should avoid this kind of practice, and instead of electing a new
355transfer in your tasklet, move that part to the interrupt handler in
356order to have a shorter idle window (that we can't really avoid
357anyway), as the sketch below illustrates.
358
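A sketch of the preferred structure, where the interrupt handler
itself elects the next transfer and the tasklet only runs the client
callbacks; foo_complete() and foo_start() are hypothetical:

    static irqreturn_t foo_irq_handler(int irq, void *data)
    {
        struct foo_chan *fc = data;

        spin_lock(&fc->lock);
        foo_complete(fc);
        /* Start the next transfer from the handler itself, so the
         * channel doesn't sit idle while the tasklet is scheduled */
        if (!list_empty(&fc->pending))
            foo_start(fc);
        spin_unlock(&fc->lock);

        /* The completion callbacks can wait for the tasklet */
        tasklet_schedule(&fc->tasklet);

        return IRQ_HANDLED;
    }
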
359Glossary
360--------
361
362Burst: A number of consecutive read or write operations
363 that can be queued to buffers before being flushed to
364 memory.
365Chunk: A contiguous collection of bursts
366Transfer: A collection of chunks (be it contiguous or not)
diff --git a/Documentation/email-clients.txt b/Documentation/email-clients.txt
index 9af538be3751..eede6088f978 100644
--- a/Documentation/email-clients.txt
+++ b/Documentation/email-clients.txt
@@ -77,6 +77,17 @@ should appear, and then pressing CTRL-R let you specify the patch file
77to insert into the message. 77to insert into the message.
78 78
79~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 79~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
80Claws Mail (GUI)
81
82Works. Some people use this successfully for patches.
83
84To insert a patch use Message->Insert File (CTRL+i) or an external editor.
85
86If the inserted patch has to be edited in the Claws composition window,
87"Auto wrapping" in Configuration->Preferences->Compose->Wrapping should be
88disabled.
89
90~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
80Evolution (GUI) 91Evolution (GUI)
81 92
82Some people use this successfully for patches. 93Some people use this successfully for patches.
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index eb8a10e22f7c..aae9dd13c91f 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -1272,7 +1272,7 @@ softirq.
1272 1272
1273 1273
12741.9 Ext4 file system parameters 12741.9 Ext4 file system parameters
1275------------------------------ 1275-------------------------------
1276 1276
1277Information about mounted ext4 file systems can be found in 1277Information about mounted ext4 file systems can be found in
1278/proc/fs/ext4. Each mounted filesystem will have a directory in 1278/proc/fs/ext4. Each mounted filesystem will have a directory in
diff --git a/Documentation/input/xpad.txt b/Documentation/input/xpad.txt
index 7cc9a436e6a1..d1b23f295db4 100644
--- a/Documentation/input/xpad.txt
+++ b/Documentation/input/xpad.txt
@@ -1,18 +1,22 @@
1xpad - Linux USB driver for X-Box gamepads 1xpad - Linux USB driver for Xbox compatible controllers
2 2
3This is the very first release of a driver for X-Box gamepads. 3This driver exposes all first-party and third-party Xbox compatible
4Basically, this was hacked away in just a few hours, so don't expect 4controllers. It has a long history and has enjoyed considerable usage
5miracles. 5as Windows' xinput library caused most PC games to focus on Xbox
6controller compatibility.
6 7
7In particular, there is currently NO support for the rumble pack. 8Due to backwards compatibility, all buttons are reported as digital.
8You won't find many ff-aware linux applications anyway. 9This only affects Original Xbox controllers. All later controller models
10have only digital face buttons.
11
12Rumble is supported on some models of Xbox 360 controllers but not on
13Original Xbox controllers nor on Xbox One controllers. As of writing
14the Xbox One's rumble protocol has not been reverse engineered but in
15the future could be supported.
9 16
10 17
110. Notes 180. Notes
12-------- 19--------
13
14Driver updated for kernel 2.6.17.11. (Based on a patch for 2.6.11.4.)
15
16The number of buttons/axes reported varies based on 3 things: 20The number of buttons/axes reported varies based on 3 things:
17- if you are using a known controller 21- if you are using a known controller
18- if you are using a known dance pad 22- if you are using a known dance pad
@@ -20,12 +24,16 @@ The number of buttons/axes reported varies based on 3 things:
20 module configuration for "Map D-PAD to buttons rather than axes for unknown 24 module configuration for "Map D-PAD to buttons rather than axes for unknown
21 pads" (module option dpad_to_buttons) 25 pads" (module option dpad_to_buttons)
22 26
23If you set dpad_to_buttons to 0 and you are using an unknown device (one 27If you set dpad_to_buttons to N and you are using an unknown device
24not listed below), the driver will map the directional pad to axes (X/Y), 28the driver will map the directional pad to axes (X/Y).
25if you said N it will map the d-pad to buttons, which is needed for dance 29If you said Y it will map the d-pad to buttons, which is needed for dance
26style games to function correctly. The default is Y. 30style games to function correctly. The default is Y.
31
32dpad_to_buttons has no effect for known pads. An erroneous commit message
33claimed dpad_to_buttons could be used to force behavior on known devices.
34This is not true. Both dpad_to_buttons and triggers_to_buttons only affect
35unknown controllers.
27 36
28dpad_to_buttons has no effect for known pads.
29 37
300.1 Normal Controllers 380.1 Normal Controllers
31---------------------- 39----------------------
@@ -80,17 +88,29 @@ to the list of supported devices, ensuring that it will work out of the
80box in the future. 88box in the future.
81 89
82 90
831. USB adapter 911. USB adapters
84-------------- 92--------------
93All generations of Xbox controllers speak USB over the wire.
94- Original Xbox controllers use a proprietary connector and require adapters.
95- Wireless Xbox 360 controllers require an 'Xbox 360 Wireless Gaming Receiver
96 for Windows'
97- Wired Xbox 360 controllers use standard USB connectors.
98- Xbox One controllers can be wireless but speak Wi-Fi Direct and are not
99 yet supported.
100- Xbox One controllers can be wired and use standard Micro-USB connectors.
101
85 102
86Before you can actually use the driver, you need to get yourself an 103
87adapter cable to connect the X-Box controller to your Linux-Box. You 1041.1 Original Xbox USB adapters
88can buy these online fairly cheap, or build your own. 105------------------------------
106Using this driver with an Original Xbox controller requires an
107adapter cable to break out the proprietary connector's pins to USB.
108You can buy these online fairly cheap, or build your own.
89 109
90Such a cable is pretty easy to build. The Controller itself is a USB 110Such a cable is pretty easy to build. The Controller itself is a USB
91compound device (a hub with three ports for two expansion slots and 111compound device (a hub with three ports for two expansion slots and
92the controller device) with the only difference in a nonstandard connector 112the controller device) with the only difference in a nonstandard connector
93(5 pins vs. 4 on standard USB connector). 113(5 pins vs. 4 on standard USB 1.0 connectors).
94 114
95You just need to solder a USB connector onto the cable and keep the 115You just need to solder a USB connector onto the cable and keep the
96yellow wire unconnected. The other pins have the same order on both 116yellow wire unconnected. The other pins have the same order on both
@@ -102,26 +122,41 @@ original one. You can buy an extension cable and cut that instead. That way,
102you can still use the controller with your X-Box, if you have one ;) 122you can still use the controller with your X-Box, if you have one ;)
103 123
104 124
125
1052. Driver Installation 1262. Driver Installation
106---------------------- 127----------------------
107 128
108Once you have the adapter cable and the controller is connected, you need 129Once you have the adapter cable, if needed, and the controller connected,
109to load your USB subsystem and should cat /proc/bus/usb/devices. 130the xpad module should be auto-loaded. To confirm, you can cat
110There should be an entry like the one at the end [4]. 131/proc/bus/usb/devices. There should be an entry like the one at the end [4].
132
133
111 134
112Currently (as of version 0.0.6), the following devices are included: 1353. Supported Controllers
113 original Microsoft XBOX controller (US), vendor=0x045e, product=0x0202 136------------------------
114 smaller Microsoft XBOX controller (US), vendor=0x045e, product=0x0289 137For a full list of supported controllers and associated vendor and product
138IDs see the xpad_device[] array[6].
139
140As of the historic version 0.0.6 (2006-10-10) the following devices
141were supported:
142 original Microsoft XBOX controller (US), vendor=0x045e, product=0x0202
143 smaller Microsoft XBOX controller (US), vendor=0x045e, product=0x0289
115 original Microsoft XBOX controller (Japan), vendor=0x045e, product=0x0285 144 original Microsoft XBOX controller (Japan), vendor=0x045e, product=0x0285
116 InterAct PowerPad Pro (Germany), vendor=0x05fd, product=0x107a 145 InterAct PowerPad Pro (Germany), vendor=0x05fd, product=0x107a
117 RedOctane Xbox Dance Pad (US), vendor=0x0c12, product=0x8809 146 RedOctane Xbox Dance Pad (US), vendor=0x0c12, product=0x8809
147
148Unrecognized models of Xbox controllers should function as Generic
149Xbox controllers. Unrecognized Dance Pad controllers require setting
150the module option 'dpad_to_buttons'.
151
152If you have an unrecognized controller please see 0.3 - Unknown Controllers
118 153
119The driver should work with xbox pads not listed above as well, however
120you will need to do something extra for dance pads to work.
121 154
122If you have a controller not listed above, see 0.3 - Unknown Controllers 1554. Manual Testing
156-----------------
157To test this driver's functionality you may use 'jstest'.
123 158
124If you compiled and installed the driver, test the functionality: 159For example:
125> modprobe xpad 160> modprobe xpad
126> modprobe joydev 161> modprobe joydev
127> jstest /dev/js0 162> jstest /dev/js0
@@ -134,7 +169,8 @@ show 20 inputs (6 axes, 14 buttons).
134It works? Voila, you're done ;) 169It works? Voila, you're done ;)
135 170
136 171
1373. Thanks 172
1735. Thanks
138--------- 174---------
139 175
140I have to thank ITO Takayuki for the detailed info on his site 176I have to thank ITO Takayuki for the detailed info on his site
@@ -145,14 +181,14 @@ His useful info and both the usb-skeleton as well as the iforce input driver
145the basic functionality. 181the basic functionality.
146 182
147 183
1484. References
149-------------
150 184
1511. http://euc.jp/periphs/xbox-controller.ja.html (ITO Takayuki) 1856. References
1522. http://xpad.xbox-scene.com/ 186-------------
1533. http://www.markosweb.com/www/xboxhackz.com/
154 187
1554. /proc/bus/usb/devices - dump from InterAct PowerPad Pro (Germany): 188[1]: http://euc.jp/periphs/xbox-controller.ja.html (ITO Takayuki)
189[2]: http://xpad.xbox-scene.com/
190[3]: http://www.markosweb.com/www/xboxhackz.com/
191[4]: /proc/bus/usb/devices - dump from InterAct PowerPad Pro (Germany):
156 192
157T: Bus=01 Lev=03 Prnt=04 Port=00 Cnt=01 Dev#= 5 Spd=12 MxCh= 0 193T: Bus=01 Lev=03 Prnt=04 Port=00 Cnt=01 Dev#= 5 Spd=12 MxCh= 0
158D: Ver= 1.10 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=32 #Cfgs= 1 194D: Ver= 1.10 Cls=00(>ifc ) Sub=00 Prot=00 MxPS=32 #Cfgs= 1
@@ -162,7 +198,7 @@ I: If#= 0 Alt= 0 #EPs= 2 Cls=58(unk. ) Sub=42 Prot=00 Driver=(none)
162E: Ad=81(I) Atr=03(Int.) MxPS= 32 Ivl= 10ms 198E: Ad=81(I) Atr=03(Int.) MxPS= 32 Ivl= 10ms
163E: Ad=02(O) Atr=03(Int.) MxPS= 32 Ivl= 10ms 199E: Ad=02(O) Atr=03(Int.) MxPS= 32 Ivl= 10ms
164 200
1655. /proc/bus/usb/devices - dump from Redoctane Xbox Dance Pad (US): 201[5]: /proc/bus/usb/devices - dump from Redoctane Xbox Dance Pad (US):
166 202
167T: Bus=01 Lev=02 Prnt=09 Port=00 Cnt=01 Dev#= 10 Spd=12 MxCh= 0 203T: Bus=01 Lev=02 Prnt=09 Port=00 Cnt=01 Dev#= 10 Spd=12 MxCh= 0
168D: Ver= 1.10 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 8 #Cfgs= 1 204D: Ver= 1.10 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 8 #Cfgs= 1
@@ -173,7 +209,12 @@ I: If#= 0 Alt= 0 #EPs= 2 Cls=58(unk. ) Sub=42 Prot=00 Driver=xpad
173E: Ad=82(I) Atr=03(Int.) MxPS= 32 Ivl=4ms 209E: Ad=82(I) Atr=03(Int.) MxPS= 32 Ivl=4ms
174E: Ad=02(O) Atr=03(Int.) MxPS= 32 Ivl=4ms 210E: Ad=02(O) Atr=03(Int.) MxPS= 32 Ivl=4ms
175 211
176-- 212[6]: http://lxr.free-electrons.com/ident?i=xpad_device
213
214
215
2167. Historic Edits
217-----------------
177Marko Friedemann <mfr@bmx-chemnitz.de> 218Marko Friedemann <mfr@bmx-chemnitz.de>
1782002-07-16 2192002-07-16
179 - original doc 220 - original doc
@@ -181,3 +222,5 @@ Marko Friedemann <mfr@bmx-chemnitz.de>
181Dominic Cerquetti <binary1230@yahoo.com> 222Dominic Cerquetti <binary1230@yahoo.com>
1822005-03-19 2232005-03-19
183 - added stuff for dance pads, new d-pad->axes mappings 224 - added stuff for dance pads, new d-pad->axes mappings
225
226Later changes may be viewed with 'git log Documentation/input/xpad.txt'
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index eacb2e0397ae..43ecdcd39df2 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -3434,6 +3434,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
3434 neutralize any effect of /proc/sys/kernel/sysrq. 3434 neutralize any effect of /proc/sys/kernel/sysrq.
3435 Useful for debugging. 3435 Useful for debugging.
3436 3436
3437 tcpmhash_entries= [KNL,NET]
3438 Set the number of tcp_metrics_hash slots.
3439 Default value is 8192 or 16384 depending on total
3440 ram pages. This is used to specify the TCP metrics
3441 cache size. See Documentation/networking/ip-sysctl.txt
3442 "tcp_no_metrics_save" section for more details.
3443
3437 tdfx= [HW,DRM] 3444 tdfx= [HW,DRM]
3438 3445
3439 test_suspend= [SUSPEND][,N] 3446 test_suspend= [SUSPEND][,N]
diff --git a/Documentation/kobject.txt b/Documentation/kobject.txt
index f87241dfed87..1be59a3a521c 100644
--- a/Documentation/kobject.txt
+++ b/Documentation/kobject.txt
@@ -173,7 +173,7 @@ This should be done only after any attributes or children of the kobject
173have been initialized properly, as userspace will instantly start to look 173have been initialized properly, as userspace will instantly start to look
174for them when this call happens. 174for them when this call happens.
175 175
176When the kobject is removed from the kernel (details on how to do that is 176When the kobject is removed from the kernel (details on how to do that are
177below), the uevent for KOBJ_REMOVE will be automatically created by the 177below), the uevent for KOBJ_REMOVE will be automatically created by the
178kobject core, so the caller does not have to worry about doing that by 178kobject core, so the caller does not have to worry about doing that by
179hand. 179hand.
diff --git a/tools/testing/selftests/README.txt b/Documentation/kselftest.txt
index 2660d5ff9179..a87d840bacfe 100644
--- a/tools/testing/selftests/README.txt
+++ b/Documentation/kselftest.txt
@@ -15,37 +15,45 @@ Running the selftests (hotplug tests are run in limited mode)
15============================================================= 15=============================================================
16 16
17To build the tests: 17To build the tests:
18
19 $ make -C tools/testing/selftests 18 $ make -C tools/testing/selftests
20 19
21 20
22To run the tests: 21To run the tests:
23
24 $ make -C tools/testing/selftests run_tests 22 $ make -C tools/testing/selftests run_tests
25 23
24To build and run the tests with a single command, use:
25 $ make kselftest
26
26- note that some tests will require root privileges. 27- note that some tests will require root privileges.
27 28
28To run only tests targeted for a single subsystem: (including
29hotplug targets in limited mode)
30 29
31 $ make -C tools/testing/selftests TARGETS=cpu-hotplug run_tests 30Running a subset of selftests
31========================================
32You can use the "TARGETS" variable on the make command line to specify
33a single test to run, or a list of tests to run.
34
35To run only tests targeted for a single subsystem:
36 $ make -C tools/testing/selftests TARGETS=ptrace run_tests
37
38You can specify multiple tests to build and run:
39 $ make TARGETS="size timers" kselftest
40
41See the top-level tools/testing/selftests/Makefile for the list of all
42possible targets.
32 43
33See the top-level tools/testing/selftests/Makefile for the list of all possible
34targets.
35 44
36Running the full range hotplug selftests 45Running the full range hotplug selftests
37======================================== 46========================================
38 47
39To build the tests: 48To build the hotplug tests:
40
41 $ make -C tools/testing/selftests hotplug 49 $ make -C tools/testing/selftests hotplug
42 50
43To run the tests: 51To run the hotplug tests:
44
45 $ make -C tools/testing/selftests run_hotplug 52 $ make -C tools/testing/selftests run_hotplug
46 53
47- note that some tests will require root privileges. 54- note that some tests will require root privileges.
48 55
56
49Contributing new tests 57Contributing new tests
50====================== 58======================
51 59
diff --git a/Documentation/mailbox.txt b/Documentation/mailbox.txt
index 60f43ff629aa..1092ad9578da 100644
--- a/Documentation/mailbox.txt
+++ b/Documentation/mailbox.txt
@@ -53,7 +53,7 @@ static void message_from_remote(struct mbox_client *cl, void *mssg)
53{ 53{
54 struct demo_client *dc = container_of(mbox_client, 54 struct demo_client *dc = container_of(mbox_client,
55 struct demo_client, cl); 55 struct demo_client, cl);
56 if (dc->aysnc) { 56 if (dc->async) {
57 if (is_an_ack(mssg)) { 57 if (is_an_ack(mssg)) {
58 /* An ACK to our last sample sent */ 58 /* An ACK to our last sample sent */
59 return; /* Or do something else here */ 59 return; /* Or do something else here */
diff --git a/Documentation/mic/mpssd/Makefile b/Documentation/mic/mpssd/Makefile
index 0f3156888048..f47fe6ba7300 100644
--- a/Documentation/mic/mpssd/Makefile
+++ b/Documentation/mic/mpssd/Makefile
@@ -1,5 +1,5 @@
1# List of programs to build 1# List of programs to build
2hostprogs-y := mpssd 2hostprogs-$(CONFIG_X86_64) := mpssd
3 3
4mpssd-objs := mpssd.o sysfs.o 4mpssd-objs := mpssd.o sysfs.o
5 5
diff --git a/Documentation/power/runtime_pm.txt b/Documentation/power/runtime_pm.txt
index f32ce5419573..44fe1d28a163 100644
--- a/Documentation/power/runtime_pm.txt
+++ b/Documentation/power/runtime_pm.txt
@@ -229,13 +229,13 @@ defined in include/linux/pm.h:
229 - if set, the value of child_count is ignored (but still updated) 229 - if set, the value of child_count is ignored (but still updated)
230 230
231 unsigned int disable_depth; 231 unsigned int disable_depth;
232 - used for disabling the helper funcions (they work normally if this is 232 - used for disabling the helper functions (they work normally if this is
233 equal to zero); the initial value of it is 1 (i.e. runtime PM is 233 equal to zero); the initial value of it is 1 (i.e. runtime PM is
234 initially disabled for all devices) 234 initially disabled for all devices)
235 235
236 int runtime_error; 236 int runtime_error;
237 - if set, there was a fatal error (one of the callbacks returned error code 237 - if set, there was a fatal error (one of the callbacks returned error code
238 as described in Section 2), so the helper funtions will not work until 238 as described in Section 2), so the helper functions will not work until
239 this flag is cleared; this is the error code returned by the failing 239 this flag is cleared; this is the error code returned by the failing
240 callback 240 callback
241 241
@@ -468,6 +468,10 @@ drivers/base/power/runtime.c and include/linux/pm_runtime.h:
468 - set the power.irq_safe flag for the device, causing the runtime-PM 468 - set the power.irq_safe flag for the device, causing the runtime-PM
469 callbacks to be invoked with interrupts off 469 callbacks to be invoked with interrupts off
470 470
471 bool pm_runtime_is_irq_safe(struct device *dev);
472 - return true if power.irq_safe flag was set for the device, causing
473 the runtime-PM callbacks to be invoked with interrupts off
474
471 void pm_runtime_mark_last_busy(struct device *dev); 475 void pm_runtime_mark_last_busy(struct device *dev);
472 - set the power.last_busy field to the current time 476 - set the power.last_busy field to the current time
473 477
@@ -524,7 +528,7 @@ pm_runtime_put_sync_autosuspend()
5245. Runtime PM Initialization, Device Probing and Removal 5285. Runtime PM Initialization, Device Probing and Removal
525 529
526Initially, the runtime PM is disabled for all devices, which means that the 530Initially, the runtime PM is disabled for all devices, which means that the
527majority of the runtime PM helper funtions described in Section 4 will return 531majority of the runtime PM helper functions described in Section 4 will return
528-EAGAIN until pm_runtime_enable() is called for the device. 532-EAGAIN until pm_runtime_enable() is called for the device.
529 533
530In addition to that, the initial runtime PM status of all devices is 534In addition to that, the initial runtime PM status of all devices is
diff --git a/Documentation/power/suspend-and-interrupts.txt b/Documentation/power/suspend-and-interrupts.txt
index 69663640dea5..2f9c5a5fcb25 100644
--- a/Documentation/power/suspend-and-interrupts.txt
+++ b/Documentation/power/suspend-and-interrupts.txt
@@ -77,7 +77,7 @@ Calling enable_irq_wake() causes suspend_device_irqs() to treat the given IRQ
77in a special way. Namely, the IRQ remains enabled, by on the first interrupt 77in a special way. Namely, the IRQ remains enabled, by on the first interrupt
78it will be disabled, marked as pending and "suspended" so that it will be 78it will be disabled, marked as pending and "suspended" so that it will be
79re-enabled by resume_device_irqs() during the subsequent system resume. Also 79re-enabled by resume_device_irqs() during the subsequent system resume. Also
80the PM core is notified about the event which casues the system suspend in 80the PM core is notified about the event which causes the system suspend in
81progress to be aborted (that doesn't have to happen immediately, but at one 81progress to be aborted (that doesn't have to happen immediately, but at one
82of the points where the suspend thread looks for pending wakeup events). 82of the points where the suspend thread looks for pending wakeup events).
83 83
diff --git a/Documentation/power/userland-swsusp.txt b/Documentation/power/userland-swsusp.txt
index 0e870825c1b9..bbfcd1bbedc5 100644
--- a/Documentation/power/userland-swsusp.txt
+++ b/Documentation/power/userland-swsusp.txt
@@ -99,7 +99,7 @@ SNAPSHOT_S2RAM - suspend to RAM; using this call causes the kernel to
99The device's read() operation can be used to transfer the snapshot image from 99The device's read() operation can be used to transfer the snapshot image from
100the kernel. It has the following limitations: 100the kernel. It has the following limitations:
101- you cannot read() more than one virtual memory page at a time 101- you cannot read() more than one virtual memory page at a time
102- read()s across page boundaries are impossible (ie. if ypu read() 1/2 of 102- read()s across page boundaries are impossible (ie. if you read() 1/2 of
103 a page in the previous call, you will only be able to read() 103 a page in the previous call, you will only be able to read()
104 _at_ _most_ 1/2 of the page in the next call) 104 _at_ _most_ 1/2 of the page in the next call)
105 105
diff --git a/Documentation/ramoops.txt b/Documentation/ramoops.txt
index 69b3cac4749d..5d8675615e59 100644
--- a/Documentation/ramoops.txt
+++ b/Documentation/ramoops.txt
@@ -14,11 +14,19 @@ survive after a restart.
14 14
151. Ramoops concepts 151. Ramoops concepts
16 16
17Ramoops uses a predefined memory area to store the dump. The start and size of 17Ramoops uses a predefined memory area to store the dump. The start and size
18the memory area are set using two variables: 18and type of the memory area are set using three variables:
19 * "mem_address" for the start 19 * "mem_address" for the start
20 * "mem_size" for the size. The memory size will be rounded down to a 20 * "mem_size" for the size. The memory size will be rounded down to a
21 power of two. 21 power of two.
22 * "mem_type" to specifiy if the memory type (default is pgprot_writecombine).
23
24Typically the default value of mem_type=0 should be used as that sets the pstore
25mapping to pgprot_writecombine. Setting mem_type=1 attempts to use
26pgprot_noncached, which only works on some platforms. This is because pstore
27depends on atomic operations. At least on ARM, pgprot_noncached causes the
28memory to be mapped strongly ordered, and atomic operations on strongly ordered
29memory are implementation defined, and won't work on many ARMs such as omaps.
22 30
23The memory area is divided into "record_size" chunks (also rounded down to 31The memory area is divided into "record_size" chunks (also rounded down to
24power of two) and each oops/panic writes a "record_size" chunk of 32power of two) and each oops/panic writes a "record_size" chunk of
@@ -55,6 +63,7 @@ Setting the ramoops parameters can be done in 2 different manners:
55static struct ramoops_platform_data ramoops_data = { 63static struct ramoops_platform_data ramoops_data = {
56 .mem_size = <...>, 64 .mem_size = <...>,
57 .mem_address = <...>, 65 .mem_address = <...>,
66 .mem_type = <...>,
58 .record_size = <...>, 67 .record_size = <...>,
59 .dump_oops = <...>, 68 .dump_oops = <...>,
60 .ecc = <...>, 69 .ecc = <...>,
diff --git a/Documentation/s390/Debugging390.txt b/Documentation/s390/Debugging390.txt
index 462321c1aeea..08911b5c6b0e 100644
--- a/Documentation/s390/Debugging390.txt
+++ b/Documentation/s390/Debugging390.txt
@@ -26,11 +26,6 @@ The Linux for s/390 & z/Architecture Kernel Task Structure
26Register Usage & Stackframes on Linux for s/390 & z/Architecture 26Register Usage & Stackframes on Linux for s/390 & z/Architecture
27A sample program with comments 27A sample program with comments
28Compiling programs for debugging on Linux for s/390 & z/Architecture 28Compiling programs for debugging on Linux for s/390 & z/Architecture
29Figuring out gcc compile errors
30Debugging Tools
31objdump
32strace
33Performance Debugging
34Debugging under VM 29Debugging under VM
35s/390 & z/Architecture IO Overview 30s/390 & z/Architecture IO Overview
36Debugging IO on s/390 & z/Architecture under VM 31Debugging IO on s/390 & z/Architecture under VM
@@ -114,28 +109,25 @@ s/390 z/Architecture
114 109
11516-17 16-17 Address Space Control 11016-17 16-17 Address Space Control
116 111
117 00 Primary Space Mode when DAT on 112 00 Primary Space Mode:
118 The linux kernel currently runs in this mode, CR1 is affiliated with 113 The register CR1 contains the primary address-space control ele-
119 this mode & points to the primary segment table origin etc. 114 ment (PASCE), which points to the primary space region/segment
120 115 table origin.
121 01 Access register mode this mode is used in functions to 116
122 copy data between kernel & user space. 117 01 Access register mode
123 118
124 10 Secondary space mode not used in linux however CR7 the 119 10 Secondary Space Mode:
125 register affiliated with this mode is & this & normally 120 The register CR7 contains the secondary address-space control
126 CR13=CR7 to allow us to copy data between kernel & user space. 121 element (SASCE), which points to the secondary space region or
127 We do this as follows: 122 segment table origin.
128 We set ar2 to 0 to designate its 123
129 affiliated gpr ( gpr2 )to point to primary=kernel space. 124 11 Home Space Mode:
130 We set ar4 to 1 to designate its 125 The register CR13 contains the home space address-space control
131 affiliated gpr ( gpr4 ) to point to secondary=home=user space 126 element (HASCE), which points to the home space region/segment
132 & then essentially do a memcopy(gpr2,gpr4,size) to 127 table origin.
133 copy data between the address spaces, the reason we use home space for the 128
134 kernel & don't keep secondary space free is that code will not run in 129 See "Address Spaces on Linux for s/390 & z/Architecture" below
135 secondary space. 130 for more information about address space usage in Linux.
136
137 11 Home Space Mode all user programs run in this mode.
138 it is affiliated with CR13.
139 131
14018-19 18-19 Condition codes (CC) 13218-19 18-19 Condition codes (CC)
141 133
@@ -249,9 +241,9 @@ currently 4TB of physical memory currently on z/Architecture.
249Address Spaces on Linux for s/390 & z/Architecture 241Address Spaces on Linux for s/390 & z/Architecture
250================================================== 242==================================================
251 243
252Our addressing scheme is as follows 244Our addressing scheme is basically as follows:
253
254 245
246 Primary Space Home Space
255Himem 0x7fffffff 2GB on s/390 ***************** **************** 247Himem 0x7fffffff 2GB on s/390 ***************** ****************
256currently 0x3ffffffffff (2^42)-1 * User Stack * * * 248currently 0x3ffffffffff (2^42)-1 * User Stack * * *
257on z/Architecture. ***************** * * 249on z/Architecture. ***************** * *
@@ -264,9 +256,46 @@ on z/Architecture. ***************** * *
264 * Sections * * * 256 * Sections * * *
2650x00000000 ***************** **************** 2570x00000000 ***************** ****************
266 258
267This also means that we need to look at the PSW problem state bit 259This also means that we need to look at the PSW problem state bit and the
268or the addressing mode to decide whether we are looking at 260addressing mode to decide whether we are looking at user or kernel space.
269user or kernel space. 261
262User space runs in primary address mode (or access register mode within
263the vdso code).
264
265The kernel usually also runs in home space mode, however when accessing
266user space the kernel switches to primary or secondary address mode if
267the mvcos instruction is not available or if a compare-and-swap (futex)
268instruction on a user space address is performed.
269
270When also looking at the ASCE control registers, this means:
271
272User space:
273- runs in primary or access register mode
274- cr1 contains the user asce
275- cr7 contains the user asce
276- cr13 contains the kernel asce
277
278Kernel space:
279- runs in home space mode
280- cr1 contains the user or kernel asce
281 -> the kernel asce is loaded when a uaccess requires primary or
282 secondary address mode
283- cr7 contains the user or kernel asce, (changed with set_fs())
284- cr13 contains the kernel asce
285
286In case of uaccess the kernel changes to:
287- primary space mode in case of a uaccess (copy_to_user) and uses
288 e.g. the mvcp instruction to access user space. However the kernel
289 will stay in home space mode if the mvcos instruction is available
290- secondary space mode in case of futex atomic operations, so that the
291 instructions come from primary address space and data from secondary
292 space
293
294In case of KVM, the kernel runs in home space mode, but cr1 gets switched
295to contain the gmap asce before the SIE instruction gets executed. When
296the SIE instruction is finished, cr1 will be switched back to contain the
297user asce.
298
270 299
271Virtual Addresses on s/390 & z/Architecture 300Virtual Addresses on s/390 & z/Architecture
272=========================================== 301===========================================
@@ -706,376 +735,7 @@ Debugging with optimisation has since much improved after fixing
706some bugs, please make sure you are using gdb-5.0 or later developed 735some bugs, please make sure you are using gdb-5.0 or later developed
707after Nov'2000. 736after Nov'2000.
708 737
709Figuring out gcc compile errors
710===============================
711If you are getting a lot of syntax errors compiling a program & the problem
712isn't blatantly obvious from the source.
713It often helps to just preprocess the file, this is done with the -E
714option in gcc.
715What this does is that it runs through the very first phase of compilation
716( compilation in gcc is done in several stages & gcc calls many programs to
717achieve its end result ) with the -E option gcc just calls the gcc preprocessor (cpp).
718The c preprocessor does the following, it joins all the files #included together
719recursively ( #include files can #include other files ) & also the c file you wish to compile.
720It puts a fully qualified path of the #included files in a comment & it
721does macro expansion.
722This is useful for debugging because
7231) You can double check whether the files you expect to be included are the ones
724that are being included ( e.g. double check that you aren't going to the i386 asm directory ).
7252) Check that macro definitions aren't clashing with typedefs,
7263) Check that definitions aren't being used before they are being included.
7274) Helps put the line emitting the error under the microscope if it contains macros.
728
729For convenience the Linux kernel's makefile will do preprocessing automatically for you
730by suffixing the file you want built with .i ( instead of .o )
731
732e.g.
733from the linux directory type
734make arch/s390/kernel/signal.i
735this will build
736
737s390-gcc -D__KERNEL__ -I/home1/barrow/linux/include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer
738-fno-strict-aliasing -D__SMP__ -pipe -fno-strength-reduce -E arch/s390/kernel/signal.c
739> arch/s390/kernel/signal.i
740
741Now look at signal.i you should see something like.
742
743
744# 1 "/home1/barrow/linux/include/asm/types.h" 1
745typedef unsigned short umode_t;
746typedef __signed__ char __s8;
747typedef unsigned char __u8;
748typedef __signed__ short __s16;
749typedef unsigned short __u16;
750
751If instead you are getting errors further down e.g.
752unknown instruction:2515 "move.l" or better still unknown instruction:2515
753"Fixme not implemented yet, call Martin" you are probably are attempting to compile some code
754meant for another architecture or code that is simply not implemented, with a fixme statement
755stuck into the inline assembly code so that the author of the file now knows he has work to do.
756To look at the assembly emitted by gcc just before it is about to call gas ( the gnu assembler )
757use the -S option.
758Again for your convenience the Linux kernel's Makefile will hold your hand &
759do all this donkey work for you also by building the file with the .s suffix.
760e.g.
761from the Linux directory type
762make arch/s390/kernel/signal.s
763
764s390-gcc -D__KERNEL__ -I/home1/barrow/linux/include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer
765-fno-strict-aliasing -D__SMP__ -pipe -fno-strength-reduce -S arch/s390/kernel/signal.c
766-o arch/s390/kernel/signal.s
767
768
769This will output something like, ( please note the constant pool & the useful comments
770in the prologue to give you a hand at interpreting it ).
771
772.LC54:
773 .string "misaligned (__u16 *) in __xchg\n"
774.LC57:
775 .string "misaligned (__u32 *) in __xchg\n"
776.L$PG1: # Pool sys_sigsuspend
777.LC192:
778 .long -262401
779.LC193:
780 .long -1
781.LC194:
782 .long schedule-.L$PG1
783.LC195:
784 .long do_signal-.L$PG1
785 .align 4
786.globl sys_sigsuspend
787 .type sys_sigsuspend,@function
788sys_sigsuspend:
789# leaf function 0
790# automatics 16
791# outgoing args 0
792# need frame pointer 0
793# call alloca 0
794# has varargs 0
795# incoming args (stack) 0
796# function length 168
797 STM 8,15,32(15)
798 LR 0,15
799 AHI 15,-112
800 BASR 13,0
801.L$CO1: AHI 13,.L$PG1-.L$CO1
802 ST 0,0(15)
803 LR 8,2
804 N 5,.LC192-.L$PG1(13)
805
806Adding -g to the above output makes the output even more useful
807e.g. typing
808make CC:="s390-gcc -g" kernel/sched.s
809
810which compiles.
811s390-gcc -g -D__KERNEL__ -I/home/barrow/linux-2.3/include -Wall -Wstrict-prototypes -O2 -fomit-frame-pointer -fno-strict-aliasing -pipe -fno-strength-reduce -S kernel/sched.c -o kernel/sched.s
812
813also outputs stabs ( debugger ) info, from this info you can find out the
814offsets & sizes of various elements in structures.
815e.g. the stab for the structure
816struct rlimit {
817 unsigned long rlim_cur;
818 unsigned long rlim_max;
819};
820is
821.stabs "rlimit:T(151,2)=s8rlim_cur:(0,5),0,32;rlim_max:(0,5),32,32;;",128,0,0,0
822from this stab you can see that
823rlimit_cur starts at bit offset 0 & is 32 bits in size
824rlimit_max starts at bit offset 32 & is 32 bits in size.
825
826
827Debugging Tools:
828================
829
830objdump
831=======
832This is a tool with many options the most useful being ( if compiled with -g).
833objdump --source <victim program or object file> > <victims debug listing >
834
835
836The whole kernel can be compiled like this ( Doing this will make a 17MB kernel
837& a 200 MB listing ) however you have to strip it before building the image
838using the strip command to make it a more reasonable size to boot it.
839
840A source/assembly mixed dump of the kernel can be done with the line
841objdump --source vmlinux > vmlinux.lst
842Also, if the file isn't compiled -g, this will output as much debugging information
843as it can (e.g. function names). This is very slow as it spends lots
844of time searching for debugging info. The following self explanatory line should be used
845instead if the code isn't compiled -g, as it is much faster:
846objdump --disassemble-all --syms vmlinux > vmlinux.lst
847
848As hard drive space is valuable most of us use the following approach.
8491) Look at the emitted psw on the console to find the crash address in the kernel.
8502) Look at the file System.map ( in the linux directory ) produced when building
851the kernel to find the closest address less than the current PSW to find the
852offending function.
8533) use grep or similar to search the source tree looking for the source file
854 with this function if you don't know where it is.
8554) rebuild this object file with -g on, as an example suppose the file was
856( /arch/s390/kernel/signal.o )
8575) Assuming the file with the erroneous function is signal.c Move to the base of the
858Linux source tree.
8596) rm /arch/s390/kernel/signal.o
8607) make /arch/s390/kernel/signal.o
8618) watch the gcc command line emitted
8629) type it in again or alternatively cut & paste it on the console adding the -g option.
86310) objdump --source arch/s390/kernel/signal.o > signal.lst
864This will output the source & the assembly intermixed, as the snippet below shows
865This will unfortunately output addresses which aren't the same
866as the kernel ones you should be able to get around the mental arithmetic
867by playing with the --adjust-vma parameter to objdump.
868
869
870
871
872static inline void spin_lock(spinlock_t *lp)
873{
874 a0: 18 34 lr %r3,%r4
875 a2: a7 3a 03 bc ahi %r3,956
876 __asm__ __volatile(" lhi 1,-1\n"
877 a6: a7 18 ff ff lhi %r1,-1
878 aa: 1f 00 slr %r0,%r0
879 ac: ba 01 30 00 cs %r0,%r1,0(%r3)
880 b0: a7 44 ff fd jm aa <sys_sigsuspend+0x2e>
881 saveset = current->blocked;
882 b4: d2 07 f0 68 mvc 104(8,%r15),972(%r4)
883 b8: 43 cc
884 return (set->sig[0] & mask) != 0;
885}
886
8876) If debugging under VM go down to that section in the document for more info.
888
889
890I now have a tool which takes the pain out of --adjust-vma
891& you are able to do something like
892make /arch/s390/kernel/traps.lst
893& it automatically generates the correctly relocated entries for
894the text segment in traps.lst.
895This tool is now standard in linux distro's in scripts/makelst
896
897strace:
898-------
899Q. What is it ?
900A. It is a tool for intercepting calls to the kernel & logging them
901to a file & on the screen.
902
903Q. What use is it ?
904A. You can use it to find out what files a particular program opens.
905
906
907 738
908Example 1
909---------
910If you wanted to know does ping work but didn't have the source
911strace ping -c 1 127.0.0.1
912& then look at the man pages for each of the syscalls below,
913( In fact this is sometimes easier than looking at some spaghetti
914source which conditionally compiles for several architectures ).
915Not everything that it throws out needs to make sense immediately.
916
917Just looking quickly you can see that it is making up a RAW socket
918for the ICMP protocol.
919Doing an alarm(10) for a 10 second timeout
920& doing a gettimeofday call before & after each read to see
921how long the replies took, & writing some text to stdout so the user
922has an idea what is going on.
923
924socket(PF_INET, SOCK_RAW, IPPROTO_ICMP) = 3
925getuid() = 0
926setuid(0) = 0
927stat("/usr/share/locale/C/libc.cat", 0xbffff134) = -1 ENOENT (No such file or directory)
928stat("/usr/share/locale/libc/C", 0xbffff134) = -1 ENOENT (No such file or directory)
929stat("/usr/local/share/locale/C/libc.cat", 0xbffff134) = -1 ENOENT (No such file or directory)
930getpid() = 353
931setsockopt(3, SOL_SOCKET, SO_BROADCAST, [1], 4) = 0
932setsockopt(3, SOL_SOCKET, SO_RCVBUF, [49152], 4) = 0
933fstat(1, {st_mode=S_IFCHR|0620, st_rdev=makedev(3, 1), ...}) = 0
934mmap(0, 4096, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0) = 0x40008000
935ioctl(1, TCGETS, {B9600 opost isig icanon echo ...}) = 0
936write(1, "PING 127.0.0.1 (127.0.0.1): 56 d"..., 42PING 127.0.0.1 (127.0.0.1): 56 data bytes
937) = 42
938sigaction(SIGINT, {0x8049ba0, [], SA_RESTART}, {SIG_DFL}) = 0
939sigaction(SIGALRM, {0x8049600, [], SA_RESTART}, {SIG_DFL}) = 0
940gettimeofday({948904719, 138951}, NULL) = 0
941sendto(3, "\10\0D\201a\1\0\0\17#\2178\307\36"..., 64, 0, {sin_family=AF_INET,
942sin_port=htons(0), sin_addr=inet_addr("127.0.0.1")}, 16) = 64
943sigaction(SIGALRM, {0x8049600, [], SA_RESTART}, {0x8049600, [], SA_RESTART}) = 0
944sigaction(SIGALRM, {0x8049ba0, [], SA_RESTART}, {0x8049600, [], SA_RESTART}) = 0
945alarm(10) = 0
946recvfrom(3, "E\0\0T\0005\0\0@\1|r\177\0\0\1\177"..., 192, 0,
947{sin_family=AF_INET, sin_port=htons(50882), sin_addr=inet_addr("127.0.0.1")}, [16]) = 84
948gettimeofday({948904719, 160224}, NULL) = 0
949recvfrom(3, "E\0\0T\0006\0\0\377\1\275p\177\0"..., 192, 0,
950{sin_family=AF_INET, sin_port=htons(50882), sin_addr=inet_addr("127.0.0.1")}, [16]) = 84
951gettimeofday({948904719, 166952}, NULL) = 0
952write(1, "64 bytes from 127.0.0.1: icmp_se"...,
9535764 bytes from 127.0.0.1: icmp_seq=0 ttl=255 time=28.0 ms
954
955Example 2
956---------
957strace passwd 2>&1 | grep open
958produces the following output
959open("/etc/ld.so.cache", O_RDONLY) = 3
960open("/opt/kde/lib/libc.so.5", O_RDONLY) = -1 ENOENT (No such file or directory)
961open("/lib/libc.so.5", O_RDONLY) = 3
962open("/dev", O_RDONLY) = 3
963open("/var/run/utmp", O_RDONLY) = 3
964open("/etc/passwd", O_RDONLY) = 3
965open("/etc/shadow", O_RDONLY) = 3
966open("/etc/login.defs", O_RDONLY) = 4
967open("/dev/tty", O_RDONLY) = 4
968
969The 2>&1 is done to redirect stderr to stdout & grep is then filtering this input
970through the pipe for each line containing the string open.
971
972
973Example 3
974---------
975Getting sophisticated
976telnetd crashes & I don't know why
977
978Steps
979-----
9801) Replace the following line in /etc/inetd.conf
981telnet stream tcp nowait root /usr/sbin/in.telnetd -h
982with
983telnet stream tcp nowait root /blah
984
9852) Create the file /blah with the following contents to start tracing telnetd
986#!/bin/bash
987/usr/bin/strace -o/t1 -f /usr/sbin/in.telnetd -h
9883) chmod 700 /blah to make it executable only to root
9894)
990killall -HUP inetd
991or ps aux | grep inetd
992get inetd's process id
993& kill -HUP inetd to restart it.
994
995Important options
996-----------------
997-o is used to tell strace to output to a file in our case t1 in the root directory
998-f is to follow children i.e.
999e.g in our case above telnetd will start the login process & subsequently a shell like bash.
1000You will be able to tell which is which from the process ID's listed on the left hand side
1001of the strace output.
1002-p<pid> will tell strace to attach to a running process, yup this can be done provided
1003 it isn't being traced or debugged already & you have enough privileges,
1004the reason 2 processes cannot trace or debug the same program is that strace
1005becomes the parent process of the one being debugged & processes ( unlike people )
1006can have only one parent.
1007
1008
1009However the file /t1 will get big quite quickly
1010to test it telnet 127.0.0.1
1011
1012now look at what files in.telnetd execve'd
1013413 execve("/usr/sbin/in.telnetd", ["/usr/sbin/in.telnetd", "-h"], [/* 17 vars */]) = 0
1014414 execve("/bin/login", ["/bin/login", "-h", "localhost", "-p"], [/* 2 vars */]) = 0
1015
1016Whey it worked!.
1017
1018
1019Other hints:
1020------------
1021If the program is not very interactive ( i.e. not much keyboard input )
1022& is crashing in one architecture but not in another you can do
1023an strace of both programs under as identical a scenario as you can
1024on both architectures outputting to a file then.
1025do a diff of the two traces using the diff program
1026i.e.
1027diff output1 output2
1028& maybe you'll be able to see where the call paths differed, this
1029is possibly near the cause of the crash.
1030
1031More info
1032---------
1033Look at man pages for strace & the various syscalls
1034e.g. man strace, man alarm, man socket.
1035
1036
1037Performance Debugging
1038=====================
1039gcc is capable of compiling in profiling code just add the -p option
1040to the CFLAGS, this obviously affects program size & performance.
1041This can be used by the gprof gnu profiling tool or the
1042gcov the gnu code coverage tool ( code coverage is a means of testing
1043code quality by checking if all the code in an executable in exercised by
1044a tester ).
1045
1046
1047Using top to find out where processes are sleeping in the kernel
1048----------------------------------------------------------------
1049To do this copy the System.map from the root directory where
1050the linux kernel was built to the /boot directory on your
1051linux machine.
1052Start top
1053Now type fU<return>
1054You should see a new field called WCHAN which
1055tells you where each process is sleeping here is a typical output.
1056
1057 6:59pm up 41 min, 1 user, load average: 0.00, 0.00, 0.00
105828 processes: 27 sleeping, 1 running, 0 zombie, 0 stopped
1059CPU states: 0.0% user, 0.1% system, 0.0% nice, 99.8% idle
1060Mem: 254900K av, 45976K used, 208924K free, 0K shrd, 28636K buff
1061Swap: 0K av, 0K used, 0K free 8620K cached
1062
1063 PID USER PRI NI SIZE RSS SHARE WCHAN STAT LIB %CPU %MEM TIME COMMAND
1064 750 root 12 0 848 848 700 do_select S 0 0.1 0.3 0:00 in.telnetd
1065 767 root 16 0 1140 1140 964 R 0 0.1 0.4 0:00 top
1066 1 root 8 0 212 212 180 do_select S 0 0.0 0.0 0:00 init
1067 2 root 9 0 0 0 0 down_inte SW 0 0.0 0.0 0:00 kmcheck
1068
1069The time command
1070----------------
1071Another related command is the time command which gives you an indication
1072of where a process is spending the majority of its time.
1073e.g.
1074time ping -c 5 nc
1075outputs
1076real 0m4.054s
1077user 0m0.010s
1078sys 0m0.010s
1079 739
1080Debugging under VM 740Debugging under VM
1081================== 741==================
diff --git a/Documentation/scsi/scsi_eh.txt b/Documentation/scsi/scsi_eh.txt
index a0c85110a07e..8638f61c8c9d 100644
--- a/Documentation/scsi/scsi_eh.txt
+++ b/Documentation/scsi/scsi_eh.txt
@@ -172,7 +172,7 @@ ways.
172 172
173 - eh_strategy_handler() callback 173 - eh_strategy_handler() callback
174 This is one big callback which should perform whole error 174 This is one big callback which should perform whole error
175 handling. As such, it should do all choirs SCSI midlayer 175 handling. As such, it should do all chores the SCSI midlayer
176 performs during recovery. This will be discussed in [2-2]. 176 performs during recovery. This will be discussed in [2-2].
177 177
178 Once recovery is complete, SCSI EH resumes normal operation by 178 Once recovery is complete, SCSI EH resumes normal operation by
@@ -428,7 +428,7 @@ scmd->allowed.
428scsi_unjam_host() and it is responsible for whole recovery process. 428scsi_unjam_host() and it is responsible for whole recovery process.
429On completion, the handler should have made lower layers forget about 429On completion, the handler should have made lower layers forget about
430all failed scmds and either ready for new commands or offline. Also, 430all failed scmds and either ready for new commands or offline. Also,
431it should perform SCSI EH maintenance choirs to maintain integrity of 431it should perform SCSI EH maintenance chores to maintain integrity of
432SCSI midlayer. IOW, of the steps described in [2-1-2], all steps 432SCSI midlayer. IOW, of the steps described in [2-1-2], all steps
433except for #1 must be implemented by eh_strategy_handler(). 433except for #1 must be implemented by eh_strategy_handler().
434 434
diff --git a/Documentation/usb/gadget_configfs.txt b/Documentation/usb/gadget_configfs.txt
index 4cf53e406613..635e57493709 100644
--- a/Documentation/usb/gadget_configfs.txt
+++ b/Documentation/usb/gadget_configfs.txt
@@ -376,7 +376,7 @@ functions and binds them. This way the whole gadget is bound.
376configured, so config_groups for particular functions are defined 376configured, so config_groups for particular functions are defined
377in the functions implementation files drivers/usb/gadget/f_*.c. 377in the functions implementation files drivers/usb/gadget/f_*.c.
378 378
3795. Funciton's code is written in such a way that it uses 3795. Function's code is written in such a way that it uses
380 380
381usb_get_function_instance(), which, in turn, calls request_module. 381usb_get_function_instance(), which, in turn, calls request_module.
382So, provided that modprobe works, modules for particular functions 382So, provided that modprobe works, modules for particular functions
diff --git a/Documentation/vm/hugetlbpage.txt b/Documentation/vm/hugetlbpage.txt
index b64e0af9cc56..f2d3a100fe38 100644
--- a/Documentation/vm/hugetlbpage.txt
+++ b/Documentation/vm/hugetlbpage.txt
@@ -1,8 +1,8 @@
1 1
2The intent of this file is to give a brief summary of hugetlbpage support in 2The intent of this file is to give a brief summary of hugetlbpage support in
3the Linux kernel. This support is built on top of multiple page size support 3the Linux kernel. This support is built on top of multiple page size support
4that is provided by most modern architectures. For example, i386 4that is provided by most modern architectures. For example, x86 CPUs normally
5architecture supports 4K and 4M (2M in PAE mode) page sizes, ia64 5support 4K and 2M (1G if architecturally supported) page sizes, ia64
6architecture supports multiple page sizes 4K, 8K, 64K, 256K, 1M, 4M, 16M, 6architecture supports multiple page sizes 4K, 8K, 64K, 256K, 1M, 4M, 16M,
7256M and ppc64 supports 4K and 16M. A TLB is a cache of virtual-to-physical 7256M and ppc64 supports 4K and 16M. A TLB is a cache of virtual-to-physical
8translations. Typically this is a very scarce resource on processor. 8translations. Typically this is a very scarce resource on processor.
diff --git a/MAINTAINERS b/MAINTAINERS
index 34b4b841da40..326dc2d1652d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1722,6 +1722,13 @@ F: drivers/dma/at_hdmac.c
1722F: drivers/dma/at_hdmac_regs.h 1722F: drivers/dma/at_hdmac_regs.h
1723F: include/linux/platform_data/dma-atmel.h 1723F: include/linux/platform_data/dma-atmel.h
1724 1724
1725ATMEL XDMA DRIVER
1726M: Ludovic Desroches <ludovic.desroches@atmel.com>
1727L: linux-arm-kernel@lists.infradead.org
1728L: dmaengine@vger.kernel.org
1729S: Supported
1730F: drivers/dma/at_xdmac.c
1731
1725ATMEL I2C DRIVER 1732ATMEL I2C DRIVER
1726M: Ludovic Desroches <ludovic.desroches@atmel.com> 1733M: Ludovic Desroches <ludovic.desroches@atmel.com>
1727L: linux-i2c@vger.kernel.org 1734L: linux-i2c@vger.kernel.org
@@ -2085,6 +2092,14 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/rpi/linux-rpi.git
2085S: Maintained 2092S: Maintained
2086N: bcm2835 2093N: bcm2835
2087 2094
2095BROADCOM BCM33XX MIPS ARCHITECTURE
2096M: Kevin Cernekee <cernekee@gmail.com>
2097L: linux-mips@linux-mips.org
2098S: Maintained
2099F: arch/mips/bcm3384/*
2100F: arch/mips/include/asm/mach-bcm3384/*
2101F: arch/mips/kernel/*bmips*
2102
2088BROADCOM BCM5301X ARM ARCHITECTURE 2103BROADCOM BCM5301X ARM ARCHITECTURE
2089M: Hauke Mehrtens <hauke@hauke-m.de> 2104M: Hauke Mehrtens <hauke@hauke-m.de>
2090L: linux-arm-kernel@lists.infradead.org 2105L: linux-arm-kernel@lists.infradead.org
@@ -2101,6 +2116,12 @@ S: Maintained
2101F: arch/arm/mach-bcm/bcm63xx.c 2116F: arch/arm/mach-bcm/bcm63xx.c
2102F: arch/arm/include/debug/bcm63xx.S 2117F: arch/arm/include/debug/bcm63xx.S
2103 2118
2119BROADCOM BCM63XX/BCM33XX UDC DRIVER
2120M: Kevin Cernekee <cernekee@gmail.com>
2121L: linux-usb@vger.kernel.org
2122S: Maintained
2123F: drivers/usb/gadget/udc/bcm63xx_udc.*
2124
2104BROADCOM BCM7XXX ARM ARCHITECTURE 2125BROADCOM BCM7XXX ARM ARCHITECTURE
2105M: Marc Carino <marc.ceeeee@gmail.com> 2126M: Marc Carino <marc.ceeeee@gmail.com>
2106M: Brian Norris <computersforpeace@gmail.com> 2127M: Brian Norris <computersforpeace@gmail.com>
@@ -2112,6 +2133,18 @@ F: arch/arm/mach-bcm/*brcmstb*
2112F: arch/arm/boot/dts/bcm7*.dts* 2133F: arch/arm/boot/dts/bcm7*.dts*
2113F: drivers/bus/brcmstb_gisb.c 2134F: drivers/bus/brcmstb_gisb.c
2114 2135
2136BROADCOM BMIPS MIPS ARCHITECTURE
2137M: Kevin Cernekee <cernekee@gmail.com>
2138M: Florian Fainelli <f.fainelli@gmail.com>
2139L: linux-mips@linux-mips.org
2140S: Maintained
2141F: arch/mips/bmips/*
2142F: arch/mips/include/asm/mach-bmips/*
2143F: arch/mips/kernel/*bmips*
2144F: arch/mips/boot/dts/bcm*.dts*
2145F: drivers/irqchip/irq-bcm7*
2146F: drivers/irqchip/irq-brcmstb*
2147
2115BROADCOM TG3 GIGABIT ETHERNET DRIVER 2148BROADCOM TG3 GIGABIT ETHERNET DRIVER
2116M: Prashant Sreedharan <prashant@broadcom.com> 2149M: Prashant Sreedharan <prashant@broadcom.com>
2117M: Michael Chan <mchan@broadcom.com> 2150M: Michael Chan <mchan@broadcom.com>
@@ -3136,7 +3169,8 @@ Q: https://patchwork.kernel.org/project/linux-dmaengine/list/
3136S: Maintained 3169S: Maintained
3137F: drivers/dma/ 3170F: drivers/dma/
3138F: include/linux/dma* 3171F: include/linux/dma*
3139T: git git://git.infradead.org/users/vkoul/slave-dma.git (slave-dma) 3172F: Documentation/dmaengine/
3173T: git git://git.infradead.org/users/vkoul/slave-dma.git
3140 3174
3141DME1737 HARDWARE MONITOR DRIVER 3175DME1737 HARDWARE MONITOR DRIVER
3142M: Juerg Haefliger <juergh@gmail.com> 3176M: Juerg Haefliger <juergh@gmail.com>
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index c8424a85bc04..0bee1fe209b1 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -687,7 +687,9 @@ config ARCH_SA1100
687 select CPU_SA1100 687 select CPU_SA1100
688 select GENERIC_CLOCKEVENTS 688 select GENERIC_CLOCKEVENTS
689 select HAVE_IDE 689 select HAVE_IDE
690 select IRQ_DOMAIN
690 select ISA 691 select ISA
692 select MULTI_IRQ_HANDLER
691 select NEED_MACH_MEMORY_H 693 select NEED_MACH_MEMORY_H
692 select SPARSE_IRQ 694 select SPARSE_IRQ
693 help 695 help
diff --git a/arch/arm/boot/dts/bcm63138.dtsi b/arch/arm/boot/dts/bcm63138.dtsi
index f3bb2dd6269e..d2d8e94e0aa2 100644
--- a/arch/arm/boot/dts/bcm63138.dtsi
+++ b/arch/arm/boot/dts/bcm63138.dtsi
@@ -102,7 +102,7 @@
102 twd_watchdog: watchdog@1e620 { 102 twd_watchdog: watchdog@1e620 {
103 compatible = "arm,cortex-a9-twd-wdt"; 103 compatible = "arm,cortex-a9-twd-wdt";
104 reg = <0x1e620 0x20>; 104 reg = <0x1e620 0x20>;
105 interupts = <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>; 105 interrupts = <GIC_PPI 14 IRQ_TYPE_LEVEL_HIGH>;
106 }; 106 };
107 }; 107 };
108 108
diff --git a/arch/arm/common/sa1111.c b/arch/arm/common/sa1111.c
index e57d7e5bf96a..7b69c5f9cd74 100644
--- a/arch/arm/common/sa1111.c
+++ b/arch/arm/common/sa1111.c
@@ -282,8 +282,8 @@ static int sa1111_retrigger_lowirq(struct irq_data *d)
282 } 282 }
283 283
284 if (i == 8) 284 if (i == 8)
285 printk(KERN_ERR "Danger Will Robinson: failed to " 285 pr_err("Danger Will Robinson: failed to re-trigger IRQ%d\n",
286 "re-trigger IRQ%d\n", d->irq); 286 d->irq);
287 return i == 8 ? -1 : 0; 287 return i == 8 ? -1 : 0;
288} 288}
289 289
@@ -384,8 +384,8 @@ static int sa1111_retrigger_highirq(struct irq_data *d)
384 } 384 }
385 385
386 if (i == 8) 386 if (i == 8)
387 printk(KERN_ERR "Danger Will Robinson: failed to " 387 pr_err("Danger Will Robinson: failed to re-trigger IRQ%d\n",
388 "re-trigger IRQ%d\n", d->irq); 388 d->irq);
389 return i == 8 ? -1 : 0; 389 return i == 8 ? -1 : 0;
390} 390}
391 391
@@ -740,9 +740,8 @@ static int __sa1111_probe(struct device *me, struct resource *mem, int irq)
740 goto err_unmap; 740 goto err_unmap;
741 } 741 }
742 742
743 printk(KERN_INFO "SA1111 Microprocessor Companion Chip: " 743 pr_info("SA1111 Microprocessor Companion Chip: silicon revision %lx, metal revision %lx\n",
744 "silicon revision %lx, metal revision %lx\n", 744 (id & SKID_SIREV_MASK) >> 4, id & SKID_MTREV_MASK);
745 (id & SKID_SIREV_MASK)>>4, (id & SKID_MTREV_MASK));
746 745
747 /* 746 /*
748 * We found it. Wake the chip up, and initialise. 747 * We found it. Wake the chip up, and initialise.
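The sa1111.c hunk above is representative of the printk conversions running through this series. The pr_err()/pr_info() helpers simply wrap printk() with the level string prepended, and pick up a per-file pr_fmt() prefix when one is defined. A minimal sketch of the idiom (the "sa1111: " prefix here is hypothetical, not part of the patch):

    #define pr_fmt(fmt) "sa1111: " fmt    /* hypothetical; must precede the include */
    #include <linux/printk.h>

    static void report_retrigger_failure(int irq)
    {
            /* expands to printk(KERN_ERR "sa1111: failed to re-trigger IRQ%d\n", irq) */
            pr_err("failed to re-trigger IRQ%d\n", irq);
    }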
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 10e78d00a0bb..2d46862e7bef 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -487,6 +487,16 @@ int set_memory_rw(unsigned long addr, int numpages);
487int set_memory_x(unsigned long addr, int numpages); 487int set_memory_x(unsigned long addr, int numpages);
488int set_memory_nx(unsigned long addr, int numpages); 488int set_memory_nx(unsigned long addr, int numpages);
489 489
490#ifdef CONFIG_DEBUG_RODATA
491void mark_rodata_ro(void);
492void set_kernel_text_rw(void);
493void set_kernel_text_ro(void);
494#else
495static inline void set_kernel_text_rw(void) { }
496static inline void set_kernel_text_ro(void) { }
497#endif
498
490void flush_uprobe_xol_access(struct page *page, unsigned long uaddr, 499void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
491 void *kaddr, unsigned long len); 500 void *kaddr, unsigned long len);
501
492#endif 502#endif
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index dc662fca9230..4111592f0130 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -17,6 +17,7 @@ struct dev_archdata {
17#ifdef CONFIG_ARM_DMA_USE_IOMMU 17#ifdef CONFIG_ARM_DMA_USE_IOMMU
18 struct dma_iommu_mapping *mapping; 18 struct dma_iommu_mapping *mapping;
19#endif 19#endif
20 bool dma_coherent;
20}; 21};
21 22
22struct omap_device; 23struct omap_device;
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index 85738b200023..e6e3446abdf6 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -123,11 +123,18 @@ static inline unsigned long dma_max_pfn(struct device *dev)
123 123
124static inline int set_arch_dma_coherent_ops(struct device *dev) 124static inline int set_arch_dma_coherent_ops(struct device *dev)
125{ 125{
126 dev->archdata.dma_coherent = true;
126 set_dma_ops(dev, &arm_coherent_dma_ops); 127 set_dma_ops(dev, &arm_coherent_dma_ops);
127 return 0; 128 return 0;
128} 129}
129#define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev) 130#define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev)
130 131
132/* do not use this function in a driver */
133static inline bool is_device_dma_coherent(struct device *dev)
134{
135 return dev->archdata.dma_coherent;
136}
137
131static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 138static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
132{ 139{
133 unsigned int offset = paddr & ~PAGE_MASK; 140 unsigned int offset = paddr & ~PAGE_MASK;
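The new dma_coherent flag gives arch code a cheap way to ask whether a device was marked coherent by set_arch_dma_coherent_ops(). As a minimal sketch (the helper name is hypothetical), a mapping path could use it to skip cache maintenance for coherent devices:

    /* Hypothetical helper: flush only for non-coherent devices. */
    static void maybe_flush_for_device(struct device *dev, void *vaddr, size_t size)
    {
            if (!is_device_dma_coherent(dev))
                    __cpuc_flush_dcache_area(vaddr, size);
    }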
diff --git a/arch/arm/include/asm/fixmap.h b/arch/arm/include/asm/fixmap.h
index 74124b0d0d79..0415eae1df27 100644
--- a/arch/arm/include/asm/fixmap.h
+++ b/arch/arm/include/asm/fixmap.h
@@ -2,27 +2,24 @@
2#define _ASM_FIXMAP_H 2#define _ASM_FIXMAP_H
3 3
4#define FIXADDR_START 0xffc00000UL 4#define FIXADDR_START 0xffc00000UL
5#define FIXADDR_TOP 0xffe00000UL 5#define FIXADDR_END 0xfff00000UL
6#define FIXADDR_SIZE (FIXADDR_TOP - FIXADDR_START) 6#define FIXADDR_TOP (FIXADDR_END - PAGE_SIZE)
7 7
8#define FIX_KMAP_NR_PTES (FIXADDR_SIZE >> PAGE_SHIFT) 8#include <asm/kmap_types.h>
9 9
10#define __fix_to_virt(x) (FIXADDR_START + ((x) << PAGE_SHIFT)) 10enum fixed_addresses {
11#define __virt_to_fix(x) (((x) - FIXADDR_START) >> PAGE_SHIFT) 11 FIX_KMAP_BEGIN,
12 FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
12 13
13extern void __this_fixmap_does_not_exist(void); 14 /* Support writing RO kernel text via kprobes, jump labels, etc. */
15 FIX_TEXT_POKE0,
16 FIX_TEXT_POKE1,
14 17
15static inline unsigned long fix_to_virt(const unsigned int idx) 18 __end_of_fixed_addresses
16{ 19};
17 if (idx >= FIX_KMAP_NR_PTES)
18 __this_fixmap_does_not_exist();
19 return __fix_to_virt(idx);
20}
21 20
22static inline unsigned int virt_to_fix(const unsigned long vaddr) 21void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot);
23{ 22
24 BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START); 23#include <asm-generic/fixmap.h>
25 return __virt_to_fix(vaddr);
26}
27 24
28#endif 25#endif
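With the conversion to <asm-generic/fixmap.h>, each fixed_addresses index names one page-sized slot in the 0xffc00000-0xfff00000 window, and the generic header supplies fix_to_virt(), set_fixmap() and clear_fixmap() on top of the __set_fixmap() declared here. A hedged sketch of the text-poke use the new FIX_TEXT_POKE* slots enable (this shows the pattern, not the actual patch_text() implementation):

    static void poke_text_word(unsigned long addr, u32 insn)
    {
            unsigned long va;

            /* map the page holding addr at a writable fixed slot */
            set_fixmap(FIX_TEXT_POKE0, __pa(addr) & PAGE_MASK);
            va = fix_to_virt(FIX_TEXT_POKE0) + (addr & ~PAGE_MASK);
            *(u32 *)va = insn;              /* write through the alias */
            clear_fixmap(FIX_TEXT_POKE0);
    }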
diff --git a/arch/arm/include/asm/hw_irq.h b/arch/arm/include/asm/hw_irq.h
index a71b417b1856..af79da40af2a 100644
--- a/arch/arm/include/asm/hw_irq.h
+++ b/arch/arm/include/asm/hw_irq.h
@@ -8,6 +8,7 @@ static inline void ack_bad_irq(int irq)
8{ 8{
9 extern unsigned long irq_err_count; 9 extern unsigned long irq_err_count;
10 irq_err_count++; 10 irq_err_count++;
11 pr_crit("unexpected IRQ trap at vector %02x\n", irq);
11} 12}
12 13
13void set_irq_flags(unsigned int irq, unsigned int flags); 14void set_irq_flags(unsigned int irq, unsigned int flags);
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index d428e386c88e..3446f6a1d9fa 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -219,6 +219,23 @@ void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
219bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster); 219bool __mcpm_outbound_enter_critical(unsigned int this_cpu, unsigned int cluster);
220int __mcpm_cluster_state(unsigned int cluster); 220int __mcpm_cluster_state(unsigned int cluster);
221 221
222/**
223 * mcpm_sync_init - Initialize the cluster synchronization support
224 *
225 * @power_up_setup: platform specific function invoked during very
226 * early CPU/cluster bringup stage.
227 *
228 * This prepares memory used by vlocks and the MCPM state machine used
229 * across CPUs that may have their caches active or inactive. Must be
230 * called only after a successful call to mcpm_platform_register().
231 *
232 * The power_up_setup argument is a pointer to assembly code called when
233 * the MMU and caches are still disabled during boot and no stack space is
234 * available. The affinity level passed to that code corresponds to the
235 * resource that needs to be initialized (e.g. 1 for cluster level, 0 for
236 * CPU level). Proper exclusion mechanisms are already activated at that
237 * point.
238 */
222int __init mcpm_sync_init( 239int __init mcpm_sync_init(
223 void (*power_up_setup)(unsigned int affinity_level)); 240 void (*power_up_setup)(unsigned int affinity_level));
224 241
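Per the kerneldoc added above, the bring-up order is mcpm_platform_register() first, then mcpm_sync_init(). A hedged sketch with hypothetical platform names (the ops callbacks are elided):

    static const struct mcpm_platform_ops myplat_pm_ops; /* callbacks elided */
    extern void myplat_power_up_setup(unsigned int affinity_level); /* asm */

    static int __init myplat_mcpm_init(void)
    {
            int ret = mcpm_platform_register(&myplat_pm_ops);

            if (ret)
                    return ret;
            return mcpm_sync_init(myplat_power_up_setup);
    }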
diff --git a/arch/arm/include/asm/percpu.h b/arch/arm/include/asm/percpu.h
index 209e6504922e..a89b4076cde4 100644
--- a/arch/arm/include/asm/percpu.h
+++ b/arch/arm/include/asm/percpu.h
@@ -30,14 +30,14 @@ static inline void set_my_cpu_offset(unsigned long off)
30static inline unsigned long __my_cpu_offset(void) 30static inline unsigned long __my_cpu_offset(void)
31{ 31{
32 unsigned long off; 32 unsigned long off;
33 register unsigned long *sp asm ("sp");
34 33
35 /* 34 /*
36 * Read TPIDRPRW. 35 * Read TPIDRPRW.
37 * We want to allow caching the value, so avoid using volatile and 36 * We want to allow caching the value, so avoid using volatile and
38 * instead use a fake stack read to hazard against barrier(). 37 * instead use a fake stack read to hazard against barrier().
39 */ 38 */
40 asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off) : "Q" (*sp)); 39 asm("mrc p15, 0, %0, c13, c0, 4" : "=r" (off)
40 : "Q" (*(const unsigned long *)current_stack_pointer));
41 41
42 return off; 42 return off;
43} 43}
diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
index 78a779361682..19cfab526d13 100644
--- a/arch/arm/include/asm/pgalloc.h
+++ b/arch/arm/include/asm/pgalloc.h
@@ -157,7 +157,15 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
157static inline void 157static inline void
158pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep) 158pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
159{ 159{
160 __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE); 160 extern pmdval_t user_pmd_table;
161 pmdval_t prot;
162
163 if (__LINUX_ARM_ARCH__ >= 6 && !IS_ENABLED(CONFIG_ARM_LPAE))
164 prot = user_pmd_table;
165 else
166 prot = _PAGE_USER_TABLE;
167
168 __pmd_populate(pmdp, page_to_phys(ptep), prot);
161} 169}
162#define pmd_pgtable(pmd) pmd_page(pmd) 170#define pmd_pgtable(pmd) pmd_page(pmd)
163 171
diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
index 5cfba15cb401..5e68278e953e 100644
--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
@@ -20,12 +20,14 @@
20#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0) 20#define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
21#define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0) 21#define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
22#define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0) 22#define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
23#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
23#define PMD_BIT4 (_AT(pmdval_t, 1) << 4) 24#define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
24#define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5) 25#define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
25#define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */ 26#define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
26/* 27/*
27 * - section 28 * - section
28 */ 29 */
30#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
29#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2) 31#define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
30#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3) 32#define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
31#define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */ 33#define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
index 9fd61c72a33a..f8f1cff62065 100644
--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
@@ -76,6 +76,7 @@
76#define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */ 76#define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
77#define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */ 77#define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
78#define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */ 78#define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
79#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
79#define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */ 80#define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
80 81
81/* 82/*
diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
index 3b30062975b2..d5cac545ba33 100644
--- a/arch/arm/include/asm/pgtable.h
+++ b/arch/arm/include/asm/pgtable.h
@@ -252,17 +252,57 @@ static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
252 set_pte_ext(ptep, pteval, ext); 252 set_pte_ext(ptep, pteval, ext);
253} 253}
254 254
255#define PTE_BIT_FUNC(fn,op) \ 255static inline pte_t clear_pte_bit(pte_t pte, pgprot_t prot)
256static inline pte_t pte_##fn(pte_t pte) { pte_val(pte) op; return pte; } 256{
257 257 pte_val(pte) &= ~pgprot_val(prot);
258PTE_BIT_FUNC(wrprotect, |= L_PTE_RDONLY); 258 return pte;
259PTE_BIT_FUNC(mkwrite, &= ~L_PTE_RDONLY); 259}
260PTE_BIT_FUNC(mkclean, &= ~L_PTE_DIRTY); 260
261PTE_BIT_FUNC(mkdirty, |= L_PTE_DIRTY); 261static inline pte_t set_pte_bit(pte_t pte, pgprot_t prot)
262PTE_BIT_FUNC(mkold, &= ~L_PTE_YOUNG); 262{
263PTE_BIT_FUNC(mkyoung, |= L_PTE_YOUNG); 263 pte_val(pte) |= pgprot_val(prot);
264PTE_BIT_FUNC(mkexec, &= ~L_PTE_XN); 264 return pte;
265PTE_BIT_FUNC(mknexec, |= L_PTE_XN); 265}
266
267static inline pte_t pte_wrprotect(pte_t pte)
268{
269 return set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
270}
271
272static inline pte_t pte_mkwrite(pte_t pte)
273{
274 return clear_pte_bit(pte, __pgprot(L_PTE_RDONLY));
275}
276
277static inline pte_t pte_mkclean(pte_t pte)
278{
279 return clear_pte_bit(pte, __pgprot(L_PTE_DIRTY));
280}
281
282static inline pte_t pte_mkdirty(pte_t pte)
283{
284 return set_pte_bit(pte, __pgprot(L_PTE_DIRTY));
285}
286
287static inline pte_t pte_mkold(pte_t pte)
288{
289 return clear_pte_bit(pte, __pgprot(L_PTE_YOUNG));
290}
291
292static inline pte_t pte_mkyoung(pte_t pte)
293{
294 return set_pte_bit(pte, __pgprot(L_PTE_YOUNG));
295}
296
297static inline pte_t pte_mkexec(pte_t pte)
298{
299 return clear_pte_bit(pte, __pgprot(L_PTE_XN));
300}
301
302static inline pte_t pte_mknexec(pte_t pte)
303{
304 return set_pte_bit(pte, __pgprot(L_PTE_XN));
305}
266 306
267static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 307static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
268{ 308{
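The macro-generated PTE_BIT_FUNC helpers become plain inline functions built on two primitives, which also makes new combinations easy to express. A hypothetical example composed from the same primitives:

    static inline pte_t pte_mkro_nx(pte_t pte)
    {
            pte = set_pte_bit(pte, __pgprot(L_PTE_RDONLY));
            return set_pte_bit(pte, __pgprot(L_PTE_XN));
    }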
diff --git a/arch/arm/include/asm/ptrace.h b/arch/arm/include/asm/ptrace.h
index 601264d983fa..51622ba7c4a6 100644
--- a/arch/arm/include/asm/ptrace.h
+++ b/arch/arm/include/asm/ptrace.h
@@ -154,9 +154,8 @@ static inline unsigned long user_stack_pointer(struct pt_regs *regs)
154 return regs->ARM_sp; 154 return regs->ARM_sp;
155} 155}
156 156
157#define current_pt_regs(void) ({ \ 157#define current_pt_regs(void) ({ (struct pt_regs *) \
158 register unsigned long sp asm ("sp"); \ 158 ((current_stack_pointer | (THREAD_SIZE - 1)) - 7) - 1; \
159 (struct pt_regs *)((sp | (THREAD_SIZE - 1)) - 7) - 1; \
160}) 159})
161 160
162#endif /* __ASSEMBLY__ */ 161#endif /* __ASSEMBLY__ */
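The rewritten current_pt_regs still relies on the kernel stack layout; a worked example of the address arithmetic, assuming THREAD_SIZE = 8192:

    /*
     * sp                        = 0xc0a01e40 (somewhere in the stack)
     * sp | (THREAD_SIZE - 1)    = 0xc0a01fff (last byte of the stack)
     * 0xc0a01fff - 7            = 0xc0a01ff8 (64-bit-aligned stack top)
     * (struct pt_regs *)0xc0a01ff8 - 1 then points at the pt_regs
     * frame saved at the top of the stack on kernel entry.
     */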
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index ce73ab635414..d890e41f5520 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -90,14 +90,19 @@ struct thread_info {
90#define init_stack (init_thread_union.stack) 90#define init_stack (init_thread_union.stack)
91 91
92/* 92/*
93 * how to get the current stack pointer in C
94 */
95register unsigned long current_stack_pointer asm ("sp");
96
97/*
93 * how to get the thread information struct from C 98 * how to get the thread information struct from C
94 */ 99 */
95static inline struct thread_info *current_thread_info(void) __attribute_const__; 100static inline struct thread_info *current_thread_info(void) __attribute_const__;
96 101
97static inline struct thread_info *current_thread_info(void) 102static inline struct thread_info *current_thread_info(void)
98{ 103{
99 register unsigned long sp asm ("sp"); 104 return (struct thread_info *)
100 return (struct thread_info *)(sp & ~(THREAD_SIZE - 1)); 105 (current_stack_pointer & ~(THREAD_SIZE - 1));
101} 106}
102 107
103#define thread_saved_pc(tsk) \ 108#define thread_saved_pc(tsk) \
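current_stack_pointer is a GCC global register variable pinned to sp, so any reader compiles to a direct register use with no memory access. A small sketch of how further helpers can build on it (stack_bytes_left() is hypothetical):

    static inline unsigned long stack_bytes_left(void)
    {
            /* offset of sp within the THREAD_SIZE-aligned stack */
            return current_stack_pointer & (THREAD_SIZE - 1);
    }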
diff --git a/arch/arm/include/asm/vfp.h b/arch/arm/include/asm/vfp.h
index f4ab34fd4f72..ee5f3084243c 100644
--- a/arch/arm/include/asm/vfp.h
+++ b/arch/arm/include/asm/vfp.h
@@ -22,6 +22,7 @@
22#define FPSID_NODOUBLE (1<<20) 22#define FPSID_NODOUBLE (1<<20)
23#define FPSID_ARCH_BIT (16) 23#define FPSID_ARCH_BIT (16)
24#define FPSID_ARCH_MASK (0xF << FPSID_ARCH_BIT) 24#define FPSID_ARCH_MASK (0xF << FPSID_ARCH_BIT)
25#define FPSID_CPUID_ARCH_MASK (0x7F << FPSID_ARCH_BIT)
25#define FPSID_PART_BIT (8) 26#define FPSID_PART_BIT (8)
26#define FPSID_PART_MASK (0xFF << FPSID_PART_BIT) 27#define FPSID_PART_MASK (0xFF << FPSID_PART_BIT)
27#define FPSID_VARIANT_BIT (4) 28#define FPSID_VARIANT_BIT (4)
@@ -75,6 +76,10 @@
75/* MVFR0 bits */ 76/* MVFR0 bits */
76#define MVFR0_A_SIMD_BIT (0) 77#define MVFR0_A_SIMD_BIT (0)
77#define MVFR0_A_SIMD_MASK (0xf << MVFR0_A_SIMD_BIT) 78#define MVFR0_A_SIMD_MASK (0xf << MVFR0_A_SIMD_BIT)
79#define MVFR0_SP_BIT (4)
80#define MVFR0_SP_MASK (0xf << MVFR0_SP_BIT)
81#define MVFR0_DP_BIT (8)
82#define MVFR0_DP_MASK (0xf << MVFR0_DP_BIT)
78 83
79/* Bit patterns for decoding the packaged operation descriptors */ 84/* Bit patterns for decoding the packaged operation descriptors */
80#define VFPOPDESC_LENGTH_BIT (9) 85#define VFPOPDESC_LENGTH_BIT (9)
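The new MVFR0 single/double-precision fields let VFP setup discover what the FPU implements; a hedged sketch using the existing fmrx() accessor (a zero field means the precision is absent):

    static bool vfp_has_double_precision(void)
    {
            u32 mvfr0 = fmrx(MVFR0);

            return (mvfr0 & MVFR0_DP_MASK) >> MVFR0_DP_BIT;
    }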
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index e8275ea88e88..efd562412850 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -5,6 +5,18 @@
5#include <linux/dma-attrs.h> 5#include <linux/dma-attrs.h>
6#include <linux/dma-mapping.h> 6#include <linux/dma-mapping.h>
7 7
8void __xen_dma_map_page(struct device *hwdev, struct page *page,
9 dma_addr_t dev_addr, unsigned long offset, size_t size,
10 enum dma_data_direction dir, struct dma_attrs *attrs);
11void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
12 size_t size, enum dma_data_direction dir,
13 struct dma_attrs *attrs);
14void __xen_dma_sync_single_for_cpu(struct device *hwdev,
15 dma_addr_t handle, size_t size, enum dma_data_direction dir);
16
17void __xen_dma_sync_single_for_device(struct device *hwdev,
18 dma_addr_t handle, size_t size, enum dma_data_direction dir);
19
8static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size, 20static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
9 dma_addr_t *dma_handle, gfp_t flags, 21 dma_addr_t *dma_handle, gfp_t flags,
10 struct dma_attrs *attrs) 22 struct dma_attrs *attrs)
@@ -20,20 +32,56 @@ static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
20} 32}
21 33
22static inline void xen_dma_map_page(struct device *hwdev, struct page *page, 34static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
23 unsigned long offset, size_t size, enum dma_data_direction dir, 35 dma_addr_t dev_addr, unsigned long offset, size_t size,
24 struct dma_attrs *attrs) 36 enum dma_data_direction dir, struct dma_attrs *attrs)
25{ 37{
26 __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs); 38 bool local = PFN_DOWN(dev_addr) == page_to_pfn(page);
 39 /* Dom0 is mapped 1:1, so if pfn == mfn the page is local; otherwise
 40 * it is a foreign page grant-mapped in dom0. If the page is local we
41 * can safely call the native dma_ops function, otherwise we call
42 * the xen specific function. */
43 if (local)
44 __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
45 else
46 __xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
27} 47}
28 48
29void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, 49static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
30 size_t size, enum dma_data_direction dir, 50 size_t size, enum dma_data_direction dir,
31 struct dma_attrs *attrs); 51 struct dma_attrs *attrs)
52{
53 unsigned long pfn = PFN_DOWN(handle);
54 /* Dom0 is mapped 1:1, so calling pfn_valid on a foreign mfn will
55 * always return false. If the page is local we can safely call the
56 * native dma_ops function, otherwise we call the xen specific
57 * function. */
58 if (pfn_valid(pfn)) {
59 if (__generic_dma_ops(hwdev)->unmap_page)
60 __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
61 } else
62 __xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
63}
32 64
33void xen_dma_sync_single_for_cpu(struct device *hwdev, 65static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
34 dma_addr_t handle, size_t size, enum dma_data_direction dir); 66 dma_addr_t handle, size_t size, enum dma_data_direction dir)
67{
68 unsigned long pfn = PFN_DOWN(handle);
69 if (pfn_valid(pfn)) {
70 if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
71 __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
72 } else
73 __xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
74}
35 75
36void xen_dma_sync_single_for_device(struct device *hwdev, 76static inline void xen_dma_sync_single_for_device(struct device *hwdev,
37 dma_addr_t handle, size_t size, enum dma_data_direction dir); 77 dma_addr_t handle, size_t size, enum dma_data_direction dir)
78{
79 unsigned long pfn = PFN_DOWN(handle);
80 if (pfn_valid(pfn)) {
81 if (__generic_dma_ops(hwdev)->sync_single_for_device)
82 __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
83 } else
84 __xen_dma_sync_single_for_device(hwdev, handle, size, dir);
85}
38 86
39#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */ 87#endif /* _ASM_ARM_XEN_PAGE_COHERENT_H */
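All four wrappers above hinge on one test: in dom0's 1:1 mapping a local page's pfn is valid, while a foreign grant-mapped mfn is not. A hypothetical helper condensing that decision:

    static inline bool xen_dma_handle_is_local(dma_addr_t handle)
    {
            /* true: use the native dma_ops; false: take the Xen path */
            return pfn_valid(PFN_DOWN(handle));
    }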
diff --git a/arch/arm/include/asm/xen/page.h b/arch/arm/include/asm/xen/page.h
index 135c24a5ba26..68c739b3fdf4 100644
--- a/arch/arm/include/asm/xen/page.h
+++ b/arch/arm/include/asm/xen/page.h
@@ -107,4 +107,8 @@ static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
107#define xen_remap(cookie, size) ioremap_cache((cookie), (size)) 107#define xen_remap(cookie, size) ioremap_cache((cookie), (size))
108#define xen_unmap(cookie) iounmap((cookie)) 108#define xen_unmap(cookie) iounmap((cookie))
109 109
110bool xen_arch_need_swiotlb(struct device *dev,
111 unsigned long pfn,
112 unsigned long mfn);
113
110#endif /* _ASM_ARM_XEN_PAGE_H */ 114#endif /* _ASM_ARM_XEN_PAGE_H */
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index 8dcbed5016ac..f290ac892a95 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -47,6 +47,7 @@ endif
47obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o 47obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
48obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o 48obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o
49obj-$(CONFIG_ARM_ARCH_TIMER) += arch_timer.o 49obj-$(CONFIG_ARM_ARCH_TIMER) += arch_timer.o
50obj-$(CONFIG_FUNCTION_TRACER) += entry-ftrace.o
50obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o insn.o 51obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o insn.o
51obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o insn.o 52obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o insn.o
52obj-$(CONFIG_JUMP_LABEL) += jump_label.o insn.o patch.o 53obj-$(CONFIG_JUMP_LABEL) += jump_label.o insn.o patch.o
@@ -67,7 +68,7 @@ test-kprobes-objs += kprobes-test-arm.o
67endif 68endif
68obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o 69obj-$(CONFIG_OABI_COMPAT) += sys_oabi-compat.o
69obj-$(CONFIG_ARM_THUMBEE) += thumbee.o 70obj-$(CONFIG_ARM_THUMBEE) += thumbee.o
70obj-$(CONFIG_KGDB) += kgdb.o 71obj-$(CONFIG_KGDB) += kgdb.o patch.o
71obj-$(CONFIG_ARM_UNWIND) += unwind.o 72obj-$(CONFIG_ARM_UNWIND) += unwind.o
72obj-$(CONFIG_HAVE_TCM) += tcm.o 73obj-$(CONFIG_HAVE_TCM) += tcm.o
73obj-$(CONFIG_OF) += devtree.o 74obj-$(CONFIG_OF) += devtree.o
@@ -84,6 +85,7 @@ obj-$(CONFIG_CPU_PJ4B) += pj4-cp0.o
84obj-$(CONFIG_IWMMXT) += iwmmxt.o 85obj-$(CONFIG_IWMMXT) += iwmmxt.o
85obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o 86obj-$(CONFIG_PERF_EVENTS) += perf_regs.o perf_callchain.o
86obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o perf_event_cpu.o 87obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o perf_event_cpu.o
88CFLAGS_pj4-cp0.o := -marm
87AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt 89AFLAGS_iwmmxt.o := -Wa,-mcpu=iwmmxt
88obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o 90obj-$(CONFIG_ARM_CPU_TOPOLOGY) += topology.o
89 91
diff --git a/arch/arm/kernel/atags_compat.c b/arch/arm/kernel/atags_compat.c
index 5236ad38f417..05c28b12353c 100644
--- a/arch/arm/kernel/atags_compat.c
+++ b/arch/arm/kernel/atags_compat.c
@@ -97,8 +97,7 @@ static void __init build_tag_list(struct param_struct *params, void *taglist)
97 struct tag *tag = taglist; 97 struct tag *tag = taglist;
98 98
99 if (params->u1.s.page_size != PAGE_SIZE) { 99 if (params->u1.s.page_size != PAGE_SIZE) {
100 printk(KERN_WARNING "Warning: bad configuration page, " 100 pr_warn("Warning: bad configuration page, trying to continue\n");
101 "trying to continue\n");
102 return; 101 return;
103 } 102 }
104 103
@@ -109,8 +108,7 @@ static void __init build_tag_list(struct param_struct *params, void *taglist)
109 params->u1.s.nr_pages != 0x04000 && 108 params->u1.s.nr_pages != 0x04000 &&
110 params->u1.s.nr_pages != 0x08000 && 109 params->u1.s.nr_pages != 0x08000 &&
111 params->u1.s.nr_pages != 0x10000) { 110 params->u1.s.nr_pages != 0x10000) {
112 printk(KERN_WARNING "Warning: bad NeTTrom parameters " 111 pr_warn("Warning: bad NeTTrom parameters detected, using defaults\n");
113 "detected, using defaults\n");
114 112
115 params->u1.s.nr_pages = 0x1000; /* 16MB */ 113 params->u1.s.nr_pages = 0x1000; /* 16MB */
116 params->u1.s.ramdisk_size = 0; 114 params->u1.s.ramdisk_size = 0;
diff --git a/arch/arm/kernel/atags_parse.c b/arch/arm/kernel/atags_parse.c
index 528f8af2addb..68c6ae0b9e4c 100644
--- a/arch/arm/kernel/atags_parse.c
+++ b/arch/arm/kernel/atags_parse.c
@@ -167,8 +167,7 @@ static void __init parse_tags(const struct tag *t)
167{ 167{
168 for (; t->hdr.size; t = tag_next(t)) 168 for (; t->hdr.size; t = tag_next(t))
169 if (!parse_tag(t)) 169 if (!parse_tag(t))
170 printk(KERN_WARNING 170 pr_warn("Ignoring unrecognised tag 0x%08x\n",
171 "Ignoring unrecognised tag 0x%08x\n",
172 t->hdr.tag); 171 t->hdr.tag);
173} 172}
174 173
@@ -193,7 +192,7 @@ setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
193 */ 192 */
194 for_each_machine_desc(p) 193 for_each_machine_desc(p)
195 if (machine_nr == p->nr) { 194 if (machine_nr == p->nr) {
196 printk("Machine: %s\n", p->name); 195 pr_info("Machine: %s\n", p->name);
197 mdesc = p; 196 mdesc = p;
198 break; 197 break;
199 } 198 }
diff --git a/arch/arm/kernel/atags_proc.c b/arch/arm/kernel/atags_proc.c
index c7ff8073416f..5a3379055f55 100644
--- a/arch/arm/kernel/atags_proc.c
+++ b/arch/arm/kernel/atags_proc.c
@@ -41,7 +41,7 @@ static int __init init_atags_procfs(void)
41 size_t size; 41 size_t size;
42 42
43 if (tag->hdr.tag != ATAG_CORE) { 43 if (tag->hdr.tag != ATAG_CORE) {
44 printk(KERN_INFO "No ATAGs?"); 44 pr_info("No ATAGs?");
45 return -EINVAL; 45 return -EINVAL;
46 } 46 }
47 47
@@ -68,7 +68,7 @@ static int __init init_atags_procfs(void)
68 68
69nomem: 69nomem:
70 kfree(b); 70 kfree(b);
71 printk(KERN_ERR "Exporting ATAGs: not enough memory\n"); 71 pr_err("Exporting ATAGs: not enough memory\n");
72 72
73 return -ENOMEM; 73 return -ENOMEM;
74} 74}
diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c
index daaff73bc776..a4effd6d8f2f 100644
--- a/arch/arm/kernel/bios32.c
+++ b/arch/arm/kernel/bios32.c
@@ -364,7 +364,7 @@ void pcibios_fixup_bus(struct pci_bus *bus)
364 /* 364 /*
365 * Report what we did for this bus 365 * Report what we did for this bus
366 */ 366 */
367 printk(KERN_INFO "PCI: bus%d: Fast back to back transfers %sabled\n", 367 pr_info("PCI: bus%d: Fast back to back transfers %sabled\n",
368 bus->number, (features & PCI_COMMAND_FAST_BACK) ? "en" : "dis"); 368 bus->number, (features & PCI_COMMAND_FAST_BACK) ? "en" : "dis");
369} 369}
370EXPORT_SYMBOL(pcibios_fixup_bus); 370EXPORT_SYMBOL(pcibios_fixup_bus);
diff --git a/arch/arm/kernel/dma-isa.c b/arch/arm/kernel/dma-isa.c
index 360bb6d701f5..84363fe7bad2 100644
--- a/arch/arm/kernel/dma-isa.c
+++ b/arch/arm/kernel/dma-isa.c
@@ -213,8 +213,8 @@ void __init isa_init_dma(void)
213 for (chan = 0; chan < 8; chan++) { 213 for (chan = 0; chan < 8; chan++) {
214 int ret = isa_dma_add(chan, &isa_dma[chan]); 214 int ret = isa_dma_add(chan, &isa_dma[chan]);
215 if (ret) 215 if (ret)
216 printk(KERN_ERR "ISADMA%u: unable to register: %d\n", 216 pr_err("ISADMA%u: unable to register: %d\n",
217 chan, ret); 217 chan, ret);
218 } 218 }
219 219
220 request_dma(DMA_ISA_CASCADE, "cascade"); 220 request_dma(DMA_ISA_CASCADE, "cascade");
diff --git a/arch/arm/kernel/dma.c b/arch/arm/kernel/dma.c
index 7b829d9663b1..e651c4d0a0d9 100644
--- a/arch/arm/kernel/dma.c
+++ b/arch/arm/kernel/dma.c
@@ -79,7 +79,7 @@ int request_dma(unsigned int chan, const char *device_id)
79 return ret; 79 return ret;
80 80
81bad_dma: 81bad_dma:
82 printk(KERN_ERR "dma: trying to allocate DMA%d\n", chan); 82 pr_err("dma: trying to allocate DMA%d\n", chan);
83 return -EINVAL; 83 return -EINVAL;
84 84
85busy: 85busy:
@@ -100,7 +100,7 @@ void free_dma(unsigned int chan)
100 goto bad_dma; 100 goto bad_dma;
101 101
102 if (dma->active) { 102 if (dma->active) {
103 printk(KERN_ERR "dma%d: freeing active DMA\n", chan); 103 pr_err("dma%d: freeing active DMA\n", chan);
104 dma->d_ops->disable(chan, dma); 104 dma->d_ops->disable(chan, dma);
105 dma->active = 0; 105 dma->active = 0;
106 } 106 }
@@ -111,11 +111,11 @@ void free_dma(unsigned int chan)
111 return; 111 return;
112 } 112 }
113 113
114 printk(KERN_ERR "dma%d: trying to free free DMA\n", chan); 114 pr_err("dma%d: trying to free free DMA\n", chan);
115 return; 115 return;
116 116
117bad_dma: 117bad_dma:
118 printk(KERN_ERR "dma: trying to free DMA%d\n", chan); 118 pr_err("dma: trying to free DMA%d\n", chan);
119} 119}
120EXPORT_SYMBOL(free_dma); 120EXPORT_SYMBOL(free_dma);
121 121
@@ -126,8 +126,7 @@ void set_dma_sg (unsigned int chan, struct scatterlist *sg, int nr_sg)
126 dma_t *dma = dma_channel(chan); 126 dma_t *dma = dma_channel(chan);
127 127
128 if (dma->active) 128 if (dma->active)
129 printk(KERN_ERR "dma%d: altering DMA SG while " 129 pr_err("dma%d: altering DMA SG while DMA active\n", chan);
130 "DMA active\n", chan);
131 130
132 dma->sg = sg; 131 dma->sg = sg;
133 dma->sgcount = nr_sg; 132 dma->sgcount = nr_sg;
@@ -144,8 +143,7 @@ void __set_dma_addr (unsigned int chan, void *addr)
144 dma_t *dma = dma_channel(chan); 143 dma_t *dma = dma_channel(chan);
145 144
146 if (dma->active) 145 if (dma->active)
147 printk(KERN_ERR "dma%d: altering DMA address while " 146 pr_err("dma%d: altering DMA address while DMA active\n", chan);
148 "DMA active\n", chan);
149 147
150 dma->sg = NULL; 148 dma->sg = NULL;
151 dma->addr = addr; 149 dma->addr = addr;
@@ -162,8 +160,7 @@ void set_dma_count (unsigned int chan, unsigned long count)
162 dma_t *dma = dma_channel(chan); 160 dma_t *dma = dma_channel(chan);
163 161
164 if (dma->active) 162 if (dma->active)
165 printk(KERN_ERR "dma%d: altering DMA count while " 163 pr_err("dma%d: altering DMA count while DMA active\n", chan);
166 "DMA active\n", chan);
167 164
168 dma->sg = NULL; 165 dma->sg = NULL;
169 dma->count = count; 166 dma->count = count;
@@ -178,8 +175,7 @@ void set_dma_mode (unsigned int chan, unsigned int mode)
178 dma_t *dma = dma_channel(chan); 175 dma_t *dma = dma_channel(chan);
179 176
180 if (dma->active) 177 if (dma->active)
181 printk(KERN_ERR "dma%d: altering DMA mode while " 178 pr_err("dma%d: altering DMA mode while DMA active\n", chan);
182 "DMA active\n", chan);
183 179
184 dma->dma_mode = mode; 180 dma->dma_mode = mode;
185 dma->invalid = 1; 181 dma->invalid = 1;
@@ -202,7 +198,7 @@ void enable_dma (unsigned int chan)
202 return; 198 return;
203 199
204free_dma: 200free_dma:
205 printk(KERN_ERR "dma%d: trying to enable free DMA\n", chan); 201 pr_err("dma%d: trying to enable free DMA\n", chan);
206 BUG(); 202 BUG();
207} 203}
208EXPORT_SYMBOL(enable_dma); 204EXPORT_SYMBOL(enable_dma);
@@ -223,7 +219,7 @@ void disable_dma (unsigned int chan)
223 return; 219 return;
224 220
225free_dma: 221free_dma:
226 printk(KERN_ERR "dma%d: trying to disable free DMA\n", chan); 222 pr_err("dma%d: trying to disable free DMA\n", chan);
227 BUG(); 223 BUG();
228} 224}
229EXPORT_SYMBOL(disable_dma); 225EXPORT_SYMBOL(disable_dma);
@@ -240,7 +236,7 @@ EXPORT_SYMBOL(dma_channel_active);
240 236
241void set_dma_page(unsigned int chan, char pagenr) 237void set_dma_page(unsigned int chan, char pagenr)
242{ 238{
243 printk(KERN_ERR "dma%d: trying to set_dma_page\n", chan); 239 pr_err("dma%d: trying to set_dma_page\n", chan);
244} 240}
245EXPORT_SYMBOL(set_dma_page); 241EXPORT_SYMBOL(set_dma_page);
246 242
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 6bb09d4abdea..f8ccc21fa032 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -109,241 +109,6 @@ ENDPROC(ret_from_fork)
109#undef CALL 109#undef CALL
110#define CALL(x) .long x 110#define CALL(x) .long x
111 111
112#ifdef CONFIG_FUNCTION_TRACER
113/*
114 * When compiling with -pg, gcc inserts a call to the mcount routine at the
115 * start of every function. In mcount, apart from the function's address (in
116 * lr), we need to get hold of the function's caller's address.
117 *
118 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
119 *
120 * bl mcount
121 *
122 * These versions have the limitation that in order for the mcount routine to
123 * be able to determine the function's caller's address, an APCS-style frame
124 * pointer (which is set up with something like the code below) is required.
125 *
126 * mov ip, sp
127 * push {fp, ip, lr, pc}
128 * sub fp, ip, #4
129 *
130 * With EABI, these frame pointers are not available unless -mapcs-frame is
131 * specified, and if building as Thumb-2, not even then.
132 *
133 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
134 * with call sites like:
135 *
136 * push {lr}
137 * bl __gnu_mcount_nc
138 *
139 * With these compilers, frame pointers are not necessary.
140 *
141 * mcount can be thought of as a function called in the middle of a subroutine
142 * call. As such, it needs to be transparent for both the caller and the
143 * callee: the original lr needs to be restored when leaving mcount, and no
144 * registers should be clobbered. (In the __gnu_mcount_nc implementation, we
145 * clobber the ip register. This is OK because the ARM calling convention
146 * allows it to be clobbered in subroutines and doesn't use it to hold
147 * parameters.)
148 *
149 * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
150 * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
151 * arch/arm/kernel/ftrace.c).
152 */
153
154#ifndef CONFIG_OLD_MCOUNT
155#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
156#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
157#endif
158#endif
159
160.macro mcount_adjust_addr rd, rn
161 bic \rd, \rn, #1 @ clear the Thumb bit if present
162 sub \rd, \rd, #MCOUNT_INSN_SIZE
163.endm
164
165.macro __mcount suffix
166 mcount_enter
167 ldr r0, =ftrace_trace_function
168 ldr r2, [r0]
169 adr r0, .Lftrace_stub
170 cmp r0, r2
171 bne 1f
172
173#ifdef CONFIG_FUNCTION_GRAPH_TRACER
174 ldr r1, =ftrace_graph_return
175 ldr r2, [r1]
176 cmp r0, r2
177 bne ftrace_graph_caller\suffix
178
179 ldr r1, =ftrace_graph_entry
180 ldr r2, [r1]
181 ldr r0, =ftrace_graph_entry_stub
182 cmp r0, r2
183 bne ftrace_graph_caller\suffix
184#endif
185
186 mcount_exit
187
1881: mcount_get_lr r1 @ lr of instrumented func
189 mcount_adjust_addr r0, lr @ instrumented function
190 adr lr, BSYM(2f)
191 mov pc, r2
1922: mcount_exit
193.endm
194
195.macro __ftrace_caller suffix
196 mcount_enter
197
198 mcount_get_lr r1 @ lr of instrumented func
199 mcount_adjust_addr r0, lr @ instrumented function
200
201 .globl ftrace_call\suffix
202ftrace_call\suffix:
203 bl ftrace_stub
204
205#ifdef CONFIG_FUNCTION_GRAPH_TRACER
206 .globl ftrace_graph_call\suffix
207ftrace_graph_call\suffix:
208 mov r0, r0
209#endif
210
211 mcount_exit
212.endm
213
214.macro __ftrace_graph_caller
215 sub r0, fp, #4 @ &lr of instrumented routine (&parent)
216#ifdef CONFIG_DYNAMIC_FTRACE
217 @ called from __ftrace_caller, saved in mcount_enter
218 ldr r1, [sp, #16] @ instrumented routine (func)
219 mcount_adjust_addr r1, r1
220#else
221 @ called from __mcount, untouched in lr
222 mcount_adjust_addr r1, lr @ instrumented routine (func)
223#endif
224 mov r2, fp @ frame pointer
225 bl prepare_ftrace_return
226 mcount_exit
227.endm
228
229#ifdef CONFIG_OLD_MCOUNT
230/*
231 * mcount
232 */
233
234.macro mcount_enter
235 stmdb sp!, {r0-r3, lr}
236.endm
237
238.macro mcount_get_lr reg
239 ldr \reg, [fp, #-4]
240.endm
241
242.macro mcount_exit
243 ldr lr, [fp, #-4]
244 ldmia sp!, {r0-r3, pc}
245.endm
246
247ENTRY(mcount)
248#ifdef CONFIG_DYNAMIC_FTRACE
249 stmdb sp!, {lr}
250 ldr lr, [fp, #-4]
251 ldmia sp!, {pc}
252#else
253 __mcount _old
254#endif
255ENDPROC(mcount)
256
257#ifdef CONFIG_DYNAMIC_FTRACE
258ENTRY(ftrace_caller_old)
259 __ftrace_caller _old
260ENDPROC(ftrace_caller_old)
261#endif
262
263#ifdef CONFIG_FUNCTION_GRAPH_TRACER
264ENTRY(ftrace_graph_caller_old)
265 __ftrace_graph_caller
266ENDPROC(ftrace_graph_caller_old)
267#endif
268
269.purgem mcount_enter
270.purgem mcount_get_lr
271.purgem mcount_exit
272#endif
273
274/*
275 * __gnu_mcount_nc
276 */
277
278.macro mcount_enter
279/*
280 * This pad compensates for the push {lr} at the call site. Note that we are
281 * unable to unwind through a function which does not otherwise save its lr.
282 */
283 UNWIND(.pad #4)
284 stmdb sp!, {r0-r3, lr}
285 UNWIND(.save {r0-r3, lr})
286.endm
287
288.macro mcount_get_lr reg
289 ldr \reg, [sp, #20]
290.endm
291
292.macro mcount_exit
293 ldmia sp!, {r0-r3, ip, lr}
294 ret ip
295.endm
296
297ENTRY(__gnu_mcount_nc)
298UNWIND(.fnstart)
299#ifdef CONFIG_DYNAMIC_FTRACE
300 mov ip, lr
301 ldmia sp!, {lr}
302 ret ip
303#else
304 __mcount
305#endif
306UNWIND(.fnend)
307ENDPROC(__gnu_mcount_nc)
308
309#ifdef CONFIG_DYNAMIC_FTRACE
310ENTRY(ftrace_caller)
311UNWIND(.fnstart)
312 __ftrace_caller
313UNWIND(.fnend)
314ENDPROC(ftrace_caller)
315#endif
316
317#ifdef CONFIG_FUNCTION_GRAPH_TRACER
318ENTRY(ftrace_graph_caller)
319UNWIND(.fnstart)
320 __ftrace_graph_caller
321UNWIND(.fnend)
322ENDPROC(ftrace_graph_caller)
323#endif
324
325.purgem mcount_enter
326.purgem mcount_get_lr
327.purgem mcount_exit
328
329#ifdef CONFIG_FUNCTION_GRAPH_TRACER
330 .globl return_to_handler
331return_to_handler:
332 stmdb sp!, {r0-r3}
333 mov r0, fp @ frame pointer
334 bl ftrace_return_to_handler
335 mov lr, r0 @ r0 has real ret addr
336 ldmia sp!, {r0-r3}
337 ret lr
338#endif
339
340ENTRY(ftrace_stub)
341.Lftrace_stub:
342 ret lr
343ENDPROC(ftrace_stub)
344
345#endif /* CONFIG_FUNCTION_TRACER */
346
347/*============================================================================= 112/*=============================================================================
348 * SWI handler 113 * SWI handler
349 *----------------------------------------------------------------------------- 114 *-----------------------------------------------------------------------------
diff --git a/arch/arm/kernel/entry-ftrace.S b/arch/arm/kernel/entry-ftrace.S
new file mode 100644
index 000000000000..fe57c73e70a4
--- /dev/null
+++ b/arch/arm/kernel/entry-ftrace.S
@@ -0,0 +1,243 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 */
6
7#include <asm/assembler.h>
8#include <asm/ftrace.h>
9#include <asm/unwind.h>
10
11#include "entry-header.S"
12
13/*
14 * When compiling with -pg, gcc inserts a call to the mcount routine at the
15 * start of every function. In mcount, apart from the function's address (in
16 * lr), we need to get hold of the function's caller's address.
17 *
18 * Older GCCs (pre-4.4) inserted a call to a routine called mcount like this:
19 *
20 * bl mcount
21 *
22 * These versions have the limitation that in order for the mcount routine to
23 * be able to determine the function's caller's address, an APCS-style frame
24 * pointer (which is set up with something like the code below) is required.
25 *
26 * mov ip, sp
27 * push {fp, ip, lr, pc}
28 * sub fp, ip, #4
29 *
30 * With EABI, these frame pointers are not available unless -mapcs-frame is
31 * specified, and if building as Thumb-2, not even then.
32 *
33 * Newer GCCs (4.4+) solve this problem by introducing a new version of mcount,
34 * with call sites like:
35 *
36 * push {lr}
37 * bl __gnu_mcount_nc
38 *
39 * With these compilers, frame pointers are not necessary.
40 *
41 * mcount can be thought of as a function called in the middle of a subroutine
42 * call. As such, it needs to be transparent for both the caller and the
43 * callee: the original lr needs to be restored when leaving mcount, and no
44 * registers should be clobbered. (In the __gnu_mcount_nc implementation, we
45 * clobber the ip register. This is OK because the ARM calling convention
46 * allows it to be clobbered in subroutines and doesn't use it to hold
47 * parameters.)
48 *
49 * When using dynamic ftrace, we patch out the mcount call by a "mov r0, r0"
50 * for the mcount case, and a "pop {lr}" for the __gnu_mcount_nc case (see
51 * arch/arm/kernel/ftrace.c).
52 */
53
54#ifndef CONFIG_OLD_MCOUNT
55#if (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 4))
56#error Ftrace requires CONFIG_FRAME_POINTER=y with GCC older than 4.4.0.
57#endif
58#endif
59
60.macro mcount_adjust_addr rd, rn
61 bic \rd, \rn, #1 @ clear the Thumb bit if present
62 sub \rd, \rd, #MCOUNT_INSN_SIZE
63.endm
64
65.macro __mcount suffix
66 mcount_enter
67 ldr r0, =ftrace_trace_function
68 ldr r2, [r0]
69 adr r0, .Lftrace_stub
70 cmp r0, r2
71 bne 1f
72
73#ifdef CONFIG_FUNCTION_GRAPH_TRACER
74 ldr r1, =ftrace_graph_return
75 ldr r2, [r1]
76 cmp r0, r2
77 bne ftrace_graph_caller\suffix
78
79 ldr r1, =ftrace_graph_entry
80 ldr r2, [r1]
81 ldr r0, =ftrace_graph_entry_stub
82 cmp r0, r2
83 bne ftrace_graph_caller\suffix
84#endif
85
86 mcount_exit
87
881: mcount_get_lr r1 @ lr of instrumented func
89 mcount_adjust_addr r0, lr @ instrumented function
90 adr lr, BSYM(2f)
91 mov pc, r2
922: mcount_exit
93.endm
94
95.macro __ftrace_caller suffix
96 mcount_enter
97
98 mcount_get_lr r1 @ lr of instrumented func
99 mcount_adjust_addr r0, lr @ instrumented function
100
101 .globl ftrace_call\suffix
102ftrace_call\suffix:
103 bl ftrace_stub
104
105#ifdef CONFIG_FUNCTION_GRAPH_TRACER
106 .globl ftrace_graph_call\suffix
107ftrace_graph_call\suffix:
108 mov r0, r0
109#endif
110
111 mcount_exit
112.endm
113
114.macro __ftrace_graph_caller
115 sub r0, fp, #4 @ &lr of instrumented routine (&parent)
116#ifdef CONFIG_DYNAMIC_FTRACE
117 @ called from __ftrace_caller, saved in mcount_enter
118 ldr r1, [sp, #16] @ instrumented routine (func)
119 mcount_adjust_addr r1, r1
120#else
121 @ called from __mcount, untouched in lr
122 mcount_adjust_addr r1, lr @ instrumented routine (func)
123#endif
124 mov r2, fp @ frame pointer
125 bl prepare_ftrace_return
126 mcount_exit
127.endm
128
129#ifdef CONFIG_OLD_MCOUNT
130/*
131 * mcount
132 */
133
134.macro mcount_enter
135 stmdb sp!, {r0-r3, lr}
136.endm
137
138.macro mcount_get_lr reg
139 ldr \reg, [fp, #-4]
140.endm
141
142.macro mcount_exit
143 ldr lr, [fp, #-4]
144 ldmia sp!, {r0-r3, pc}
145.endm
146
147ENTRY(mcount)
148#ifdef CONFIG_DYNAMIC_FTRACE
149 stmdb sp!, {lr}
150 ldr lr, [fp, #-4]
151 ldmia sp!, {pc}
152#else
153 __mcount _old
154#endif
155ENDPROC(mcount)
156
157#ifdef CONFIG_DYNAMIC_FTRACE
158ENTRY(ftrace_caller_old)
159 __ftrace_caller _old
160ENDPROC(ftrace_caller_old)
161#endif
162
163#ifdef CONFIG_FUNCTION_GRAPH_TRACER
164ENTRY(ftrace_graph_caller_old)
165 __ftrace_graph_caller
166ENDPROC(ftrace_graph_caller_old)
167#endif
168
169.purgem mcount_enter
170.purgem mcount_get_lr
171.purgem mcount_exit
172#endif
173
174/*
175 * __gnu_mcount_nc
176 */
177
178.macro mcount_enter
179/*
180 * This pad compensates for the push {lr} at the call site. Note that we are
181 * unable to unwind through a function which does not otherwise save its lr.
182 */
183 UNWIND(.pad #4)
184 stmdb sp!, {r0-r3, lr}
185 UNWIND(.save {r0-r3, lr})
186.endm
187
188.macro mcount_get_lr reg
189 ldr \reg, [sp, #20]
190.endm
191
192.macro mcount_exit
193 ldmia sp!, {r0-r3, ip, lr}
194 ret ip
195.endm
196
197ENTRY(__gnu_mcount_nc)
198UNWIND(.fnstart)
199#ifdef CONFIG_DYNAMIC_FTRACE
200 mov ip, lr
201 ldmia sp!, {lr}
202 ret ip
203#else
204 __mcount
205#endif
206UNWIND(.fnend)
207ENDPROC(__gnu_mcount_nc)
208
209#ifdef CONFIG_DYNAMIC_FTRACE
210ENTRY(ftrace_caller)
211UNWIND(.fnstart)
212 __ftrace_caller
213UNWIND(.fnend)
214ENDPROC(ftrace_caller)
215#endif
216
217#ifdef CONFIG_FUNCTION_GRAPH_TRACER
218ENTRY(ftrace_graph_caller)
219UNWIND(.fnstart)
220 __ftrace_graph_caller
221UNWIND(.fnend)
222ENDPROC(ftrace_graph_caller)
223#endif
224
225.purgem mcount_enter
226.purgem mcount_get_lr
227.purgem mcount_exit
228
229#ifdef CONFIG_FUNCTION_GRAPH_TRACER
230 .globl return_to_handler
231return_to_handler:
232 stmdb sp!, {r0-r3}
233 mov r0, fp @ frame pointer
234 bl ftrace_return_to_handler
235 mov lr, r0 @ r0 has real ret addr
236 ldmia sp!, {r0-r3}
237 ret lr
238#endif
239
240ENTRY(ftrace_stub)
241.Lftrace_stub:
242 ret lr
243ENDPROC(ftrace_stub)
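For orientation, the code this new file intercepts is generated by the compiler, not written by hand: building a C function with -pg makes its prologue call the mcount hook described in the file's comment. Illustrative only:

    /*
     * int traced_function(int x) { return x + 1; }
     *
     * compiled with -pg (EABI, GCC >= 4.4) conceptually begins with:
     *
     *      push    {lr}
     *      bl      __gnu_mcount_nc
     *
     * which DYNAMIC_FTRACE later patches to "pop {lr}" when tracing
     * is disabled for the function.
     */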
diff --git a/arch/arm/kernel/etm.c b/arch/arm/kernel/etm.c
index 131a6ab5f355..8b96972dcb1d 100644
--- a/arch/arm/kernel/etm.c
+++ b/arch/arm/kernel/etm.c
@@ -213,7 +213,7 @@ static void etm_dump(void)
213 int length; 213 int length;
214 214
215 if (!t->etb_regs) { 215 if (!t->etb_regs) {
216 printk(KERN_INFO "No tracing hardware found\n"); 216 pr_info("No tracing hardware found\n");
217 return; 217 return;
218 } 218 }
219 219
@@ -229,11 +229,11 @@ static void etm_dump(void)
229 229
230 etb_writel(t, first, ETBR_READADDR); 230 etb_writel(t, first, ETBR_READADDR);
231 231
232 printk(KERN_INFO "Trace buffer contents length: %d\n", length); 232 pr_info("Trace buffer contents length: %d\n", length);
233 printk(KERN_INFO "--- ETB buffer begin ---\n"); 233 pr_info("--- ETB buffer begin ---\n");
234 for (; length; length--) 234 for (; length; length--)
235 printk("%08x", cpu_to_be32(etb_readl(t, ETBR_READMEM))); 235 printk("%08x", cpu_to_be32(etb_readl(t, ETBR_READMEM)));
236 printk(KERN_INFO "\n--- ETB buffer end ---\n"); 236 pr_info("\n--- ETB buffer end ---\n");
237 237
238 /* deassert the overflow bit */ 238 /* deassert the overflow bit */
239 etb_writel(t, 1, ETBR_CTRL); 239 etb_writel(t, 1, ETBR_CTRL);
@@ -633,14 +633,14 @@ static int __init etm_init(void)
633 633
634 retval = amba_driver_register(&etb_driver); 634 retval = amba_driver_register(&etb_driver);
635 if (retval) { 635 if (retval) {
636 printk(KERN_ERR "Failed to register etb\n"); 636 pr_err("Failed to register etb\n");
637 return retval; 637 return retval;
638 } 638 }
639 639
640 retval = amba_driver_register(&etm_driver); 640 retval = amba_driver_register(&etm_driver);
641 if (retval) { 641 if (retval) {
642 amba_driver_unregister(&etb_driver); 642 amba_driver_unregister(&etb_driver);
643 printk(KERN_ERR "Failed to probe etm\n"); 643 pr_err("Failed to probe etm\n");
644 return retval; 644 return retval;
645 } 645 }
646 646
diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
index b37752a96652..059c3da0fee3 100644
--- a/arch/arm/kernel/fiq.c
+++ b/arch/arm/kernel/fiq.c
@@ -124,7 +124,7 @@ int claim_fiq(struct fiq_handler *f)
124void release_fiq(struct fiq_handler *f) 124void release_fiq(struct fiq_handler *f)
125{ 125{
126 if (current_fiq != f) { 126 if (current_fiq != f) {
127 printk(KERN_ERR "%s FIQ trying to release %s FIQ\n", 127 pr_err("%s FIQ trying to release %s FIQ\n",
128 f->name, current_fiq->name); 128 f->name, current_fiq->name);
129 dump_stack(); 129 dump_stack();
130 return; 130 return;
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index af9a8a927a4e..b8c75e45a950 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -15,6 +15,7 @@
15#include <linux/ftrace.h> 15#include <linux/ftrace.h>
16#include <linux/uaccess.h> 16#include <linux/uaccess.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/stop_machine.h>
18 19
19#include <asm/cacheflush.h> 20#include <asm/cacheflush.h>
20#include <asm/opcodes.h> 21#include <asm/opcodes.h>
@@ -35,6 +36,22 @@
35 36
36#define OLD_NOP 0xe1a00000 /* mov r0, r0 */ 37#define OLD_NOP 0xe1a00000 /* mov r0, r0 */
37 38
39static int __ftrace_modify_code(void *data)
40{
41 int *command = data;
42
43 set_kernel_text_rw();
44 ftrace_modify_all_code(*command);
45 set_kernel_text_ro();
46
47 return 0;
48}
49
50void arch_ftrace_update_code(int command)
51{
52 stop_machine(__ftrace_modify_code, &command, NULL);
53}
54
38static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec) 55static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
39{ 56{
40 return rec->arch.old_mcount ? OLD_NOP : NOP; 57 return rec->arch.old_mcount ? OLD_NOP : NOP;
@@ -73,6 +90,8 @@ int ftrace_arch_code_modify_prepare(void)
73int ftrace_arch_code_modify_post_process(void) 90int ftrace_arch_code_modify_post_process(void)
74{ 91{
75 set_all_modules_text_ro(); 92 set_all_modules_text_ro();
93 /* Make sure any TLB misses during machine stop are cleared. */
94 flush_tlb_all();
76 return 0; 95 return 0;
77} 96}
78 97
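arch_ftrace_update_code() ties the series together: stop_machine() runs the callback with every other CPU spinning, the DEBUG_RODATA helpers open a write window, and the TLB flush afterwards drops stale translations. A hedged sketch of the same pattern for an arbitrary one-word patch (struct and names hypothetical):

    struct text_patch {
            unsigned long addr;
            u32 insn;
    };

    static int __apply_text_patch(void *data)
    {
            struct text_patch *p = data;

            set_kernel_text_rw();
            *(u32 *)p->addr = p->insn;  /* no other CPU is executing */
            set_kernel_text_ro();
            /* a real implementation would also flush caches, cf. patch_text() */
            return 0;
    }

    /* stop_machine(__apply_text_patch, &patch, NULL); */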
diff --git a/arch/arm/kernel/io.c b/arch/arm/kernel/io.c
index 9203cf883330..eedefe050022 100644
--- a/arch/arm/kernel/io.c
+++ b/arch/arm/kernel/io.c
@@ -51,6 +51,7 @@ void _memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
51 from++; 51 from++;
52 } 52 }
53} 53}
54EXPORT_SYMBOL(_memcpy_fromio);
54 55
55/* 56/*
56 * Copy data from "real" memory space to IO memory space. 57 * Copy data from "real" memory space to IO memory space.
@@ -66,6 +67,7 @@ void _memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
66 to++; 67 to++;
67 } 68 }
68} 69}
70EXPORT_SYMBOL(_memcpy_toio);
69 71
70/* 72/*
71 * "memset" on IO memory space. 73 * "memset" on IO memory space.
@@ -79,7 +81,4 @@ void _memset_io(volatile void __iomem *dst, int c, size_t count)
79 dst++; 81 dst++;
80 } 82 }
81} 83}
82
83EXPORT_SYMBOL(_memcpy_fromio);
84EXPORT_SYMBOL(_memcpy_toio);
85EXPORT_SYMBOL(_memset_io); 84EXPORT_SYMBOL(_memset_io);
diff --git a/arch/arm/kernel/irq.c b/arch/arm/kernel/irq.c
index 7c81ec428b9b..ad857bada96c 100644
--- a/arch/arm/kernel/irq.c
+++ b/arch/arm/kernel/irq.c
@@ -31,6 +31,7 @@
31#include <linux/smp.h> 31#include <linux/smp.h>
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/seq_file.h> 33#include <linux/seq_file.h>
34#include <linux/ratelimit.h>
34#include <linux/errno.h> 35#include <linux/errno.h>
35#include <linux/list.h> 36#include <linux/list.h>
36#include <linux/kallsyms.h> 37#include <linux/kallsyms.h>
@@ -82,7 +83,7 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
82 unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; 83 unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
83 84
84 if (irq >= nr_irqs) { 85 if (irq >= nr_irqs) {
85 printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq); 86 pr_err("Trying to set irq flags for IRQ%d\n", irq);
86 return; 87 return;
87 } 88 }
88 89
@@ -135,7 +136,6 @@ int __init arch_probe_nr_irqs(void)
135#endif 136#endif
136 137
137#ifdef CONFIG_HOTPLUG_CPU 138#ifdef CONFIG_HOTPLUG_CPU
138
139static bool migrate_one_irq(struct irq_desc *desc) 139static bool migrate_one_irq(struct irq_desc *desc)
140{ 140{
141 struct irq_data *d = irq_desc_get_irq_data(desc); 141 struct irq_data *d = irq_desc_get_irq_data(desc);
@@ -187,8 +187,8 @@ void migrate_irqs(void)
187 affinity_broken = migrate_one_irq(desc); 187 affinity_broken = migrate_one_irq(desc);
188 raw_spin_unlock(&desc->lock); 188 raw_spin_unlock(&desc->lock);
189 189
190 if (affinity_broken && printk_ratelimit()) 190 if (affinity_broken)
191 pr_warn("IRQ%u no longer affine to CPU%u\n", 191 pr_warn_ratelimited("IRQ%u no longer affine to CPU%u\n",
192 i, smp_processor_id()); 192 i, smp_processor_id());
193 } 193 }
194 194
diff --git a/arch/arm/kernel/iwmmxt.S b/arch/arm/kernel/iwmmxt.S
index ad58e565fe98..49fadbda8c63 100644
--- a/arch/arm/kernel/iwmmxt.S
+++ b/arch/arm/kernel/iwmmxt.S
@@ -58,6 +58,7 @@
58#define MMX_SIZE (0x98) 58#define MMX_SIZE (0x98)
59 59
60 .text 60 .text
61 .arm
61 62
62/* 63/*
63 * Lazy switching of Concan coprocessor context 64 * Lazy switching of Concan coprocessor context
@@ -182,6 +183,8 @@ concan_load:
182 tmcr wCon, r2 183 tmcr wCon, r2
183 ret lr 184 ret lr
184 185
186ENDPROC(iwmmxt_task_enable)
187
185/* 188/*
186 * Back up Concan regs to save area and disable access to them 189 * Back up Concan regs to save area and disable access to them
187 * (mainly for gdb or sleep mode usage) 190 * (mainly for gdb or sleep mode usage)
@@ -232,6 +235,8 @@ ENTRY(iwmmxt_task_disable)
2321: msr cpsr_c, ip @ restore interrupt mode 2351: msr cpsr_c, ip @ restore interrupt mode
233 ldmfd sp!, {r4, pc} 236 ldmfd sp!, {r4, pc}
234 237
238ENDPROC(iwmmxt_task_disable)
239
235/* 240/*
236 * Copy Concan state to given memory address 241 * Copy Concan state to given memory address
237 * 242 *
@@ -268,6 +273,8 @@ ENTRY(iwmmxt_task_copy)
268 msr cpsr_c, ip @ restore interrupt mode 273 msr cpsr_c, ip @ restore interrupt mode
269 ret r3 274 ret r3
270 275
276ENDPROC(iwmmxt_task_copy)
277
271/* 278/*
272 * Restore Concan state from given memory address 279 * Restore Concan state from given memory address
273 * 280 *
@@ -304,6 +311,8 @@ ENTRY(iwmmxt_task_restore)
304 msr cpsr_c, ip @ restore interrupt mode 311 msr cpsr_c, ip @ restore interrupt mode
305 ret r3 312 ret r3
306 313
314ENDPROC(iwmmxt_task_restore)
315
307/* 316/*
308 * Concan handling on task switch 317 * Concan handling on task switch
309 * 318 *
@@ -335,6 +344,8 @@ ENTRY(iwmmxt_task_switch)
335 mrc p15, 0, r1, c2, c0, 0 344 mrc p15, 0, r1, c2, c0, 0
336 sub pc, lr, r1, lsr #32 @ cpwait and return 345 sub pc, lr, r1, lsr #32 @ cpwait and return
337 346
347ENDPROC(iwmmxt_task_switch)
348
338/* 349/*
339 * Remove Concan ownership of given task 350 * Remove Concan ownership of given task
340 * 351 *
@@ -353,6 +364,8 @@ ENTRY(iwmmxt_task_release)
353 msr cpsr_c, r2 @ restore interrupts 364 msr cpsr_c, r2 @ restore interrupts
354 ret lr 365 ret lr
355 366
367ENDPROC(iwmmxt_task_release)
368
356 .data 369 .data
357concan_owner: 370concan_owner:
358 .word 0 371 .word 0
diff --git a/arch/arm/kernel/jump_label.c b/arch/arm/kernel/jump_label.c
index 4ce4f789446d..afeeb9ea6f43 100644
--- a/arch/arm/kernel/jump_label.c
+++ b/arch/arm/kernel/jump_label.c
@@ -19,7 +19,7 @@ static void __arch_jump_label_transform(struct jump_entry *entry,
19 insn = arm_gen_nop(); 19 insn = arm_gen_nop();
20 20
21 if (is_static) 21 if (is_static)
22 __patch_text(addr, insn); 22 __patch_text_early(addr, insn);
23 else 23 else
24 patch_text(addr, insn); 24 patch_text(addr, insn);
25} 25}
diff --git a/arch/arm/kernel/kgdb.c b/arch/arm/kernel/kgdb.c
index a74b53c1b7df..07db2f8a1b45 100644
--- a/arch/arm/kernel/kgdb.c
+++ b/arch/arm/kernel/kgdb.c
@@ -12,8 +12,12 @@
12#include <linux/irq.h> 12#include <linux/irq.h>
13#include <linux/kdebug.h> 13#include <linux/kdebug.h>
14#include <linux/kgdb.h> 14#include <linux/kgdb.h>
15#include <linux/uaccess.h>
16
15#include <asm/traps.h> 17#include <asm/traps.h>
16 18
19#include "patch.h"
20
17struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = 21struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
18{ 22{
19 { "r0", 4, offsetof(struct pt_regs, ARM_r0)}, 23 { "r0", 4, offsetof(struct pt_regs, ARM_r0)},
@@ -244,6 +248,31 @@ void kgdb_arch_exit(void)
244 unregister_die_notifier(&kgdb_notifier); 248 unregister_die_notifier(&kgdb_notifier);
245} 249}
246 250
251int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
252{
253 int err;
254
255 /* patch_text() only supports int-sized breakpoints */
256 BUILD_BUG_ON(sizeof(int) != BREAK_INSTR_SIZE);
257
258 err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
259 BREAK_INSTR_SIZE);
260 if (err)
261 return err;
262
263 patch_text((void *)bpt->bpt_addr,
264 *(unsigned int *)arch_kgdb_ops.gdb_bpt_instr);
265
266 return err;
267}
268
269int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
270{
271 patch_text((void *)bpt->bpt_addr, *(unsigned int *)bpt->saved_instr);
272
273 return 0;
274}
275
247/* 276/*
248 * Register our undef instruction hooks with ARM undef core. 277 * Register our undef instruction hooks with ARM undef core.
 249 * We register a hook specifically looking for the KGDB break inst 278 * We register a hook specifically looking for the KGDB break inst
diff --git a/arch/arm/kernel/machine_kexec.c b/arch/arm/kernel/machine_kexec.c
index 8cf0996aa1a8..de2b085ad753 100644
--- a/arch/arm/kernel/machine_kexec.c
+++ b/arch/arm/kernel/machine_kexec.c
@@ -29,6 +29,7 @@ extern unsigned long kexec_boot_atags;
29 29
30static atomic_t waiting_for_crash_ipi; 30static atomic_t waiting_for_crash_ipi;
31 31
32static unsigned long dt_mem;
32/* 33/*
33 * Provide a dummy crash_notes definition while crash dump arrives to arm. 34 * Provide a dummy crash_notes definition while crash dump arrives to arm.
34 * This prevents breakage of crash_notes attribute in kernel/ksysfs.c. 35 * This prevents breakage of crash_notes attribute in kernel/ksysfs.c.
@@ -64,7 +65,7 @@ int machine_kexec_prepare(struct kimage *image)
64 return err; 65 return err;
65 66
66 if (be32_to_cpu(header) == OF_DT_HEADER) 67 if (be32_to_cpu(header) == OF_DT_HEADER)
67 kexec_boot_atags = current_segment->mem; 68 dt_mem = current_segment->mem;
68 } 69 }
69 return 0; 70 return 0;
70} 71}
@@ -126,12 +127,12 @@ void machine_crash_shutdown(struct pt_regs *regs)
126 msecs--; 127 msecs--;
127 } 128 }
128 if (atomic_read(&waiting_for_crash_ipi) > 0) 129 if (atomic_read(&waiting_for_crash_ipi) > 0)
129 printk(KERN_WARNING "Non-crashing CPUs did not react to IPI\n"); 130 pr_warn("Non-crashing CPUs did not react to IPI\n");
130 131
131 crash_save_cpu(regs, smp_processor_id()); 132 crash_save_cpu(regs, smp_processor_id());
132 machine_kexec_mask_interrupts(); 133 machine_kexec_mask_interrupts();
133 134
134 printk(KERN_INFO "Loading crashdump kernel...\n"); 135 pr_info("Loading crashdump kernel...\n");
135} 136}
136 137
137/* 138/*
@@ -163,12 +164,12 @@ void machine_kexec(struct kimage *image)
163 reboot_code_buffer = page_address(image->control_code_page); 164 reboot_code_buffer = page_address(image->control_code_page);
164 165
165 /* Prepare parameters for reboot_code_buffer*/ 166 /* Prepare parameters for reboot_code_buffer*/
167 set_kernel_text_rw();
166 kexec_start_address = image->start; 168 kexec_start_address = image->start;
167 kexec_indirection_page = page_list; 169 kexec_indirection_page = page_list;
168 kexec_mach_type = machine_arch_type; 170 kexec_mach_type = machine_arch_type;
169 if (!kexec_boot_atags) 171 kexec_boot_atags = dt_mem ?: image->start - KEXEC_ARM_ZIMAGE_OFFSET
170 kexec_boot_atags = image->start - KEXEC_ARM_ZIMAGE_OFFSET + KEXEC_ARM_ATAGS_OFFSET; 172 + KEXEC_ARM_ATAGS_OFFSET;
171
172 173
173 /* copy our kernel relocation code to the control code page */ 174 /* copy our kernel relocation code to the control code page */
174 reboot_entry = fncpy(reboot_code_buffer, 175 reboot_entry = fncpy(reboot_code_buffer,
@@ -177,7 +178,7 @@ void machine_kexec(struct kimage *image)
177 reboot_entry_phys = (unsigned long)reboot_entry + 178 reboot_entry_phys = (unsigned long)reboot_entry +
178 (reboot_code_buffer_phys - (unsigned long)reboot_code_buffer); 179 (reboot_code_buffer_phys - (unsigned long)reboot_code_buffer);
179 180
180 printk(KERN_INFO "Bye!\n"); 181 pr_info("Bye!\n");
181 182
182 if (kexec_reinit) 183 if (kexec_reinit)
183 kexec_reinit(); 184 kexec_reinit();
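The kexec_boot_atags fallback above uses the GNU binary '?:' (elvis) extension: a ?: b evaluates a once and yields it unless it is zero. A standalone sketch of that selection logic; the addresses are invented for the demo, and it needs GCC or Clang:

#include <stdio.h>

static unsigned long pick_atags(unsigned long dt_mem, unsigned long fallback)
{
	/* GNU extension: dt_mem if non-zero, otherwise the computed fallback. */
	return dt_mem ?: fallback;
}

int main(void)
{
	printf("%#lx\n", pick_atags(0x0, 0x8100));	/* no DT segment: 0x8100 */
	printf("%#lx\n", pick_atags(0x4000, 0x8100));	/* DT segment wins: 0x4000 */
	return 0;
}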
diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
index 6a4dffefd357..bea7db9e5b80 100644
--- a/arch/arm/kernel/module.c
+++ b/arch/arm/kernel/module.c
@@ -251,7 +251,7 @@ apply_relocate(Elf32_Shdr *sechdrs, const char *strtab, unsigned int symindex,
251#endif 251#endif
252 252
253 default: 253 default:
254 printk(KERN_ERR "%s: unknown relocation: %u\n", 254 pr_err("%s: unknown relocation: %u\n",
255 module->name, ELF32_R_TYPE(rel->r_info)); 255 module->name, ELF32_R_TYPE(rel->r_info));
256 return -ENOEXEC; 256 return -ENOEXEC;
257 } 257 }
diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
index 07314af47733..5038960e3c55 100644
--- a/arch/arm/kernel/patch.c
+++ b/arch/arm/kernel/patch.c
@@ -1,8 +1,11 @@
1#include <linux/kernel.h> 1#include <linux/kernel.h>
2#include <linux/spinlock.h>
2#include <linux/kprobes.h> 3#include <linux/kprobes.h>
4#include <linux/mm.h>
3#include <linux/stop_machine.h> 5#include <linux/stop_machine.h>
4 6
5#include <asm/cacheflush.h> 7#include <asm/cacheflush.h>
8#include <asm/fixmap.h>
6#include <asm/smp_plat.h> 9#include <asm/smp_plat.h>
7#include <asm/opcodes.h> 10#include <asm/opcodes.h>
8 11
@@ -13,21 +16,77 @@ struct patch {
13 unsigned int insn; 16 unsigned int insn;
14}; 17};
15 18
16void __kprobes __patch_text(void *addr, unsigned int insn) 19static DEFINE_SPINLOCK(patch_lock);
20
21static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
22 __acquires(&patch_lock)
23{
24 unsigned int uintaddr = (uintptr_t) addr;
25 bool module = !core_kernel_text(uintaddr);
26 struct page *page;
27
28 if (module && IS_ENABLED(CONFIG_DEBUG_SET_MODULE_RONX))
29 page = vmalloc_to_page(addr);
30 else if (!module && IS_ENABLED(CONFIG_DEBUG_RODATA))
31 page = virt_to_page(addr);
32 else
33 return addr;
34
35 if (flags)
36 spin_lock_irqsave(&patch_lock, *flags);
37 else
38 __acquire(&patch_lock);
39
40 set_fixmap(fixmap, page_to_phys(page));
41
42 return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
43}
44
45static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
46 __releases(&patch_lock)
47{
48 clear_fixmap(fixmap);
49
50 if (flags)
51 spin_unlock_irqrestore(&patch_lock, *flags);
52 else
53 __release(&patch_lock);
54}
55
56void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
17{ 57{
18 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL); 58 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
59 unsigned int uintaddr = (uintptr_t) addr;
60 bool twopage = false;
61 unsigned long flags;
62 void *waddr = addr;
19 int size; 63 int size;
20 64
65 if (remap)
66 waddr = patch_map(addr, FIX_TEXT_POKE0, &flags);
67 else
68 __acquire(&patch_lock);
69
21 if (thumb2 && __opcode_is_thumb16(insn)) { 70 if (thumb2 && __opcode_is_thumb16(insn)) {
22 *(u16 *)addr = __opcode_to_mem_thumb16(insn); 71 *(u16 *)waddr = __opcode_to_mem_thumb16(insn);
23 size = sizeof(u16); 72 size = sizeof(u16);
24 } else if (thumb2 && ((uintptr_t)addr & 2)) { 73 } else if (thumb2 && (uintaddr & 2)) {
25 u16 first = __opcode_thumb32_first(insn); 74 u16 first = __opcode_thumb32_first(insn);
26 u16 second = __opcode_thumb32_second(insn); 75 u16 second = __opcode_thumb32_second(insn);
27 u16 *addrh = addr; 76 u16 *addrh0 = waddr;
77 u16 *addrh1 = waddr + 2;
78
79 twopage = (uintaddr & ~PAGE_MASK) == PAGE_SIZE - 2;
80 if (twopage && remap)
81 addrh1 = patch_map(addr + 2, FIX_TEXT_POKE1, NULL);
82
83 *addrh0 = __opcode_to_mem_thumb16(first);
84 *addrh1 = __opcode_to_mem_thumb16(second);
28 85
29 addrh[0] = __opcode_to_mem_thumb16(first); 86 if (twopage && addrh1 != addr + 2) {
30 addrh[1] = __opcode_to_mem_thumb16(second); 87 flush_kernel_vmap_range(addrh1, 2);
88 patch_unmap(FIX_TEXT_POKE1, NULL);
89 }
31 90
32 size = sizeof(u32); 91 size = sizeof(u32);
33 } else { 92 } else {
@@ -36,10 +95,16 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
36 else 95 else
37 insn = __opcode_to_mem_arm(insn); 96 insn = __opcode_to_mem_arm(insn);
38 97
39 *(u32 *)addr = insn; 98 *(u32 *)waddr = insn;
40 size = sizeof(u32); 99 size = sizeof(u32);
41 } 100 }
42 101
102 if (waddr != addr) {
103 flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
104 patch_unmap(FIX_TEXT_POKE0, &flags);
105 } else
106 __release(&patch_lock);
107
43 flush_icache_range((uintptr_t)(addr), 108 flush_icache_range((uintptr_t)(addr),
44 (uintptr_t)(addr) + size); 109 (uintptr_t)(addr) + size);
45} 110}
@@ -60,16 +125,5 @@ void __kprobes patch_text(void *addr, unsigned int insn)
60 .insn = insn, 125 .insn = insn,
61 }; 126 };
62 127
63 if (cache_ops_need_broadcast()) { 128 stop_machine(patch_text_stop_machine, &patch, NULL);
64 stop_machine(patch_text_stop_machine, &patch, cpu_online_mask);
65 } else {
66 bool straddles_word = IS_ENABLED(CONFIG_THUMB2_KERNEL)
67 && __opcode_is_thumb32(insn)
68 && ((uintptr_t)addr & 2);
69
70 if (straddles_word)
71 stop_machine(patch_text_stop_machine, &patch, NULL);
72 else
73 __patch_text(addr, insn);
74 }
75} 129}
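patch_map() above lets the kernel write into read-only text by installing a temporary writable alias of the target page in a fixmap slot. The same aliasing idea can be shown in userspace with two mappings of one memfd: the primary view stays read-only while the write goes through a short-lived writable mapping. This is an illustrative analogue only, not the kernel mechanism itself (Linux-specific; memfd_create needs glibc 2.27+):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int fd = memfd_create("fake-text", 0);

	if (fd < 0 || ftruncate(fd, page) != 0)
		return 1;

	/* The "kernel text": permanently mapped read-only. */
	char *ro = mmap(NULL, page, PROT_READ, MAP_SHARED, fd, 0);
	/* Short-lived writable alias, playing the fixmap slot's role. */
	char *rw = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

	if (ro == MAP_FAILED || rw == MAP_FAILED)
		return 1;

	strcpy(rw, "patched");		/* write through the alias... */
	munmap(rw, page);		/* ...then drop it, like patch_unmap() */

	printf("%s\n", ro);		/* the read-only view sees the update */
	return 0;
}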
diff --git a/arch/arm/kernel/patch.h b/arch/arm/kernel/patch.h
index b4731f2dac38..77e054c2f6cd 100644
--- a/arch/arm/kernel/patch.h
+++ b/arch/arm/kernel/patch.h
@@ -2,6 +2,16 @@
2#define _ARM_KERNEL_PATCH_H 2#define _ARM_KERNEL_PATCH_H
3 3
4void patch_text(void *addr, unsigned int insn); 4void patch_text(void *addr, unsigned int insn);
5void __patch_text(void *addr, unsigned int insn); 5void __patch_text_real(void *addr, unsigned int insn, bool remap);
6
7static inline void __patch_text(void *addr, unsigned int insn)
8{
9 __patch_text_real(addr, insn, true);
10}
11
12static inline void __patch_text_early(void *addr, unsigned int insn)
13{
14 __patch_text_real(addr, insn, false);
15}
6 16
7#endif 17#endif
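The header now exposes two wrappers around __patch_text_real() that differ only in the remap flag: the early variant writes directly because boot-time text is still writable, while the normal variant goes through the FIX_TEXT_POKE0 alias. A hedged usage sketch; apply_nop() and its is_early flag are illustrative, not part of this patch:

#include <linux/types.h>
#include "patch.h"

static void apply_nop(void *addr, unsigned int insn, bool is_early)
{
	if (is_early)
		__patch_text_early(addr, insn);	/* text not yet read-only */
	else
		__patch_text(addr, insn);	/* remap via FIX_TEXT_POKE0 */
}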
diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
index fe972a2f3df3..fdfa3a78ec8c 100644
--- a/arch/arm/kernel/process.c
+++ b/arch/arm/kernel/process.c
@@ -51,8 +51,8 @@ EXPORT_SYMBOL(__stack_chk_guard);
51static const char *processor_modes[] __maybe_unused = { 51static const char *processor_modes[] __maybe_unused = {
52 "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" , 52 "USER_26", "FIQ_26" , "IRQ_26" , "SVC_26" , "UK4_26" , "UK5_26" , "UK6_26" , "UK7_26" ,
53 "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26", 53 "UK8_26" , "UK9_26" , "UK10_26", "UK11_26", "UK12_26", "UK13_26", "UK14_26", "UK15_26",
54 "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "UK6_32" , "ABT_32" , 54 "USER_32", "FIQ_32" , "IRQ_32" , "SVC_32" , "UK4_32" , "UK5_32" , "MON_32" , "ABT_32" ,
55 "UK8_32" , "UK9_32" , "UK10_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32" 55 "UK8_32" , "UK9_32" , "HYP_32", "UND_32" , "UK12_32", "UK13_32", "UK14_32", "SYS_32"
56}; 56};
57 57
58static const char *isa_modes[] __maybe_unused = { 58static const char *isa_modes[] __maybe_unused = {
diff --git a/arch/arm/kernel/return_address.c b/arch/arm/kernel/return_address.c
index 98ea4b7eb406..24b4a04846eb 100644
--- a/arch/arm/kernel/return_address.c
+++ b/arch/arm/kernel/return_address.c
@@ -39,13 +39,12 @@ void *return_address(unsigned int level)
39{ 39{
40 struct return_address_data data; 40 struct return_address_data data;
41 struct stackframe frame; 41 struct stackframe frame;
42 register unsigned long current_sp asm ("sp");
43 42
44 data.level = level + 2; 43 data.level = level + 2;
45 data.addr = NULL; 44 data.addr = NULL;
46 45
47 frame.fp = (unsigned long)__builtin_frame_address(0); 46 frame.fp = (unsigned long)__builtin_frame_address(0);
48 frame.sp = current_sp; 47 frame.sp = current_stack_pointer;
49 frame.lr = (unsigned long)__builtin_return_address(0); 48 frame.lr = (unsigned long)__builtin_return_address(0);
50 frame.pc = (unsigned long)return_address; 49 frame.pc = (unsigned long)return_address;
51 50
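This hunk, and the matching ones in stacktrace.c and unwind.c below, replace a per-function named-register local with the shared current_stack_pointer helper. A sketch of what both forms reduce to on 32-bit ARM with GCC/Clang named-register variables; get_sp() is an illustrative name, not the kernel's actual definition:

/* Sketch only: a named-register local pins the variable to sp. */
static inline unsigned long get_sp(void)
{
	register unsigned long sp asm("sp");
	return sp;
}

/* Callers then seed their stackframe from one central definition:
 *	frame.sp = get_sp();
 */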
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index c03106378b49..8361652b6dab 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -900,6 +900,7 @@ void __init setup_arch(char **cmdline_p)
900 mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type); 900 mdesc = setup_machine_tags(__atags_pointer, __machine_arch_type);
901 machine_desc = mdesc; 901 machine_desc = mdesc;
902 machine_name = mdesc->name; 902 machine_name = mdesc->name;
903 dump_stack_set_arch_desc("%s", mdesc->name);
903 904
904 if (mdesc->reboot_mode != REBOOT_HARD) 905 if (mdesc->reboot_mode != REBOOT_HARD)
905 reboot_mode = mdesc->reboot_mode; 906 reboot_mode = mdesc->reboot_mode;
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index bd1983437205..8aa6f1b87c9e 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -592,7 +592,6 @@ do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
592 } 592 }
593 syscall = 0; 593 syscall = 0;
594 } else if (thread_flags & _TIF_UPROBE) { 594 } else if (thread_flags & _TIF_UPROBE) {
595 clear_thread_flag(TIF_UPROBE);
596 uprobe_notify_resume(regs); 595 uprobe_notify_resume(regs);
597 } else { 596 } else {
598 clear_thread_flag(TIF_NOTIFY_RESUME); 597 clear_thread_flag(TIF_NOTIFY_RESUME);
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index 13396d3d600e..5e6052e18850 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -225,7 +225,7 @@ void __cpu_die(unsigned int cpu)
225 pr_err("CPU%u: cpu didn't die\n", cpu); 225 pr_err("CPU%u: cpu didn't die\n", cpu);
226 return; 226 return;
227 } 227 }
228 printk(KERN_NOTICE "CPU%u: shutdown\n", cpu); 228 pr_notice("CPU%u: shutdown\n", cpu);
229 229
230 /* 230 /*
231 * platform_cpu_kill() is generally expected to do the powering off 231 * platform_cpu_kill() is generally expected to do the powering off
@@ -235,7 +235,7 @@ void __cpu_die(unsigned int cpu)
235 * the requesting CPU and the dying CPU actually losing power. 235 * the requesting CPU and the dying CPU actually losing power.
236 */ 236 */
237 if (!platform_cpu_kill(cpu)) 237 if (!platform_cpu_kill(cpu))
238 printk("CPU%u: unable to kill\n", cpu); 238 pr_err("CPU%u: unable to kill\n", cpu);
239} 239}
240 240
241/* 241/*
@@ -351,7 +351,7 @@ asmlinkage void secondary_start_kernel(void)
351 351
352 cpu_init(); 352 cpu_init();
353 353
354 printk("CPU%u: Booted secondary processor\n", cpu); 354 pr_debug("CPU%u: Booted secondary processor\n", cpu);
355 355
356 preempt_disable(); 356 preempt_disable();
357 trace_hardirqs_off(); 357 trace_hardirqs_off();
@@ -387,9 +387,6 @@ asmlinkage void secondary_start_kernel(void)
387 387
388void __init smp_cpus_done(unsigned int max_cpus) 388void __init smp_cpus_done(unsigned int max_cpus)
389{ 389{
390 printk(KERN_INFO "SMP: Total of %d processors activated.\n",
391 num_online_cpus());
392
393 hyp_mode_check(); 390 hyp_mode_check();
394} 391}
395 392
@@ -521,7 +518,7 @@ static void ipi_cpu_stop(unsigned int cpu)
521 if (system_state == SYSTEM_BOOTING || 518 if (system_state == SYSTEM_BOOTING ||
522 system_state == SYSTEM_RUNNING) { 519 system_state == SYSTEM_RUNNING) {
523 raw_spin_lock(&stop_lock); 520 raw_spin_lock(&stop_lock);
524 printk(KERN_CRIT "CPU%u: stopping\n", cpu); 521 pr_crit("CPU%u: stopping\n", cpu);
525 dump_stack(); 522 dump_stack();
526 raw_spin_unlock(&stop_lock); 523 raw_spin_unlock(&stop_lock);
527 } 524 }
@@ -615,8 +612,8 @@ void handle_IPI(int ipinr, struct pt_regs *regs)
615 break; 612 break;
616 613
617 default: 614 default:
618 printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%x\n", 615 pr_crit("CPU%u: Unknown IPI message 0x%x\n",
619 cpu, ipinr); 616 cpu, ipinr);
620 break; 617 break;
621 } 618 }
622 619
diff --git a/arch/arm/kernel/smp_twd.c b/arch/arm/kernel/smp_twd.c
index 93090213c71c..172c6a05d27f 100644
--- a/arch/arm/kernel/smp_twd.c
+++ b/arch/arm/kernel/smp_twd.c
@@ -199,7 +199,7 @@ static void twd_calibrate_rate(void)
199 * the timer ticks 199 * the timer ticks
200 */ 200 */
201 if (twd_timer_rate == 0) { 201 if (twd_timer_rate == 0) {
202 printk(KERN_INFO "Calibrating local timer... "); 202 pr_info("Calibrating local timer... ");
203 203
204 /* Wait for a tick to start */ 204 /* Wait for a tick to start */
205 waitjiffies = get_jiffies_64() + 1; 205 waitjiffies = get_jiffies_64() + 1;
@@ -223,7 +223,7 @@ static void twd_calibrate_rate(void)
223 223
224 twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5); 224 twd_timer_rate = (0xFFFFFFFFU - count) * (HZ / 5);
225 225
226 printk("%lu.%02luMHz.\n", twd_timer_rate / 1000000, 226 pr_cont("%lu.%02luMHz.\n", twd_timer_rate / 1000000,
227 (twd_timer_rate / 10000) % 100); 227 (twd_timer_rate / 10000) % 100);
228 } 228 }
229} 229}
diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
index f065eb05d254..92b72375c4c7 100644
--- a/arch/arm/kernel/stacktrace.c
+++ b/arch/arm/kernel/stacktrace.c
@@ -134,12 +134,10 @@ static noinline void __save_stack_trace(struct task_struct *tsk,
134 frame.pc = thread_saved_pc(tsk); 134 frame.pc = thread_saved_pc(tsk);
135#endif 135#endif
136 } else { 136 } else {
137 register unsigned long current_sp asm ("sp");
138
139 /* We don't want this function nor the caller */ 137 /* We don't want this function nor the caller */
140 data.skip += 2; 138 data.skip += 2;
141 frame.fp = (unsigned long)__builtin_frame_address(0); 139 frame.fp = (unsigned long)__builtin_frame_address(0);
142 frame.sp = current_sp; 140 frame.sp = current_stack_pointer;
143 frame.lr = (unsigned long)__builtin_return_address(0); 141 frame.lr = (unsigned long)__builtin_return_address(0);
144 frame.pc = (unsigned long)__save_stack_trace; 142 frame.pc = (unsigned long)__save_stack_trace;
145 } 143 }
diff --git a/arch/arm/kernel/swp_emulate.c b/arch/arm/kernel/swp_emulate.c
index 587fdfe1a72c..afdd51e30bec 100644
--- a/arch/arm/kernel/swp_emulate.c
+++ b/arch/arm/kernel/swp_emulate.c
@@ -260,7 +260,7 @@ static int __init swp_emulation_init(void)
260 return -ENOMEM; 260 return -ENOMEM;
261#endif /* CONFIG_PROC_FS */ 261#endif /* CONFIG_PROC_FS */
262 262
263 printk(KERN_NOTICE "Registering SWP/SWPB emulation handler\n"); 263 pr_notice("Registering SWP/SWPB emulation handler\n");
264 register_undef_hook(&swp_hook); 264 register_undef_hook(&swp_hook);
265 265
266 return 0; 266 return 0;
diff --git a/arch/arm/kernel/thumbee.c b/arch/arm/kernel/thumbee.c
index 80f0d69205e7..8ff8dbfbe9fb 100644
--- a/arch/arm/kernel/thumbee.c
+++ b/arch/arm/kernel/thumbee.c
@@ -72,7 +72,7 @@ static int __init thumbee_init(void)
72 if ((pfr0 & 0x0000f000) != 0x00001000) 72 if ((pfr0 & 0x0000f000) != 0x00001000)
73 return 0; 73 return 0;
74 74
75 printk(KERN_INFO "ThumbEE CPU extension supported.\n"); 75 pr_info("ThumbEE CPU extension supported.\n");
76 elf_hwcap |= HWCAP_THUMBEE; 76 elf_hwcap |= HWCAP_THUMBEE;
77 thread_register_notifier(&thumbee_notifier_block); 77 thread_register_notifier(&thumbee_notifier_block);
78 78
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 89cfdd6e50cb..08b7847bf912 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -165,7 +165,7 @@ static void update_cpu_capacity(unsigned int cpu)
165 165
166 set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity); 166 set_capacity_scale(cpu, cpu_capacity(cpu) / middle_capacity);
167 167
168 printk(KERN_INFO "CPU%u: update cpu_capacity %lu\n", 168 pr_info("CPU%u: update cpu_capacity %lu\n",
169 cpu, arch_scale_cpu_capacity(NULL, cpu)); 169 cpu, arch_scale_cpu_capacity(NULL, cpu));
170} 170}
171 171
@@ -269,7 +269,7 @@ void store_cpu_topology(unsigned int cpuid)
269 269
270 update_cpu_capacity(cpuid); 270 update_cpu_capacity(cpuid);
271 271
272 printk(KERN_INFO "CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n", 272 pr_info("CPU%u: thread %d, cpu %d, socket %d, mpidr %x\n",
273 cpuid, cpu_topology[cpuid].thread_id, 273 cpuid, cpu_topology[cpuid].thread_id,
274 cpu_topology[cpuid].core_id, 274 cpu_topology[cpuid].core_id,
275 cpu_topology[cpuid].socket_id, mpidr); 275 cpu_topology[cpuid].socket_id, mpidr);
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 9f5d81881eb6..788e23fe64d8 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -198,14 +198,14 @@ static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
198 } 198 }
199 199
200 if (!fp) { 200 if (!fp) {
201 printk("no frame pointer"); 201 pr_cont("no frame pointer");
202 ok = 0; 202 ok = 0;
203 } else if (verify_stack(fp)) { 203 } else if (verify_stack(fp)) {
204 printk("invalid frame pointer 0x%08x", fp); 204 pr_cont("invalid frame pointer 0x%08x", fp);
205 ok = 0; 205 ok = 0;
206 } else if (fp < (unsigned long)end_of_stack(tsk)) 206 } else if (fp < (unsigned long)end_of_stack(tsk))
207 printk("frame pointer underflow"); 207 pr_cont("frame pointer underflow");
208 printk("\n"); 208 pr_cont("\n");
209 209
210 if (ok) 210 if (ok)
211 c_backtrace(fp, mode); 211 c_backtrace(fp, mode);
@@ -240,8 +240,8 @@ static int __die(const char *str, int err, struct pt_regs *regs)
240 static int die_counter; 240 static int die_counter;
241 int ret; 241 int ret;
242 242
243 printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP 243 pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP S_ISA "\n",
244 S_ISA "\n", str, err, ++die_counter); 244 str, err, ++die_counter);
245 245
246 /* trap and error numbers are mostly meaningless on ARM */ 246 /* trap and error numbers are mostly meaningless on ARM */
247 ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV); 247 ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
@@ -250,8 +250,8 @@ static int __die(const char *str, int err, struct pt_regs *regs)
250 250
251 print_modules(); 251 print_modules();
252 __show_regs(regs); 252 __show_regs(regs);
253 printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n", 253 pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
254 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk)); 254 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));
255 255
256 if (!user_mode(regs) || in_interrupt()) { 256 if (!user_mode(regs) || in_interrupt()) {
257 dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp, 257 dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
@@ -446,7 +446,7 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
446die_sig: 446die_sig:
447#ifdef CONFIG_DEBUG_USER 447#ifdef CONFIG_DEBUG_USER
448 if (user_debug & UDBG_UNDEFINED) { 448 if (user_debug & UDBG_UNDEFINED) {
449 printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n", 449 pr_info("%s (%d): undefined instruction: pc=%p\n",
450 current->comm, task_pid_nr(current), pc); 450 current->comm, task_pid_nr(current), pc);
451 __show_regs(regs); 451 __show_regs(regs);
452 dump_instr(KERN_INFO, regs); 452 dump_instr(KERN_INFO, regs);
@@ -496,7 +496,7 @@ asmlinkage void bad_mode(struct pt_regs *regs, int reason)
496{ 496{
497 console_verbose(); 497 console_verbose();
498 498
499 printk(KERN_CRIT "Bad mode in %s handler detected\n", handler[reason]); 499 pr_crit("Bad mode in %s handler detected\n", handler[reason]);
500 500
501 die("Oops - bad mode", regs, 0); 501 die("Oops - bad mode", regs, 0);
502 local_irq_disable(); 502 local_irq_disable();
@@ -516,7 +516,7 @@ static int bad_syscall(int n, struct pt_regs *regs)
516 516
517#ifdef CONFIG_DEBUG_USER 517#ifdef CONFIG_DEBUG_USER
518 if (user_debug & UDBG_SYSCALL) { 518 if (user_debug & UDBG_SYSCALL) {
519 printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n", 519 pr_err("[%d] %s: obsolete system call %08x.\n",
520 task_pid_nr(current), current->comm, n); 520 task_pid_nr(current), current->comm, n);
521 dump_instr(KERN_ERR, regs); 521 dump_instr(KERN_ERR, regs);
522 } 522 }
@@ -694,7 +694,7 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
694 * something catastrophic has happened 694 * something catastrophic has happened
695 */ 695 */
696 if (user_debug & UDBG_SYSCALL) { 696 if (user_debug & UDBG_SYSCALL) {
697 printk("[%d] %s: arm syscall %d\n", 697 pr_err("[%d] %s: arm syscall %d\n",
698 task_pid_nr(current), current->comm, no); 698 task_pid_nr(current), current->comm, no);
699 dump_instr("", regs); 699 dump_instr("", regs);
700 if (user_mode(regs)) { 700 if (user_mode(regs)) {
@@ -753,8 +753,8 @@ late_initcall(arm_mrc_hook_init);
753 753
754void __bad_xchg(volatile void *ptr, int size) 754void __bad_xchg(volatile void *ptr, int size)
755{ 755{
756 printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n", 756 pr_err("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
757 __builtin_return_address(0), ptr, size); 757 __builtin_return_address(0), ptr, size);
758 BUG(); 758 BUG();
759} 759}
760EXPORT_SYMBOL(__bad_xchg); 760EXPORT_SYMBOL(__bad_xchg);
@@ -771,8 +771,8 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs)
771 771
772#ifdef CONFIG_DEBUG_USER 772#ifdef CONFIG_DEBUG_USER
773 if (user_debug & UDBG_BADABORT) { 773 if (user_debug & UDBG_BADABORT) {
774 printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n", 774 pr_err("[%d] %s: bad data abort: code %d instr 0x%08lx\n",
775 task_pid_nr(current), current->comm, code, instr); 775 task_pid_nr(current), current->comm, code, instr);
776 dump_instr(KERN_ERR, regs); 776 dump_instr(KERN_ERR, regs);
777 show_pte(current->mm, addr); 777 show_pte(current->mm, addr);
778 } 778 }
@@ -788,29 +788,29 @@ baddataabort(int code, unsigned long instr, struct pt_regs *regs)
788 788
789void __readwrite_bug(const char *fn) 789void __readwrite_bug(const char *fn)
790{ 790{
791 printk("%s called, but not implemented\n", fn); 791 pr_err("%s called, but not implemented\n", fn);
792 BUG(); 792 BUG();
793} 793}
794EXPORT_SYMBOL(__readwrite_bug); 794EXPORT_SYMBOL(__readwrite_bug);
795 795
796void __pte_error(const char *file, int line, pte_t pte) 796void __pte_error(const char *file, int line, pte_t pte)
797{ 797{
798 printk("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte)); 798 pr_err("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte));
799} 799}
800 800
801void __pmd_error(const char *file, int line, pmd_t pmd) 801void __pmd_error(const char *file, int line, pmd_t pmd)
802{ 802{
803 printk("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd)); 803 pr_err("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd));
804} 804}
805 805
806void __pgd_error(const char *file, int line, pgd_t pgd) 806void __pgd_error(const char *file, int line, pgd_t pgd)
807{ 807{
808 printk("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd)); 808 pr_err("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd));
809} 809}
810 810
811asmlinkage void __div0(void) 811asmlinkage void __div0(void)
812{ 812{
813 printk("Division by zero in kernel.\n"); 813 pr_err("Division by zero in kernel.\n");
814 dump_stack(); 814 dump_stack();
815} 815}
816EXPORT_SYMBOL(__div0); 816EXPORT_SYMBOL(__div0);
diff --git a/arch/arm/kernel/unwind.c b/arch/arm/kernel/unwind.c
index cbb85c5fabf9..0bee233fef9a 100644
--- a/arch/arm/kernel/unwind.c
+++ b/arch/arm/kernel/unwind.c
@@ -471,7 +471,6 @@ int unwind_frame(struct stackframe *frame)
471void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk) 471void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
472{ 472{
473 struct stackframe frame; 473 struct stackframe frame;
474 register unsigned long current_sp asm ("sp");
475 474
476 pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk); 475 pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
477 476
@@ -485,7 +484,7 @@ void unwind_backtrace(struct pt_regs *regs, struct task_struct *tsk)
485 frame.pc = regs->ARM_lr; 484 frame.pc = regs->ARM_lr;
486 } else if (tsk == current) { 485 } else if (tsk == current) {
487 frame.fp = (unsigned long)__builtin_frame_address(0); 486 frame.fp = (unsigned long)__builtin_frame_address(0);
488 frame.sp = current_sp; 487 frame.sp = current_stack_pointer;
489 frame.lr = (unsigned long)__builtin_return_address(0); 488 frame.lr = (unsigned long)__builtin_return_address(0);
490 frame.pc = (unsigned long)unwind_backtrace; 489 frame.pc = (unsigned long)unwind_backtrace;
491 } else { 490 } else {
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 8e95aa47457a..b31aa73e8076 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -8,6 +8,9 @@
8#include <asm/thread_info.h> 8#include <asm/thread_info.h>
9#include <asm/memory.h> 9#include <asm/memory.h>
10#include <asm/page.h> 10#include <asm/page.h>
11#ifdef CONFIG_ARM_KERNMEM_PERMS
12#include <asm/pgtable.h>
13#endif
11 14
12#define PROC_INFO \ 15#define PROC_INFO \
13 . = ALIGN(4); \ 16 . = ALIGN(4); \
@@ -90,6 +93,11 @@ SECTIONS
90 _text = .; 93 _text = .;
91 HEAD_TEXT 94 HEAD_TEXT
92 } 95 }
96
97#ifdef CONFIG_ARM_KERNMEM_PERMS
98 . = ALIGN(1<<SECTION_SHIFT);
99#endif
100
93 .text : { /* Real text segment */ 101 .text : { /* Real text segment */
94 _stext = .; /* Text and read-only data */ 102 _stext = .; /* Text and read-only data */
95 __exception_text_start = .; 103 __exception_text_start = .;
@@ -112,6 +120,9 @@ SECTIONS
112 ARM_CPU_KEEP(PROC_INFO) 120 ARM_CPU_KEEP(PROC_INFO)
113 } 121 }
114 122
123#ifdef CONFIG_DEBUG_RODATA
124 . = ALIGN(1<<SECTION_SHIFT);
125#endif
115 RO_DATA(PAGE_SIZE) 126 RO_DATA(PAGE_SIZE)
116 127
117 . = ALIGN(4); 128 . = ALIGN(4);
@@ -145,7 +156,11 @@ SECTIONS
145 _etext = .; /* End of text and rodata section */ 156 _etext = .; /* End of text and rodata section */
146 157
147#ifndef CONFIG_XIP_KERNEL 158#ifndef CONFIG_XIP_KERNEL
159# ifdef CONFIG_ARM_KERNMEM_PERMS
160 . = ALIGN(1<<SECTION_SHIFT);
161# else
148 . = ALIGN(PAGE_SIZE); 162 . = ALIGN(PAGE_SIZE);
163# endif
149 __init_begin = .; 164 __init_begin = .;
150#endif 165#endif
151 /* 166 /*
@@ -219,7 +234,11 @@ SECTIONS
219 __data_loc = ALIGN(4); /* location in binary */ 234 __data_loc = ALIGN(4); /* location in binary */
220 . = PAGE_OFFSET + TEXT_OFFSET; 235 . = PAGE_OFFSET + TEXT_OFFSET;
221#else 236#else
237#ifdef CONFIG_ARM_KERNMEM_PERMS
238 . = ALIGN(1<<SECTION_SHIFT);
239#else
222 . = ALIGN(THREAD_SIZE); 240 . = ALIGN(THREAD_SIZE);
241#endif
223 __init_end = .; 242 __init_end = .;
224 __data_loc = .; 243 __data_loc = .;
225#endif 244#endif
diff --git a/arch/arm/kernel/xscale-cp0.c b/arch/arm/kernel/xscale-cp0.c
index e42adc6bcdb1..bdbb8853a19b 100644
--- a/arch/arm/kernel/xscale-cp0.c
+++ b/arch/arm/kernel/xscale-cp0.c
@@ -157,15 +157,14 @@ static int __init xscale_cp0_init(void)
157 157
158 if (cpu_has_iwmmxt()) { 158 if (cpu_has_iwmmxt()) {
159#ifndef CONFIG_IWMMXT 159#ifndef CONFIG_IWMMXT
160 printk(KERN_WARNING "CAUTION: XScale iWMMXt coprocessor " 160 pr_warn("CAUTION: XScale iWMMXt coprocessor detected, but kernel support is missing.\n");
161 "detected, but kernel support is missing.\n");
162#else 161#else
163 printk(KERN_INFO "XScale iWMMXt coprocessor detected.\n"); 162 pr_info("XScale iWMMXt coprocessor detected.\n");
164 elf_hwcap |= HWCAP_IWMMXT; 163 elf_hwcap |= HWCAP_IWMMXT;
165 thread_register_notifier(&iwmmxt_notifier_block); 164 thread_register_notifier(&iwmmxt_notifier_block);
166#endif 165#endif
167 } else { 166 } else {
168 printk(KERN_INFO "XScale DSP coprocessor detected.\n"); 167 pr_info("XScale DSP coprocessor detected.\n");
169 thread_register_notifier(&dsp_notifier_block); 168 thread_register_notifier(&dsp_notifier_block);
170 cp_access |= 1; 169 cp_access |= 1;
171 } 170 }
diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
index 66a477a3e3cc..7a235b9952be 100644
--- a/arch/arm/lib/copy_from_user.S
+++ b/arch/arm/lib/copy_from_user.S
@@ -12,6 +12,7 @@
12 12
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <asm/assembler.h> 14#include <asm/assembler.h>
15#include <asm/unwind.h>
15 16
16/* 17/*
17 * Prototype: 18 * Prototype:
@@ -77,6 +78,10 @@
77 stmdb sp!, {r0, r2, r3, \reg1, \reg2} 78 stmdb sp!, {r0, r2, r3, \reg1, \reg2}
78 .endm 79 .endm
79 80
81 .macro usave reg1 reg2
82 UNWIND( .save {r0, r2, r3, \reg1, \reg2} )
83 .endm
84
80 .macro exit reg1 reg2 85 .macro exit reg1 reg2
81 add sp, sp, #8 86 add sp, sp, #8
82 ldmfd sp!, {r0, \reg1, \reg2} 87 ldmfd sp!, {r0, \reg1, \reg2}
diff --git a/arch/arm/lib/copy_template.S b/arch/arm/lib/copy_template.S
index 3bc8eb811a73..652e4d98cd47 100644
--- a/arch/arm/lib/copy_template.S
+++ b/arch/arm/lib/copy_template.S
@@ -53,6 +53,12 @@
53 * data as needed by the implementation including this code. Called 53 * data as needed by the implementation including this code. Called
54 * upon code entry. 54 * upon code entry.
55 * 55 *
56 * usave reg1 reg2
57 *
 58 * The unwind annotation macro corresponding to the 'enter' macro.
 59 * It tells the unwinder that a prior 'enter' macro preserved the
 60 * provided registers and additional data on the stack.
61 *
56 * exit reg1 reg2 62 * exit reg1 reg2
57 * 63 *
58 * Restore registers with the values previously saved with the 64 * Restore registers with the values previously saved with the
@@ -67,7 +73,12 @@
67 */ 73 */
68 74
69 75
76 UNWIND( .fnstart )
70 enter r4, lr 77 enter r4, lr
78 UNWIND( .fnend )
79
80 UNWIND( .fnstart )
81 usave r4, lr @ in first stmdb block
71 82
72 subs r2, r2, #4 83 subs r2, r2, #4
73 blt 8f 84 blt 8f
@@ -79,6 +90,11 @@
79 90
801: subs r2, r2, #(28) 911: subs r2, r2, #(28)
81 stmfd sp!, {r5 - r8} 92 stmfd sp!, {r5 - r8}
93 UNWIND( .fnend )
94
95 UNWIND( .fnstart )
96 usave r4, lr
97 UNWIND( .save {r5 - r8} ) @ in second stmfd block
82 blt 5f 98 blt 5f
83 99
84 CALGN( ands ip, r0, #31 ) 100 CALGN( ands ip, r0, #31 )
@@ -144,7 +160,10 @@
144 CALGN( bcs 2b ) 160 CALGN( bcs 2b )
145 161
1467: ldmfd sp!, {r5 - r8} 1627: ldmfd sp!, {r5 - r8}
163 UNWIND( .fnend ) @ end of second stmfd block
147 164
165 UNWIND( .fnstart )
166 usave r4, lr @ still in first stmdb block
1488: movs r2, r2, lsl #31 1678: movs r2, r2, lsl #31
149 ldr1b r1, r3, ne, abort=21f 168 ldr1b r1, r3, ne, abort=21f
150 ldr1b r1, r4, cs, abort=21f 169 ldr1b r1, r4, cs, abort=21f
@@ -173,10 +192,13 @@
173 ldr1w r1, lr, abort=21f 192 ldr1w r1, lr, abort=21f
174 beq 17f 193 beq 17f
175 bgt 18f 194 bgt 18f
195 UNWIND( .fnend )
176 196
177 197
178 .macro forward_copy_shift pull push 198 .macro forward_copy_shift pull push
179 199
200 UNWIND( .fnstart )
201 usave r4, lr @ still in first stmdb block
180 subs r2, r2, #28 202 subs r2, r2, #28
181 blt 14f 203 blt 14f
182 204
@@ -187,7 +209,11 @@
187 CALGN( bcc 15f ) 209 CALGN( bcc 15f )
188 210
18911: stmfd sp!, {r5 - r9} 21111: stmfd sp!, {r5 - r9}
212 UNWIND( .fnend )
190 213
214 UNWIND( .fnstart )
215 usave r4, lr
216 UNWIND( .save {r5 - r9} ) @ in new second stmfd block
191 PLD( pld [r1, #0] ) 217 PLD( pld [r1, #0] )
192 PLD( subs r2, r2, #96 ) 218 PLD( subs r2, r2, #96 )
193 PLD( pld [r1, #28] ) 219 PLD( pld [r1, #28] )
@@ -221,7 +247,10 @@
221 PLD( bge 13b ) 247 PLD( bge 13b )
222 248
223 ldmfd sp!, {r5 - r9} 249 ldmfd sp!, {r5 - r9}
250 UNWIND( .fnend ) @ end of the second stmfd block
224 251
252 UNWIND( .fnstart )
253 usave r4, lr @ still in first stmdb block
22514: ands ip, r2, #28 25414: ands ip, r2, #28
226 beq 16f 255 beq 16f
227 256
@@ -236,6 +265,7 @@
236 265
23716: sub r1, r1, #(\push / 8) 26616: sub r1, r1, #(\push / 8)
238 b 8b 267 b 8b
268 UNWIND( .fnend )
239 269
240 .endm 270 .endm
241 271
diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
index d066df686e17..a9d3db16ecb5 100644
--- a/arch/arm/lib/copy_to_user.S
+++ b/arch/arm/lib/copy_to_user.S
@@ -12,6 +12,7 @@
12 12
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <asm/assembler.h> 14#include <asm/assembler.h>
15#include <asm/unwind.h>
15 16
16/* 17/*
17 * Prototype: 18 * Prototype:
@@ -80,6 +81,10 @@
80 stmdb sp!, {r0, r2, r3, \reg1, \reg2} 81 stmdb sp!, {r0, r2, r3, \reg1, \reg2}
81 .endm 82 .endm
82 83
84 .macro usave reg1 reg2
85 UNWIND( .save {r0, r2, r3, \reg1, \reg2} )
86 .endm
87
83 .macro exit reg1 reg2 88 .macro exit reg1 reg2
84 add sp, sp, #8 89 add sp, sp, #8
85 ldmfd sp!, {r0, \reg1, \reg2} 90 ldmfd sp!, {r0, \reg1, \reg2}
diff --git a/arch/arm/lib/memcpy.S b/arch/arm/lib/memcpy.S
index a9b9e2287a09..7797e81e40e0 100644
--- a/arch/arm/lib/memcpy.S
+++ b/arch/arm/lib/memcpy.S
@@ -12,6 +12,7 @@
12 12
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <asm/assembler.h> 14#include <asm/assembler.h>
15#include <asm/unwind.h>
15 16
16#define LDR1W_SHIFT 0 17#define LDR1W_SHIFT 0
17#define STR1W_SHIFT 0 18#define STR1W_SHIFT 0
@@ -48,6 +49,10 @@
48 stmdb sp!, {r0, \reg1, \reg2} 49 stmdb sp!, {r0, \reg1, \reg2}
49 .endm 50 .endm
50 51
52 .macro usave reg1 reg2
53 UNWIND( .save {r0, \reg1, \reg2} )
54 .endm
55
51 .macro exit reg1 reg2 56 .macro exit reg1 reg2
52 ldmfd sp!, {r0, \reg1, \reg2} 57 ldmfd sp!, {r0, \reg1, \reg2}
53 .endm 58 .endm
diff --git a/arch/arm/lib/memmove.S b/arch/arm/lib/memmove.S
index d1fc0c0c342c..69a9d47fc5ab 100644
--- a/arch/arm/lib/memmove.S
+++ b/arch/arm/lib/memmove.S
@@ -12,6 +12,7 @@
12 12
13#include <linux/linkage.h> 13#include <linux/linkage.h>
14#include <asm/assembler.h> 14#include <asm/assembler.h>
15#include <asm/unwind.h>
15 16
16 .text 17 .text
17 18
@@ -27,12 +28,17 @@
27 */ 28 */
28 29
29ENTRY(memmove) 30ENTRY(memmove)
31 UNWIND( .fnstart )
30 32
31 subs ip, r0, r1 33 subs ip, r0, r1
32 cmphi r2, ip 34 cmphi r2, ip
33 bls memcpy 35 bls memcpy
34 36
35 stmfd sp!, {r0, r4, lr} 37 stmfd sp!, {r0, r4, lr}
38 UNWIND( .fnend )
39
40 UNWIND( .fnstart )
41 UNWIND( .save {r0, r4, lr} ) @ in first stmfd block
36 add r1, r1, r2 42 add r1, r1, r2
37 add r0, r0, r2 43 add r0, r0, r2
38 subs r2, r2, #4 44 subs r2, r2, #4
@@ -45,6 +51,11 @@ ENTRY(memmove)
45 51
461: subs r2, r2, #(28) 521: subs r2, r2, #(28)
47 stmfd sp!, {r5 - r8} 53 stmfd sp!, {r5 - r8}
54 UNWIND( .fnend )
55
56 UNWIND( .fnstart )
57 UNWIND( .save {r0, r4, lr} )
58 UNWIND( .save {r5 - r8} ) @ in second stmfd block
48 blt 5f 59 blt 5f
49 60
50 CALGN( ands ip, r0, #31 ) 61 CALGN( ands ip, r0, #31 )
@@ -97,6 +108,10 @@ ENTRY(memmove)
97 CALGN( bcs 2b ) 108 CALGN( bcs 2b )
98 109
997: ldmfd sp!, {r5 - r8} 1107: ldmfd sp!, {r5 - r8}
111 UNWIND( .fnend ) @ end of second stmfd block
112
113 UNWIND( .fnstart )
114 UNWIND( .save {r0, r4, lr} ) @ still in first stmfd block
100 115
1018: movs r2, r2, lsl #31 1168: movs r2, r2, lsl #31
102 ldrneb r3, [r1, #-1]! 117 ldrneb r3, [r1, #-1]!
@@ -124,10 +139,13 @@ ENTRY(memmove)
124 ldr r3, [r1, #0] 139 ldr r3, [r1, #0]
125 beq 17f 140 beq 17f
126 blt 18f 141 blt 18f
142 UNWIND( .fnend )
127 143
128 144
129 .macro backward_copy_shift push pull 145 .macro backward_copy_shift push pull
130 146
147 UNWIND( .fnstart )
148 UNWIND( .save {r0, r4, lr} ) @ still in first stmfd block
131 subs r2, r2, #28 149 subs r2, r2, #28
132 blt 14f 150 blt 14f
133 151
@@ -137,6 +155,11 @@ ENTRY(memmove)
137 CALGN( bcc 15f ) 155 CALGN( bcc 15f )
138 156
13911: stmfd sp!, {r5 - r9} 15711: stmfd sp!, {r5 - r9}
158 UNWIND( .fnend )
159
160 UNWIND( .fnstart )
161 UNWIND( .save {r0, r4, lr} )
162 UNWIND( .save {r5 - r9} ) @ in new second stmfd block
140 163
141 PLD( pld [r1, #-4] ) 164 PLD( pld [r1, #-4] )
142 PLD( subs r2, r2, #96 ) 165 PLD( subs r2, r2, #96 )
@@ -171,6 +194,10 @@ ENTRY(memmove)
171 PLD( bge 13b ) 194 PLD( bge 13b )
172 195
173 ldmfd sp!, {r5 - r9} 196 ldmfd sp!, {r5 - r9}
197 UNWIND( .fnend ) @ end of the second stmfd block
198
199 UNWIND( .fnstart )
200 UNWIND( .save {r0, r4, lr} ) @ still in first stmfd block
174 201
17514: ands ip, r2, #28 20214: ands ip, r2, #28
176 beq 16f 203 beq 16f
@@ -186,6 +213,7 @@ ENTRY(memmove)
186 213
18716: add r1, r1, #(\pull / 8) 21416: add r1, r1, #(\pull / 8)
188 b 8b 215 b 8b
216 UNWIND( .fnend )
189 217
190 .endm 218 .endm
191 219
diff --git a/arch/arm/lib/memset.S b/arch/arm/lib/memset.S
index 671455c854fa..a4ee97b5a2bf 100644
--- a/arch/arm/lib/memset.S
+++ b/arch/arm/lib/memset.S
@@ -11,11 +11,13 @@
11 */ 11 */
12#include <linux/linkage.h> 12#include <linux/linkage.h>
13#include <asm/assembler.h> 13#include <asm/assembler.h>
14#include <asm/unwind.h>
14 15
15 .text 16 .text
16 .align 5 17 .align 5
17 18
18ENTRY(memset) 19ENTRY(memset)
20UNWIND( .fnstart )
19 ands r3, r0, #3 @ 1 unaligned? 21 ands r3, r0, #3 @ 1 unaligned?
20 mov ip, r0 @ preserve r0 as return value 22 mov ip, r0 @ preserve r0 as return value
21 bne 6f @ 1 23 bne 6f @ 1
@@ -34,6 +36,9 @@ ENTRY(memset)
34 * We need 2 extra registers for this loop - use r8 and the LR 36 * We need 2 extra registers for this loop - use r8 and the LR
35 */ 37 */
36 stmfd sp!, {r8, lr} 38 stmfd sp!, {r8, lr}
39UNWIND( .fnend )
40UNWIND( .fnstart )
41UNWIND( .save {r8, lr} )
37 mov r8, r1 42 mov r8, r1
38 mov lr, r1 43 mov lr, r1
39 44
@@ -53,6 +58,7 @@ ENTRY(memset)
53 tst r2, #16 58 tst r2, #16
54 stmneia ip!, {r1, r3, r8, lr} 59 stmneia ip!, {r1, r3, r8, lr}
55 ldmfd sp!, {r8, lr} 60 ldmfd sp!, {r8, lr}
61UNWIND( .fnend )
56 62
57#else 63#else
58 64
@@ -62,6 +68,9 @@ ENTRY(memset)
62 */ 68 */
63 69
64 stmfd sp!, {r4-r8, lr} 70 stmfd sp!, {r4-r8, lr}
71UNWIND( .fnend )
72UNWIND( .fnstart )
73UNWIND( .save {r4-r8, lr} )
65 mov r4, r1 74 mov r4, r1
66 mov r5, r1 75 mov r5, r1
67 mov r6, r1 76 mov r6, r1
@@ -94,9 +103,11 @@ ENTRY(memset)
94 tst r2, #16 103 tst r2, #16
95 stmneia ip!, {r4-r7} 104 stmneia ip!, {r4-r7}
96 ldmfd sp!, {r4-r8, lr} 105 ldmfd sp!, {r4-r8, lr}
106UNWIND( .fnend )
97 107
98#endif 108#endif
99 109
110UNWIND( .fnstart )
1004: tst r2, #8 1114: tst r2, #8
101 stmneia ip!, {r1, r3} 112 stmneia ip!, {r1, r3}
102 tst r2, #4 113 tst r2, #4
@@ -120,4 +131,5 @@ ENTRY(memset)
120 strb r1, [ip], #1 @ 1 131 strb r1, [ip], #1 @ 1
121 add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3)) 132 add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
122 b 1b 133 b 1b
134UNWIND( .fnend )
123ENDPROC(memset) 135ENDPROC(memset)
diff --git a/arch/arm/lib/memzero.S b/arch/arm/lib/memzero.S
index 385ccb306fa2..0eded952e089 100644
--- a/arch/arm/lib/memzero.S
+++ b/arch/arm/lib/memzero.S
@@ -9,6 +9,7 @@
9 */ 9 */
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <asm/assembler.h> 11#include <asm/assembler.h>
12#include <asm/unwind.h>
12 13
13 .text 14 .text
14 .align 5 15 .align 5
@@ -18,6 +19,7 @@
18 * mis-aligned by, and r1 is the number of bytes. If r1 < 4, then we 19 * mis-aligned by, and r1 is the number of bytes. If r1 < 4, then we
19 * don't bother; we use byte stores instead. 20 * don't bother; we use byte stores instead.
20 */ 21 */
22UNWIND( .fnstart )
211: subs r1, r1, #4 @ 1 do we have enough 231: subs r1, r1, #4 @ 1 do we have enough
22 blt 5f @ 1 bytes to align with? 24 blt 5f @ 1 bytes to align with?
23 cmp r3, #2 @ 1 25 cmp r3, #2 @ 1
@@ -47,6 +49,9 @@ ENTRY(__memzero)
47 * use the LR 49 * use the LR
48 */ 50 */
49 str lr, [sp, #-4]! @ 1 51 str lr, [sp, #-4]! @ 1
52UNWIND( .fnend )
53UNWIND( .fnstart )
54UNWIND( .save {lr} )
50 mov ip, r2 @ 1 55 mov ip, r2 @ 1
51 mov lr, r2 @ 1 56 mov lr, r2 @ 1
52 57
@@ -66,6 +71,7 @@ ENTRY(__memzero)
66 tst r1, #16 @ 1 16 bytes or more? 71 tst r1, #16 @ 1 16 bytes or more?
67 stmneia r0!, {r2, r3, ip, lr} @ 4 72 stmneia r0!, {r2, r3, ip, lr} @ 4
68 ldr lr, [sp], #4 @ 1 73 ldr lr, [sp], #4 @ 1
74UNWIND( .fnend )
69 75
70#else 76#else
71 77
@@ -75,6 +81,9 @@ ENTRY(__memzero)
75 */ 81 */
76 82
77 stmfd sp!, {r4-r7, lr} 83 stmfd sp!, {r4-r7, lr}
84UNWIND( .fnend )
85UNWIND( .fnstart )
86UNWIND( .save {r4-r7, lr} )
78 mov r4, r2 87 mov r4, r2
79 mov r5, r2 88 mov r5, r2
80 mov r6, r2 89 mov r6, r2
@@ -105,9 +114,11 @@ ENTRY(__memzero)
105 tst r1, #16 114 tst r1, #16
106 stmneia r0!, {r4-r7} 115 stmneia r0!, {r4-r7}
107 ldmfd sp!, {r4-r7, lr} 116 ldmfd sp!, {r4-r7, lr}
117UNWIND( .fnend )
108 118
109#endif 119#endif
110 120
121UNWIND( .fnstart )
1114: tst r1, #8 @ 1 8 bytes or more? 1224: tst r1, #8 @ 1 8 bytes or more?
112 stmneia r0!, {r2, r3} @ 2 123 stmneia r0!, {r2, r3} @ 2
113 tst r1, #4 @ 1 4 bytes or more? 124 tst r1, #4 @ 1 4 bytes or more?
@@ -122,4 +133,5 @@ ENTRY(__memzero)
122 tst r1, #1 @ 1 a byte left over 133 tst r1, #1 @ 1 a byte left over
123 strneb r2, [r0], #1 @ 1 134 strneb r2, [r0], #1 @ 1
124 ret lr @ 1 135 ret lr @ 1
136UNWIND( .fnend )
125ENDPROC(__memzero) 137ENDPROC(__memzero)
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index b9e3f1c61baf..e4a00bafffc1 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -34,7 +34,7 @@ config ARCH_EXYNOS3
34 bool "SAMSUNG EXYNOS3" 34 bool "SAMSUNG EXYNOS3"
35 select ARM_CPU_SUSPEND if PM 35 select ARM_CPU_SUSPEND if PM
36 help 36 help
37 Samsung EXYNOS3 (Crotex-A7) SoC based systems 37 Samsung EXYNOS3 (Cortex-A7) SoC based systems
38 38
39config ARCH_EXYNOS4 39config ARCH_EXYNOS4
40 bool "SAMSUNG EXYNOS4" 40 bool "SAMSUNG EXYNOS4"
diff --git a/arch/arm/mach-sa1100/clock.c b/arch/arm/mach-sa1100/clock.c
index 9fa6a990cf03..03c75a811cb0 100644
--- a/arch/arm/mach-sa1100/clock.c
+++ b/arch/arm/mach-sa1100/clock.c
@@ -15,10 +15,12 @@
15#include <linux/clkdev.h> 15#include <linux/clkdev.h>
16 16
17#include <mach/hardware.h> 17#include <mach/hardware.h>
18#include <mach/generic.h>
18 19
19struct clkops { 20struct clkops {
20 void (*enable)(struct clk *); 21 void (*enable)(struct clk *);
21 void (*disable)(struct clk *); 22 void (*disable)(struct clk *);
23 unsigned long (*get_rate)(struct clk *);
22}; 24};
23 25
24struct clk { 26struct clk {
@@ -33,13 +35,6 @@ struct clk clk_##_name = { \
33 35
34static DEFINE_SPINLOCK(clocks_lock); 36static DEFINE_SPINLOCK(clocks_lock);
35 37
36/* Dummy clk routine to build generic kernel parts that may be using them */
37unsigned long clk_get_rate(struct clk *clk)
38{
39 return 0;
40}
41EXPORT_SYMBOL(clk_get_rate);
42
43static void clk_gpio27_enable(struct clk *clk) 38static void clk_gpio27_enable(struct clk *clk)
44{ 39{
45 /* 40 /*
@@ -58,6 +53,19 @@ static void clk_gpio27_disable(struct clk *clk)
58 GAFR &= ~GPIO_32_768kHz; 53 GAFR &= ~GPIO_32_768kHz;
59} 54}
60 55
56static void clk_cpu_enable(struct clk *clk)
57{
58}
59
60static void clk_cpu_disable(struct clk *clk)
61{
62}
63
64static unsigned long clk_cpu_get_rate(struct clk *clk)
65{
66 return sa11x0_getspeed(0) * 1000;
67}
68
61int clk_enable(struct clk *clk) 69int clk_enable(struct clk *clk)
62{ 70{
63 unsigned long flags; 71 unsigned long flags;
@@ -87,16 +95,37 @@ void clk_disable(struct clk *clk)
87} 95}
88EXPORT_SYMBOL(clk_disable); 96EXPORT_SYMBOL(clk_disable);
89 97
98unsigned long clk_get_rate(struct clk *clk)
99{
100 if (clk && clk->ops && clk->ops->get_rate)
101 return clk->ops->get_rate(clk);
102
103 return 0;
104}
105EXPORT_SYMBOL(clk_get_rate);
106
90const struct clkops clk_gpio27_ops = { 107const struct clkops clk_gpio27_ops = {
91 .enable = clk_gpio27_enable, 108 .enable = clk_gpio27_enable,
92 .disable = clk_gpio27_disable, 109 .disable = clk_gpio27_disable,
93}; 110};
94 111
112const struct clkops clk_cpu_ops = {
113 .enable = clk_cpu_enable,
114 .disable = clk_cpu_disable,
115 .get_rate = clk_cpu_get_rate,
116};
117
95static DEFINE_CLK(gpio27, &clk_gpio27_ops); 118static DEFINE_CLK(gpio27, &clk_gpio27_ops);
96 119
120static DEFINE_CLK(cpu, &clk_cpu_ops);
121
97static struct clk_lookup sa11xx_clkregs[] = { 122static struct clk_lookup sa11xx_clkregs[] = {
98 CLKDEV_INIT("sa1111.0", NULL, &clk_gpio27), 123 CLKDEV_INIT("sa1111.0", NULL, &clk_gpio27),
99 CLKDEV_INIT("sa1100-rtc", NULL, NULL), 124 CLKDEV_INIT("sa1100-rtc", NULL, NULL),
125 CLKDEV_INIT("sa11x0-fb", NULL, &clk_cpu),
126 CLKDEV_INIT("sa11x0-pcmcia", NULL, &clk_cpu),
127 /* sa1111 names devices using internal offsets, PCMCIA is at 0x1800 */
128 CLKDEV_INIT("1800", NULL, &clk_cpu),
100}; 129};
101 130
102static int __init sa11xx_clk_init(void) 131static int __init sa11xx_clk_init(void)
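The clock.c change turns clk_get_rate() from a stub that always returned 0 into a dispatch through an optional per-clock get_rate hook. A condensed standalone sketch of the pattern; the names mirror the diff, and the 221 MHz figure is a stand-in for sa11x0_getspeed(0) * 1000:

#include <stdio.h>

struct clk;

struct clkops {
	unsigned long (*get_rate)(struct clk *);
};

struct clk {
	const struct clkops *ops;
};

static unsigned long clk_get_rate(struct clk *clk)
{
	if (clk && clk->ops && clk->ops->get_rate)
		return clk->ops->get_rate(clk);

	return 0;	/* clocks without a hook keep the old stub answer */
}

static unsigned long clk_cpu_get_rate(struct clk *clk)
{
	(void)clk;
	return 221000000UL;	/* stand-in for sa11x0_getspeed(0) * 1000 */
}

static const struct clkops clk_cpu_ops = { .get_rate = clk_cpu_get_rate };

int main(void)
{
	struct clk cpu = { .ops = &clk_cpu_ops };
	struct clk gpio27 = { .ops = NULL };	/* no get_rate hook */

	printf("cpu: %lu Hz, gpio27: %lu Hz\n",
	       clk_get_rate(&cpu), clk_get_rate(&gpio27));
	return 0;
}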
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c
index 108939f8d053..b90c7d828391 100644
--- a/arch/arm/mach-sa1100/collie.c
+++ b/arch/arm/mach-sa1100/collie.c
@@ -30,7 +30,7 @@
30#include <linux/gpio_keys.h> 30#include <linux/gpio_keys.h>
31#include <linux/input.h> 31#include <linux/input.h>
32#include <linux/gpio.h> 32#include <linux/gpio.h>
33#include <linux/pda_power.h> 33#include <linux/power/gpio-charger.h>
34 34
35#include <video/sa1100fb.h> 35#include <video/sa1100fb.h>
36 36
@@ -131,62 +131,24 @@ static struct irda_platform_data collie_ir_data = {
131/* 131/*
132 * Collie AC IN 132 * Collie AC IN
133 */ 133 */
134static int collie_power_init(struct device *dev)
135{
136 int ret = gpio_request(COLLIE_GPIO_AC_IN, "ac in");
137 if (ret)
138 goto err_gpio_req;
139
140 ret = gpio_direction_input(COLLIE_GPIO_AC_IN);
141 if (ret)
142 goto err_gpio_in;
143
144 return 0;
145
146err_gpio_in:
147 gpio_free(COLLIE_GPIO_AC_IN);
148err_gpio_req:
149 return ret;
150}
151
152static void collie_power_exit(struct device *dev)
153{
154 gpio_free(COLLIE_GPIO_AC_IN);
155}
156
157static int collie_power_ac_online(void)
158{
159 return gpio_get_value(COLLIE_GPIO_AC_IN) == 2;
160}
161
162static char *collie_ac_supplied_to[] = { 134static char *collie_ac_supplied_to[] = {
163 "main-battery", 135 "main-battery",
164 "backup-battery", 136 "backup-battery",
165}; 137};
166 138
167static struct pda_power_pdata collie_power_data = { 139
168 .init = collie_power_init, 140static struct gpio_charger_platform_data collie_power_data = {
169 .is_ac_online = collie_power_ac_online, 141 .name = "charger",
170 .exit = collie_power_exit, 142 .type = POWER_SUPPLY_TYPE_MAINS,
143 .gpio = COLLIE_GPIO_AC_IN,
171 .supplied_to = collie_ac_supplied_to, 144 .supplied_to = collie_ac_supplied_to,
172 .num_supplicants = ARRAY_SIZE(collie_ac_supplied_to), 145 .num_supplicants = ARRAY_SIZE(collie_ac_supplied_to),
173}; 146};
174 147
175static struct resource collie_power_resource[] = {
176 {
177 .name = "ac",
178 .flags = IORESOURCE_IRQ |
179 IORESOURCE_IRQ_HIGHEDGE |
180 IORESOURCE_IRQ_LOWEDGE,
181 },
182};
183
184static struct platform_device collie_power_device = { 148static struct platform_device collie_power_device = {
185 .name = "pda-power", 149 .name = "gpio-charger",
186 .id = -1, 150 .id = -1,
187 .dev.platform_data = &collie_power_data, 151 .dev.platform_data = &collie_power_data,
188 .resource = collie_power_resource,
189 .num_resources = ARRAY_SIZE(collie_power_resource),
190}; 152};
191 153
192#ifdef CONFIG_SHARP_LOCOMO 154#ifdef CONFIG_SHARP_LOCOMO
@@ -420,9 +382,6 @@ static void __init collie_init(void)
420 382
421 GPSR |= _COLLIE_GPIO_UCB1x00_RESET; 383 GPSR |= _COLLIE_GPIO_UCB1x00_RESET;
422 384
423 collie_power_resource[0].start = gpio_to_irq(COLLIE_GPIO_AC_IN);
424 collie_power_resource[0].end = gpio_to_irq(COLLIE_GPIO_AC_IN);
425
426 sa11x0_ppc_configure_mcp(); 385 sa11x0_ppc_configure_mcp();
427 386
428 387
diff --git a/arch/arm/mach-sa1100/include/mach/entry-macro.S b/arch/arm/mach-sa1100/include/mach/entry-macro.S
deleted file mode 100644
index 8cf7630bf024..000000000000
--- a/arch/arm/mach-sa1100/include/mach/entry-macro.S
+++ /dev/null
@@ -1,41 +0,0 @@
1/*
2 * arch/arm/mach-sa1100/include/mach/entry-macro.S
3 *
4 * Low-level IRQ helper macros for SA1100-based platforms
5 *
6 * This file is licensed under the terms of the GNU General Public
7 * License version 2. This program is licensed "as is" without any
8 * warranty of any kind, whether express or implied.
9 */
10
11 .macro get_irqnr_preamble, base, tmp
12 mov \base, #0xfa000000 @ ICIP = 0xfa050000
13 add \base, \base, #0x00050000
14 .endm
15
16 .macro get_irqnr_and_base, irqnr, irqstat, base, tmp
17 ldr \irqstat, [\base] @ get irqs
18 ldr \irqnr, [\base, #4] @ ICMR = 0xfa050004
19 ands \irqstat, \irqstat, \irqnr
20 mov \irqnr, #0
21 beq 1001f
22 tst \irqstat, #0xff
23 moveq \irqstat, \irqstat, lsr #8
24 addeq \irqnr, \irqnr, #8
25 tsteq \irqstat, #0xff
26 moveq \irqstat, \irqstat, lsr #8
27 addeq \irqnr, \irqnr, #8
28 tsteq \irqstat, #0xff
29 moveq \irqstat, \irqstat, lsr #8
30 addeq \irqnr, \irqnr, #8
31 tst \irqstat, #0x0f
32 moveq \irqstat, \irqstat, lsr #4
33 addeq \irqnr, \irqnr, #4
34 tst \irqstat, #0x03
35 moveq \irqstat, \irqstat, lsr #2
36 addeq \irqnr, \irqnr, #2
37 tst \irqstat, #0x01
38 addeqs \irqnr, \irqnr, #1
391001:
40 .endm
41
diff --git a/arch/arm/mach-sa1100/include/mach/irqs.h b/arch/arm/mach-sa1100/include/mach/irqs.h
index 3790298b7142..de0983494c7e 100644
--- a/arch/arm/mach-sa1100/include/mach/irqs.h
+++ b/arch/arm/mach-sa1100/include/mach/irqs.h
@@ -8,56 +8,56 @@
8 * 2001/11/14 RMK Cleaned up and standardised a lot of the IRQs. 8 * 2001/11/14 RMK Cleaned up and standardised a lot of the IRQs.
9 */ 9 */
10 10
11#define IRQ_GPIO0 0 11#define IRQ_GPIO0 1
12#define IRQ_GPIO1 1 12#define IRQ_GPIO1 2
13#define IRQ_GPIO2 2 13#define IRQ_GPIO2 3
14#define IRQ_GPIO3 3 14#define IRQ_GPIO3 4
15#define IRQ_GPIO4 4 15#define IRQ_GPIO4 5
16#define IRQ_GPIO5 5 16#define IRQ_GPIO5 6
17#define IRQ_GPIO6 6 17#define IRQ_GPIO6 7
18#define IRQ_GPIO7 7 18#define IRQ_GPIO7 8
19#define IRQ_GPIO8 8 19#define IRQ_GPIO8 9
20#define IRQ_GPIO9 9 20#define IRQ_GPIO9 10
21#define IRQ_GPIO10 10 21#define IRQ_GPIO10 11
22#define IRQ_GPIO11_27 11 22#define IRQ_GPIO11_27 12
23#define IRQ_LCD 12 /* LCD controller */ 23#define IRQ_LCD 13 /* LCD controller */
24#define IRQ_Ser0UDC 13 /* Ser. port 0 UDC */ 24#define IRQ_Ser0UDC 14 /* Ser. port 0 UDC */
25#define IRQ_Ser1SDLC 14 /* Ser. port 1 SDLC */ 25#define IRQ_Ser1SDLC 15 /* Ser. port 1 SDLC */
26#define IRQ_Ser1UART 15 /* Ser. port 1 UART */ 26#define IRQ_Ser1UART 16 /* Ser. port 1 UART */
27#define IRQ_Ser2ICP 16 /* Ser. port 2 ICP */ 27#define IRQ_Ser2ICP 17 /* Ser. port 2 ICP */
28#define IRQ_Ser3UART 17 /* Ser. port 3 UART */ 28#define IRQ_Ser3UART 18 /* Ser. port 3 UART */
29#define IRQ_Ser4MCP 18 /* Ser. port 4 MCP */ 29#define IRQ_Ser4MCP 19 /* Ser. port 4 MCP */
30#define IRQ_Ser4SSP 19 /* Ser. port 4 SSP */ 30#define IRQ_Ser4SSP 20 /* Ser. port 4 SSP */
31#define IRQ_DMA0 20 /* DMA controller channel 0 */ 31#define IRQ_DMA0 21 /* DMA controller channel 0 */
32#define IRQ_DMA1 21 /* DMA controller channel 1 */ 32#define IRQ_DMA1 22 /* DMA controller channel 1 */
33#define IRQ_DMA2 22 /* DMA controller channel 2 */ 33#define IRQ_DMA2 23 /* DMA controller channel 2 */
34#define IRQ_DMA3 23 /* DMA controller channel 3 */ 34#define IRQ_DMA3 24 /* DMA controller channel 3 */
35#define IRQ_DMA4 24 /* DMA controller channel 4 */ 35#define IRQ_DMA4 25 /* DMA controller channel 4 */
36#define IRQ_DMA5 25 /* DMA controller channel 5 */ 36#define IRQ_DMA5 26 /* DMA controller channel 5 */
37#define IRQ_OST0 26 /* OS Timer match 0 */ 37#define IRQ_OST0 27 /* OS Timer match 0 */
38#define IRQ_OST1 27 /* OS Timer match 1 */ 38#define IRQ_OST1 28 /* OS Timer match 1 */
39#define IRQ_OST2 28 /* OS Timer match 2 */ 39#define IRQ_OST2 29 /* OS Timer match 2 */
40#define IRQ_OST3 29 /* OS Timer match 3 */ 40#define IRQ_OST3 30 /* OS Timer match 3 */
41#define IRQ_RTC1Hz 30 /* RTC 1 Hz clock */ 41#define IRQ_RTC1Hz 31 /* RTC 1 Hz clock */
42#define IRQ_RTCAlrm 31 /* RTC Alarm */ 42#define IRQ_RTCAlrm 32 /* RTC Alarm */
43 43
44#define IRQ_GPIO11 32 44#define IRQ_GPIO11 33
45#define IRQ_GPIO12 33 45#define IRQ_GPIO12 34
46#define IRQ_GPIO13 34 46#define IRQ_GPIO13 35
47#define IRQ_GPIO14 35 47#define IRQ_GPIO14 36
48#define IRQ_GPIO15 36 48#define IRQ_GPIO15 37
49#define IRQ_GPIO16 37 49#define IRQ_GPIO16 38
50#define IRQ_GPIO17 38 50#define IRQ_GPIO17 39
51#define IRQ_GPIO18 39 51#define IRQ_GPIO18 40
52#define IRQ_GPIO19 40 52#define IRQ_GPIO19 41
53#define IRQ_GPIO20 41 53#define IRQ_GPIO20 42
54#define IRQ_GPIO21 42 54#define IRQ_GPIO21 43
55#define IRQ_GPIO22 43 55#define IRQ_GPIO22 44
56#define IRQ_GPIO23 44 56#define IRQ_GPIO23 45
57#define IRQ_GPIO24 45 57#define IRQ_GPIO24 46
58#define IRQ_GPIO25 46 58#define IRQ_GPIO25 47
59#define IRQ_GPIO26 47 59#define IRQ_GPIO26 48
60#define IRQ_GPIO27 48 60#define IRQ_GPIO27 49
61 61
62/* 62/*
63 * The next 16 interrupts are for board specific purposes. Since 63 * The next 16 interrupts are for board specific purposes. Since
@@ -65,8 +65,8 @@
65 * these. If you need more, increase IRQ_BOARD_END, but keep it 65 * these. If you need more, increase IRQ_BOARD_END, but keep it
66 * within sensible limits. IRQs 49 to 64 are available. 66 * within sensible limits. IRQs 49 to 64 are available.
67 */ 67 */
68#define IRQ_BOARD_START 49 68#define IRQ_BOARD_START 50
69#define IRQ_BOARD_END 65 69#define IRQ_BOARD_END 66
70 70
71/* 71/*
72 * Figure out the MAX IRQ number. 72 * Figure out the MAX IRQ number.
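The renumbering above shifts every virtual IRQ up by one so that Linux IRQ 0, which the kernel reserves to mean "no interrupt", is never handed to a real source; the hardware IRQ numbers themselves are unchanged. A hypothetical helper (not in the patch) restating the new relationship:

    /* Assumption for illustration: hwirq 0..31 now map to Linux IRQs 1..32. */
    #define SA1100_HWIRQ_TO_IRQ(hw) ((hw) + IRQ_GPIO0)      /* IRQ_GPIO0 == 1 */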
diff --git a/arch/arm/mach-sa1100/irq.c b/arch/arm/mach-sa1100/irq.c
index 2124f1fc2fbe..63e2901db416 100644
--- a/arch/arm/mach-sa1100/irq.c
+++ b/arch/arm/mach-sa1100/irq.c
@@ -14,17 +14,73 @@
14#include <linux/interrupt.h> 14#include <linux/interrupt.h>
15#include <linux/io.h> 15#include <linux/io.h>
16#include <linux/irq.h> 16#include <linux/irq.h>
17#include <linux/irqdomain.h>
17#include <linux/ioport.h> 18#include <linux/ioport.h>
18#include <linux/syscore_ops.h> 19#include <linux/syscore_ops.h>
19 20
20#include <mach/hardware.h> 21#include <mach/hardware.h>
21#include <mach/irqs.h> 22#include <mach/irqs.h>
22#include <asm/mach/irq.h> 23#include <asm/mach/irq.h>
24#include <asm/exception.h>
23 25
24#include "generic.h" 26#include "generic.h"
25 27
26 28
27/* 29/*
 30 * We don't need to ACK IRQs on the SA1100 unless they're GPIOs;
 31 * this is for internal IRQs, i.e. from IRQ_LCD to IRQ_RTCAlrm.
32 */
33static void sa1100_mask_irq(struct irq_data *d)
34{
35 ICMR &= ~BIT(d->hwirq);
36}
37
38static void sa1100_unmask_irq(struct irq_data *d)
39{
40 ICMR |= BIT(d->hwirq);
41}
42
43/*
 44 * Apart from GPIOs, only the RTC alarm can be a wakeup event.
45 */
46static int sa1100_set_wake(struct irq_data *d, unsigned int on)
47{
48 if (BIT(d->hwirq) == IC_RTCAlrm) {
49 if (on)
50 PWER |= PWER_RTC;
51 else
52 PWER &= ~PWER_RTC;
53 return 0;
54 }
55 return -EINVAL;
56}
57
58static struct irq_chip sa1100_normal_chip = {
59 .name = "SC",
60 .irq_ack = sa1100_mask_irq,
61 .irq_mask = sa1100_mask_irq,
62 .irq_unmask = sa1100_unmask_irq,
63 .irq_set_wake = sa1100_set_wake,
64};
65
66static int sa1100_normal_irqdomain_map(struct irq_domain *d,
67 unsigned int irq, irq_hw_number_t hwirq)
68{
69 irq_set_chip_and_handler(irq, &sa1100_normal_chip,
70 handle_level_irq);
71 set_irq_flags(irq, IRQF_VALID);
72
73 return 0;
74}
75
76static struct irq_domain_ops sa1100_normal_irqdomain_ops = {
77 .map = sa1100_normal_irqdomain_map,
78 .xlate = irq_domain_xlate_onetwocell,
79};
80
81static struct irq_domain *sa1100_normal_irqdomain;
82
83/*
28 * SA1100 GPIO edge detection for IRQs: 84 * SA1100 GPIO edge detection for IRQs:
29 * IRQs are generated on Falling-Edge, Rising-Edge, or both. 85 * IRQs are generated on Falling-Edge, Rising-Edge, or both.
30 * Use this instead of directly setting GRER/GFER. 86 * Use this instead of directly setting GRER/GFER.
@@ -33,20 +89,11 @@ static int GPIO_IRQ_rising_edge;
33static int GPIO_IRQ_falling_edge; 89static int GPIO_IRQ_falling_edge;
34static int GPIO_IRQ_mask = (1 << 11) - 1; 90static int GPIO_IRQ_mask = (1 << 11) - 1;
35 91
36/*
37 * To get the GPIO number from an IRQ number
38 */
39#define GPIO_11_27_IRQ(i) ((i) - 21)
40#define GPIO11_27_MASK(irq) (1 << GPIO_11_27_IRQ(irq))
41
42static int sa1100_gpio_type(struct irq_data *d, unsigned int type) 92static int sa1100_gpio_type(struct irq_data *d, unsigned int type)
43{ 93{
44 unsigned int mask; 94 unsigned int mask;
45 95
46 if (d->irq <= 10) 96 mask = BIT(d->hwirq);
47 mask = 1 << d->irq;
48 else
49 mask = GPIO11_27_MASK(d->irq);
50 97
51 if (type == IRQ_TYPE_PROBE) { 98 if (type == IRQ_TYPE_PROBE) {
52 if ((GPIO_IRQ_rising_edge | GPIO_IRQ_falling_edge) & mask) 99 if ((GPIO_IRQ_rising_edge | GPIO_IRQ_falling_edge) & mask)
@@ -70,41 +117,51 @@ static int sa1100_gpio_type(struct irq_data *d, unsigned int type)
70} 117}
71 118
72/* 119/*
73 * GPIO IRQs must be acknowledged. This is for IRQs from 0 to 10. 120 * GPIO IRQs must be acknowledged.
74 */ 121 */
75static void sa1100_low_gpio_ack(struct irq_data *d) 122static void sa1100_gpio_ack(struct irq_data *d)
76{
77 GEDR = (1 << d->irq);
78}
79
80static void sa1100_low_gpio_mask(struct irq_data *d)
81{
82 ICMR &= ~(1 << d->irq);
83}
84
85static void sa1100_low_gpio_unmask(struct irq_data *d)
86{ 123{
87 ICMR |= 1 << d->irq; 124 GEDR = BIT(d->hwirq);
88} 125}
89 126
90static int sa1100_low_gpio_wake(struct irq_data *d, unsigned int on) 127static int sa1100_gpio_wake(struct irq_data *d, unsigned int on)
91{ 128{
92 if (on) 129 if (on)
93 PWER |= 1 << d->irq; 130 PWER |= BIT(d->hwirq);
94 else 131 else
95 PWER &= ~(1 << d->irq); 132 PWER &= ~BIT(d->hwirq);
96 return 0; 133 return 0;
97} 134}
98 135
136/*
137 * This is for IRQs from 0 to 10.
138 */
99static struct irq_chip sa1100_low_gpio_chip = { 139static struct irq_chip sa1100_low_gpio_chip = {
100 .name = "GPIO-l", 140 .name = "GPIO-l",
101 .irq_ack = sa1100_low_gpio_ack, 141 .irq_ack = sa1100_gpio_ack,
102 .irq_mask = sa1100_low_gpio_mask, 142 .irq_mask = sa1100_mask_irq,
103 .irq_unmask = sa1100_low_gpio_unmask, 143 .irq_unmask = sa1100_unmask_irq,
104 .irq_set_type = sa1100_gpio_type, 144 .irq_set_type = sa1100_gpio_type,
105 .irq_set_wake = sa1100_low_gpio_wake, 145 .irq_set_wake = sa1100_gpio_wake,
146};
147
148static int sa1100_low_gpio_irqdomain_map(struct irq_domain *d,
149 unsigned int irq, irq_hw_number_t hwirq)
150{
151 irq_set_chip_and_handler(irq, &sa1100_low_gpio_chip,
152 handle_edge_irq);
153 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
154
155 return 0;
156}
157
158static struct irq_domain_ops sa1100_low_gpio_irqdomain_ops = {
159 .map = sa1100_low_gpio_irqdomain_map,
160 .xlate = irq_domain_xlate_onetwocell,
106}; 161};
107 162
163static struct irq_domain *sa1100_low_gpio_irqdomain;
164
108/* 165/*
109 * IRQ11 (GPIO11 through 27) handler. We enter here with the 166 * IRQ11 (GPIO11 through 27) handler. We enter here with the
110 * irq_controller_lock held, and IRQs disabled. Decode the IRQ 167 * irq_controller_lock held, and IRQs disabled. Decode the IRQ
@@ -141,16 +198,9 @@ sa1100_high_gpio_handler(unsigned int irq, struct irq_desc *desc)
141 * In addition, the IRQs are all collected up into one bit in the 198 * In addition, the IRQs are all collected up into one bit in the
142 * interrupt controller registers. 199 * interrupt controller registers.
143 */ 200 */
144static void sa1100_high_gpio_ack(struct irq_data *d)
145{
146 unsigned int mask = GPIO11_27_MASK(d->irq);
147
148 GEDR = mask;
149}
150
151static void sa1100_high_gpio_mask(struct irq_data *d) 201static void sa1100_high_gpio_mask(struct irq_data *d)
152{ 202{
153 unsigned int mask = GPIO11_27_MASK(d->irq); 203 unsigned int mask = BIT(d->hwirq);
154 204
155 GPIO_IRQ_mask &= ~mask; 205 GPIO_IRQ_mask &= ~mask;
156 206
@@ -160,7 +210,7 @@ static void sa1100_high_gpio_mask(struct irq_data *d)
160 210
161static void sa1100_high_gpio_unmask(struct irq_data *d) 211static void sa1100_high_gpio_unmask(struct irq_data *d)
162{ 212{
163 unsigned int mask = GPIO11_27_MASK(d->irq); 213 unsigned int mask = BIT(d->hwirq);
164 214
165 GPIO_IRQ_mask |= mask; 215 GPIO_IRQ_mask |= mask;
166 216
@@ -168,61 +218,32 @@ static void sa1100_high_gpio_unmask(struct irq_data *d)
168 GFER = GPIO_IRQ_falling_edge & GPIO_IRQ_mask; 218 GFER = GPIO_IRQ_falling_edge & GPIO_IRQ_mask;
169} 219}
170 220
171static int sa1100_high_gpio_wake(struct irq_data *d, unsigned int on)
172{
173 if (on)
174 PWER |= GPIO11_27_MASK(d->irq);
175 else
176 PWER &= ~GPIO11_27_MASK(d->irq);
177 return 0;
178}
179
180static struct irq_chip sa1100_high_gpio_chip = { 221static struct irq_chip sa1100_high_gpio_chip = {
181 .name = "GPIO-h", 222 .name = "GPIO-h",
182 .irq_ack = sa1100_high_gpio_ack, 223 .irq_ack = sa1100_gpio_ack,
183 .irq_mask = sa1100_high_gpio_mask, 224 .irq_mask = sa1100_high_gpio_mask,
184 .irq_unmask = sa1100_high_gpio_unmask, 225 .irq_unmask = sa1100_high_gpio_unmask,
185 .irq_set_type = sa1100_gpio_type, 226 .irq_set_type = sa1100_gpio_type,
186 .irq_set_wake = sa1100_high_gpio_wake, 227 .irq_set_wake = sa1100_gpio_wake,
187}; 228};
188 229
189/* 230static int sa1100_high_gpio_irqdomain_map(struct irq_domain *d,
190 * We don't need to ACK IRQs on the SA1100 unless they're GPIOs 231 unsigned int irq, irq_hw_number_t hwirq)
191 * this is for internal IRQs i.e. from 11 to 31.
192 */
193static void sa1100_mask_irq(struct irq_data *d)
194{
195 ICMR &= ~(1 << d->irq);
196}
197
198static void sa1100_unmask_irq(struct irq_data *d)
199{ 232{
200 ICMR |= (1 << d->irq); 233 irq_set_chip_and_handler(irq, &sa1100_high_gpio_chip,
201} 234 handle_edge_irq);
235 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
202 236
203/* 237 return 0;
 204 * Apart from GPIOs, only the RTC alarm can be a wakeup event.
205 */
206static int sa1100_set_wake(struct irq_data *d, unsigned int on)
207{
208 if (d->irq == IRQ_RTCAlrm) {
209 if (on)
210 PWER |= PWER_RTC;
211 else
212 PWER &= ~PWER_RTC;
213 return 0;
214 }
215 return -EINVAL;
216} 238}
217 239
218static struct irq_chip sa1100_normal_chip = { 240static struct irq_domain_ops sa1100_high_gpio_irqdomain_ops = {
219 .name = "SC", 241 .map = sa1100_high_gpio_irqdomain_map,
220 .irq_ack = sa1100_mask_irq, 242 .xlate = irq_domain_xlate_onetwocell,
221 .irq_mask = sa1100_mask_irq,
222 .irq_unmask = sa1100_unmask_irq,
223 .irq_set_wake = sa1100_set_wake,
224}; 243};
225 244
245static struct irq_domain *sa1100_high_gpio_irqdomain;
246
226static struct resource irq_resource = 247static struct resource irq_resource =
227 DEFINE_RES_MEM_NAMED(0x90050000, SZ_64K, "irqs"); 248 DEFINE_RES_MEM_NAMED(0x90050000, SZ_64K, "irqs");
228 249
@@ -291,10 +312,25 @@ static int __init sa1100irq_init_devicefs(void)
291 312
292device_initcall(sa1100irq_init_devicefs); 313device_initcall(sa1100irq_init_devicefs);
293 314
294void __init sa1100_init_irq(void) 315static asmlinkage void __exception_irq_entry
316sa1100_handle_irq(struct pt_regs *regs)
295{ 317{
296 unsigned int irq; 318 uint32_t icip, icmr, mask;
319
320 do {
321 icip = (ICIP);
322 icmr = (ICMR);
323 mask = icip & icmr;
324
325 if (mask == 0)
326 break;
327
328 handle_IRQ(ffs(mask) - 1 + IRQ_GPIO0, regs);
329 } while (1);
330}
297 331
332void __init sa1100_init_irq(void)
333{
298 request_resource(&iomem_resource, &irq_resource); 334 request_resource(&iomem_resource, &irq_resource);
299 335
300 /* disable all IRQs */ 336 /* disable all IRQs */
@@ -314,29 +350,24 @@ void __init sa1100_init_irq(void)
314 */ 350 */
315 ICCR = 1; 351 ICCR = 1;
316 352
317 for (irq = 0; irq <= 10; irq++) { 353 sa1100_low_gpio_irqdomain = irq_domain_add_legacy(NULL,
318 irq_set_chip_and_handler(irq, &sa1100_low_gpio_chip, 354 11, IRQ_GPIO0, 0,
319 handle_edge_irq); 355 &sa1100_low_gpio_irqdomain_ops, NULL);
320 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
321 }
322 356
323 for (irq = 12; irq <= 31; irq++) { 357 sa1100_normal_irqdomain = irq_domain_add_legacy(NULL,
324 irq_set_chip_and_handler(irq, &sa1100_normal_chip, 358 21, IRQ_GPIO11_27, 11,
325 handle_level_irq); 359 &sa1100_normal_irqdomain_ops, NULL);
326 set_irq_flags(irq, IRQF_VALID);
327 }
328 360
329 for (irq = 32; irq <= 48; irq++) { 361 sa1100_high_gpio_irqdomain = irq_domain_add_legacy(NULL,
330 irq_set_chip_and_handler(irq, &sa1100_high_gpio_chip, 362 17, IRQ_GPIO11, 11,
331 handle_edge_irq); 363 &sa1100_high_gpio_irqdomain_ops, NULL);
332 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
333 }
334 364
335 /* 365 /*
336 * Install handler for GPIO 11-27 edge detect interrupts 366 * Install handler for GPIO 11-27 edge detect interrupts
337 */ 367 */
338 irq_set_chip(IRQ_GPIO11_27, &sa1100_normal_chip);
339 irq_set_chained_handler(IRQ_GPIO11_27, sa1100_high_gpio_handler); 368 irq_set_chained_handler(IRQ_GPIO11_27, sa1100_high_gpio_handler);
340 369
370 set_handle_irq(sa1100_handle_irq);
371
341 sa1100_init_gpio(); 372 sa1100_init_gpio();
342} 373}
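The three irq_domain_add_legacy() registrations above replace the old open-coded loops: each one binds a contiguous block of hardware IRQs to a contiguous block of pre-allocated Linux IRQs, and the domain's .map callback installs the right chip and flow handler for each. Restating the first registration as a sketch (signature as in <linux/irqdomain.h> of this era):

    struct irq_domain *d;

    d = irq_domain_add_legacy(NULL,              /* no device-tree node   */
                              11,                /* size: hwirqs 0..10    */
                              IRQ_GPIO0,         /* first Linux IRQ (1)   */
                              0,                 /* first hwirq           */
                              &sa1100_low_gpio_irqdomain_ops, NULL);
    /* For an in-range hwirq: virq = first_irq + (hwirq - first_hwirq). */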
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index ab906b801047..03823e784f63 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -1009,3 +1009,24 @@ config ARCH_SUPPORTS_BIG_ENDIAN
1009 help 1009 help
1010 This option specifies the architecture can support big endian 1010 This option specifies the architecture can support big endian
1011 operation. 1011 operation.
1012
1013config ARM_KERNMEM_PERMS
1014 bool "Restrict kernel memory permissions"
1015 help
1016 If this is set, kernel memory other than kernel text (and rodata)
1017 will be made non-executable. The tradeoff is that each region is
1018 padded to section-size (1MiB) boundaries (because their permissions
1019 are different and splitting the 1M pages into 4K ones causes TLB
1020 performance problems), wasting memory.
1021
1022config DEBUG_RODATA
1023 bool "Make kernel text and rodata read-only"
1024 depends on ARM_KERNMEM_PERMS
1025 default y
1026 help
1027 If this is set, kernel text and rodata will be made read-only. This
1028 is to help catch accidental or malicious attempts to change the
1029 kernel's executable code. Additionally splits rodata from kernel
1030 text so it can be made explicitly non-executable. This creates
1031 another section-size padded region, so it can waste more memory
1032 space while gaining the read-only protections.
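For reference, a config fragment that enables both of the new options (names as defined above):

    CONFIG_ARM_KERNMEM_PERMS=y
    CONFIG_DEBUG_RODATA=y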
diff --git a/arch/arm/mm/Makefile b/arch/arm/mm/Makefile
index 91da64de440f..d3afdf9eb65a 100644
--- a/arch/arm/mm/Makefile
+++ b/arch/arm/mm/Makefile
@@ -6,7 +6,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
6 iomap.o 6 iomap.o
7 7
8obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \ 8obj-$(CONFIG_MMU) += fault-armv.o flush.o idmap.o ioremap.o \
9 mmap.o pgd.o mmu.o 9 mmap.o pgd.o mmu.o pageattr.o
10 10
11ifneq ($(CONFIG_MMU),y) 11ifneq ($(CONFIG_MMU),y)
12obj-y += nommu.o 12obj-y += nommu.o
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 83792f4324ea..2c0c541c60ca 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -113,7 +113,7 @@ static int safe_usermode(int new_usermode, bool warn)
113 new_usermode |= UM_FIXUP; 113 new_usermode |= UM_FIXUP;
114 114
115 if (warn) 115 if (warn)
116 printk(KERN_WARNING "alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n"); 116 pr_warn("alignment: ignoring faults is unsafe on this CPU. Defaulting to fixup mode.\n");
117 } 117 }
118 118
119 return new_usermode; 119 return new_usermode;
@@ -523,7 +523,7 @@ do_alignment_ldmstm(unsigned long addr, unsigned long instr, struct pt_regs *reg
523 * processor for us. 523 * processor for us.
524 */ 524 */
525 if (addr != eaddr) { 525 if (addr != eaddr) {
526 printk(KERN_ERR "LDMSTM: PC = %08lx, instr = %08lx, " 526 pr_err("LDMSTM: PC = %08lx, instr = %08lx, "
527 "addr = %08lx, eaddr = %08lx\n", 527 "addr = %08lx, eaddr = %08lx\n",
528 instruction_pointer(regs), instr, addr, eaddr); 528 instruction_pointer(regs), instr, addr, eaddr);
529 show_regs(regs); 529 show_regs(regs);
@@ -567,7 +567,7 @@ fault:
567 return TYPE_FAULT; 567 return TYPE_FAULT;
568 568
569bad: 569bad:
570 printk(KERN_ERR "Alignment trap: not handling ldm with s-bit set\n"); 570 pr_err("Alignment trap: not handling ldm with s-bit set\n");
571 return TYPE_ERROR; 571 return TYPE_ERROR;
572} 572}
573 573
@@ -899,13 +899,13 @@ do_alignment(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
899 return 0; 899 return 0;
900 900
901 swp: 901 swp:
902 printk(KERN_ERR "Alignment trap: not handling swp instruction\n"); 902 pr_err("Alignment trap: not handling swp instruction\n");
903 903
904 bad: 904 bad:
905 /* 905 /*
906 * Oops, we didn't handle the instruction. 906 * Oops, we didn't handle the instruction.
907 */ 907 */
908 printk(KERN_ERR "Alignment trap: not handling instruction " 908 pr_err("Alignment trap: not handling instruction "
909 "%0*lx at [<%08lx>]\n", 909 "%0*lx at [<%08lx>]\n",
910 isize << 1, 910 isize << 1,
911 isize == 2 ? tinstr : instr, instrptr); 911 isize == 2 ? tinstr : instr, instrptr);
diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c
index e028a7f2ebcc..097181e08c25 100644
--- a/arch/arm/mm/cache-feroceon-l2.c
+++ b/arch/arm/mm/cache-feroceon-l2.c
@@ -313,7 +313,7 @@ static void __init disable_l2_prefetch(void)
313 */ 313 */
314 u = read_extra_features(); 314 u = read_extra_features();
315 if (!(u & 0x01000000)) { 315 if (!(u & 0x01000000)) {
316 printk(KERN_INFO "Feroceon L2: Disabling L2 prefetch.\n"); 316 pr_info("Feroceon L2: Disabling L2 prefetch.\n");
317 write_extra_features(u | 0x01000000); 317 write_extra_features(u | 0x01000000);
318 } 318 }
319} 319}
@@ -326,7 +326,7 @@ static void __init enable_l2(void)
326 if (!(u & 0x00400000)) { 326 if (!(u & 0x00400000)) {
327 int i, d; 327 int i, d;
328 328
329 printk(KERN_INFO "Feroceon L2: Enabling L2\n"); 329 pr_info("Feroceon L2: Enabling L2\n");
330 330
331 d = flush_and_disable_dcache(); 331 d = flush_and_disable_dcache();
332 i = invalidate_and_disable_icache(); 332 i = invalidate_and_disable_icache();
@@ -353,7 +353,7 @@ void __init feroceon_l2_init(int __l2_wt_override)
353 353
354 enable_l2(); 354 enable_l2();
355 355
356 printk(KERN_INFO "Feroceon L2: Cache support initialised%s.\n", 356 pr_info("Feroceon L2: Cache support initialised%s.\n",
357 l2_wt_override ? ", in WT override mode" : ""); 357 l2_wt_override ? ", in WT override mode" : "");
358} 358}
359#ifdef CONFIG_OF 359#ifdef CONFIG_OF
diff --git a/arch/arm/mm/cache-tauros2.c b/arch/arm/mm/cache-tauros2.c
index b273739e6359..1e373d268c04 100644
--- a/arch/arm/mm/cache-tauros2.c
+++ b/arch/arm/mm/cache-tauros2.c
@@ -185,7 +185,7 @@ static void enable_extra_feature(unsigned int features)
185 u &= ~0x01000000; 185 u &= ~0x01000000;
186 else 186 else
187 u |= 0x01000000; 187 u |= 0x01000000;
188 printk(KERN_INFO "Tauros2: %s L2 prefetch.\n", 188 pr_info("Tauros2: %s L2 prefetch.\n",
189 (features & CACHE_TAUROS2_PREFETCH_ON) 189 (features & CACHE_TAUROS2_PREFETCH_ON)
190 ? "Enabling" : "Disabling"); 190 ? "Enabling" : "Disabling");
191 191
@@ -193,7 +193,7 @@ static void enable_extra_feature(unsigned int features)
193 u |= 0x00100000; 193 u |= 0x00100000;
194 else 194 else
195 u &= ~0x00100000; 195 u &= ~0x00100000;
196 printk(KERN_INFO "Tauros2: %s line fill burt8.\n", 196 pr_info("Tauros2: %s line fill burt8.\n",
197 (features & CACHE_TAUROS2_LINEFILL_BURST8) 197 (features & CACHE_TAUROS2_LINEFILL_BURST8)
198 ? "Enabling" : "Disabling"); 198 ? "Enabling" : "Disabling");
199 199
@@ -216,7 +216,7 @@ static void __init tauros2_internal_init(unsigned int features)
216 */ 216 */
217 feat = read_extra_features(); 217 feat = read_extra_features();
218 if (!(feat & 0x00400000)) { 218 if (!(feat & 0x00400000)) {
219 printk(KERN_INFO "Tauros2: Enabling L2 cache.\n"); 219 pr_info("Tauros2: Enabling L2 cache.\n");
220 write_extra_features(feat | 0x00400000); 220 write_extra_features(feat | 0x00400000);
221 } 221 }
222 222
@@ -253,7 +253,7 @@ static void __init tauros2_internal_init(unsigned int features)
253 */ 253 */
254 actlr = read_actlr(); 254 actlr = read_actlr();
255 if (!(actlr & 0x00000002)) { 255 if (!(actlr & 0x00000002)) {
256 printk(KERN_INFO "Tauros2: Enabling L2 cache.\n"); 256 pr_info("Tauros2: Enabling L2 cache.\n");
257 write_actlr(actlr | 0x00000002); 257 write_actlr(actlr | 0x00000002);
258 } 258 }
259 259
@@ -262,11 +262,11 @@ static void __init tauros2_internal_init(unsigned int features)
262#endif 262#endif
263 263
264 if (mode == NULL) { 264 if (mode == NULL) {
265 printk(KERN_CRIT "Tauros2: Unable to detect CPU mode.\n"); 265 pr_crit("Tauros2: Unable to detect CPU mode.\n");
266 return; 266 return;
267 } 267 }
268 268
269 printk(KERN_INFO "Tauros2: L2 cache support initialised " 269 pr_info("Tauros2: L2 cache support initialised "
270 "in %s mode.\n", mode); 270 "in %s mode.\n", mode);
271} 271}
272 272
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 6eb97b3a7481..91892569710f 100644
--- a/arch/arm/mm/context.c
+++ b/arch/arm/mm/context.c
@@ -184,36 +184,46 @@ static u64 new_context(struct mm_struct *mm, unsigned int cpu)
184 u64 asid = atomic64_read(&mm->context.id); 184 u64 asid = atomic64_read(&mm->context.id);
185 u64 generation = atomic64_read(&asid_generation); 185 u64 generation = atomic64_read(&asid_generation);
186 186
187 if (asid != 0 && is_reserved_asid(asid)) { 187 if (asid != 0) {
188 /* 188 /*
189 * Our current ASID was active during a rollover, we can 189 * If our current ASID was active during a rollover, we
190 * continue to use it and this was just a false alarm. 190 * can continue to use it and this was just a false alarm.
191 */ 191 */
192 asid = generation | (asid & ~ASID_MASK); 192 if (is_reserved_asid(asid))
193 } else { 193 return generation | (asid & ~ASID_MASK);
194
194 /* 195 /*
195 * Allocate a free ASID. If we can't find one, take a 196 * We had a valid ASID in a previous life, so try to re-use
196 * note of the currently active ASIDs and mark the TLBs 197 * it if possible.,
197 * as requiring flushes. We always count from ASID #1,
198 * as we reserve ASID #0 to switch via TTBR0 and to
199 * avoid speculative page table walks from hitting in
200 * any partial walk caches, which could be populated
201 * from overlapping level-1 descriptors used to map both
202 * the module area and the userspace stack.
203 */ 198 */
204 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx); 199 asid &= ~ASID_MASK;
205 if (asid == NUM_USER_ASIDS) { 200 if (!__test_and_set_bit(asid, asid_map))
206 generation = atomic64_add_return(ASID_FIRST_VERSION, 201 goto bump_gen;
207 &asid_generation);
208 flush_context(cpu);
209 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
210 }
211 __set_bit(asid, asid_map);
212 cur_idx = asid;
213 asid |= generation;
214 cpumask_clear(mm_cpumask(mm));
215 } 202 }
216 203
204 /*
205 * Allocate a free ASID. If we can't find one, take a note of the
206 * currently active ASIDs and mark the TLBs as requiring flushes.
207 * We always count from ASID #1, as we reserve ASID #0 to switch
208 * via TTBR0 and to avoid speculative page table walks from hitting
209 * in any partial walk caches, which could be populated from
210 * overlapping level-1 descriptors used to map both the module
211 * area and the userspace stack.
212 */
213 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx);
214 if (asid == NUM_USER_ASIDS) {
215 generation = atomic64_add_return(ASID_FIRST_VERSION,
216 &asid_generation);
217 flush_context(cpu);
218 asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1);
219 }
220
221 __set_bit(asid, asid_map);
222 cur_idx = asid;
223
224bump_gen:
225 asid |= generation;
226 cpumask_clear(mm_cpumask(mm));
217 return asid; 227 return asid;
218} 228}
219 229
diff --git a/arch/arm/mm/copypage-v6.c b/arch/arm/mm/copypage-v6.c
index b9bcc9d79176..70423345da26 100644
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -62,7 +62,7 @@ static void discard_old_kernel_data(void *kto)
62 __asm__("mcrr p15, 0, %1, %0, c6 @ 0xec401f06" 62 __asm__("mcrr p15, 0, %1, %0, c6 @ 0xec401f06"
63 : 63 :
64 : "r" (kto), 64 : "r" (kto),
65 "r" ((unsigned long)kto + PAGE_SIZE - L1_CACHE_BYTES) 65 "r" ((unsigned long)kto + PAGE_SIZE - 1)
66 : "cc"); 66 : "cc");
67} 67}
68 68
diff --git a/arch/arm/mm/fault-armv.c b/arch/arm/mm/fault-armv.c
index ff379ac115df..d9e0d00a6699 100644
--- a/arch/arm/mm/fault-armv.c
+++ b/arch/arm/mm/fault-armv.c
@@ -235,7 +235,7 @@ void __init check_writebuffer_bugs(void)
235 const char *reason; 235 const char *reason;
236 unsigned long v = 1; 236 unsigned long v = 1;
237 237
238 printk(KERN_INFO "CPU: Testing write buffer coherency: "); 238 pr_info("CPU: Testing write buffer coherency: ");
239 239
240 page = alloc_page(GFP_KERNEL); 240 page = alloc_page(GFP_KERNEL);
241 if (page) { 241 if (page) {
@@ -261,9 +261,9 @@ void __init check_writebuffer_bugs(void)
261 } 261 }
262 262
263 if (v) { 263 if (v) {
264 printk("failed, %s\n", reason); 264 pr_cont("failed, %s\n", reason);
265 shared_pte_mask = L_PTE_MT_UNCACHED; 265 shared_pte_mask = L_PTE_MT_UNCACHED;
266 } else { 266 } else {
267 printk("ok\n"); 267 pr_cont("ok\n");
268 } 268 }
269} 269}
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index eb8830a4c5ed..a982dc3190df 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -63,9 +63,9 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
63 if (!mm) 63 if (!mm)
64 mm = &init_mm; 64 mm = &init_mm;
65 65
66 printk(KERN_ALERT "pgd = %p\n", mm->pgd); 66 pr_alert("pgd = %p\n", mm->pgd);
67 pgd = pgd_offset(mm, addr); 67 pgd = pgd_offset(mm, addr);
68 printk(KERN_ALERT "[%08lx] *pgd=%08llx", 68 pr_alert("[%08lx] *pgd=%08llx",
69 addr, (long long)pgd_val(*pgd)); 69 addr, (long long)pgd_val(*pgd));
70 70
71 do { 71 do {
@@ -77,31 +77,31 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
77 break; 77 break;
78 78
79 if (pgd_bad(*pgd)) { 79 if (pgd_bad(*pgd)) {
80 printk("(bad)"); 80 pr_cont("(bad)");
81 break; 81 break;
82 } 82 }
83 83
84 pud = pud_offset(pgd, addr); 84 pud = pud_offset(pgd, addr);
85 if (PTRS_PER_PUD != 1) 85 if (PTRS_PER_PUD != 1)
86 printk(", *pud=%08llx", (long long)pud_val(*pud)); 86 pr_cont(", *pud=%08llx", (long long)pud_val(*pud));
87 87
88 if (pud_none(*pud)) 88 if (pud_none(*pud))
89 break; 89 break;
90 90
91 if (pud_bad(*pud)) { 91 if (pud_bad(*pud)) {
92 printk("(bad)"); 92 pr_cont("(bad)");
93 break; 93 break;
94 } 94 }
95 95
96 pmd = pmd_offset(pud, addr); 96 pmd = pmd_offset(pud, addr);
97 if (PTRS_PER_PMD != 1) 97 if (PTRS_PER_PMD != 1)
98 printk(", *pmd=%08llx", (long long)pmd_val(*pmd)); 98 pr_cont(", *pmd=%08llx", (long long)pmd_val(*pmd));
99 99
100 if (pmd_none(*pmd)) 100 if (pmd_none(*pmd))
101 break; 101 break;
102 102
103 if (pmd_bad(*pmd)) { 103 if (pmd_bad(*pmd)) {
104 printk("(bad)"); 104 pr_cont("(bad)");
105 break; 105 break;
106 } 106 }
107 107
@@ -110,15 +110,15 @@ void show_pte(struct mm_struct *mm, unsigned long addr)
110 break; 110 break;
111 111
112 pte = pte_offset_map(pmd, addr); 112 pte = pte_offset_map(pmd, addr);
113 printk(", *pte=%08llx", (long long)pte_val(*pte)); 113 pr_cont(", *pte=%08llx", (long long)pte_val(*pte));
114#ifndef CONFIG_ARM_LPAE 114#ifndef CONFIG_ARM_LPAE
115 printk(", *ppte=%08llx", 115 pr_cont(", *ppte=%08llx",
116 (long long)pte_val(pte[PTE_HWTABLE_PTRS])); 116 (long long)pte_val(pte[PTE_HWTABLE_PTRS]));
117#endif 117#endif
118 pte_unmap(pte); 118 pte_unmap(pte);
119 } while(0); 119 } while(0);
120 120
121 printk("\n"); 121 pr_cont("\n");
122} 122}
123#else /* CONFIG_MMU */ 123#else /* CONFIG_MMU */
124void show_pte(struct mm_struct *mm, unsigned long addr) 124void show_pte(struct mm_struct *mm, unsigned long addr)
@@ -142,10 +142,9 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
142 * No handler, we'll have to terminate things with extreme prejudice. 142 * No handler, we'll have to terminate things with extreme prejudice.
143 */ 143 */
144 bust_spinlocks(1); 144 bust_spinlocks(1);
145 printk(KERN_ALERT 145 pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
146 "Unable to handle kernel %s at virtual address %08lx\n", 146 (addr < PAGE_SIZE) ? "NULL pointer dereference" :
147 (addr < PAGE_SIZE) ? "NULL pointer dereference" : 147 "paging request", addr);
148 "paging request", addr);
149 148
150 show_pte(mm, addr); 149 show_pte(mm, addr);
151 die("Oops", regs, fsr); 150 die("Oops", regs, fsr);
@@ -551,7 +550,7 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
551 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs)) 550 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
552 return; 551 return;
553 552
554 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n", 553 pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
555 inf->name, fsr, addr); 554 inf->name, fsr, addr);
556 555
557 info.si_signo = inf->sig; 556 info.si_signo = inf->sig;
@@ -583,7 +582,7 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
583 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs)) 582 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
584 return; 583 return;
585 584
586 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n", 585 pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
587 inf->name, ifsr, addr); 586 inf->name, ifsr, addr);
588 587
589 info.si_signo = inf->sig; 588 info.si_signo = inf->sig;
diff --git a/arch/arm/mm/flush.c b/arch/arm/mm/flush.c
index 265b836b3bd1..34b66af516ea 100644
--- a/arch/arm/mm/flush.c
+++ b/arch/arm/mm/flush.c
@@ -33,7 +33,7 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
33 asm( "mcrr p15, 0, %1, %0, c14\n" 33 asm( "mcrr p15, 0, %1, %0, c14\n"
34 " mcr p15, 0, %2, c7, c10, 4" 34 " mcr p15, 0, %2, c7, c10, 4"
35 : 35 :
36 : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero) 36 : "r" (to), "r" (to + PAGE_SIZE - 1), "r" (zero)
37 : "cc"); 37 : "cc");
38} 38}
39 39
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index e17ed00828d7..b98895d9fe57 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -18,19 +18,20 @@
18#include <asm/tlbflush.h> 18#include <asm/tlbflush.h>
19#include "mm.h" 19#include "mm.h"
20 20
21pte_t *fixmap_page_table;
22
23static inline void set_fixmap_pte(int idx, pte_t pte) 21static inline void set_fixmap_pte(int idx, pte_t pte)
24{ 22{
25 unsigned long vaddr = __fix_to_virt(idx); 23 unsigned long vaddr = __fix_to_virt(idx);
26 set_pte_ext(fixmap_page_table + idx, pte, 0); 24 pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
25
26 set_pte_ext(ptep, pte, 0);
27 local_flush_tlb_kernel_page(vaddr); 27 local_flush_tlb_kernel_page(vaddr);
28} 28}
29 29
30static inline pte_t get_fixmap_pte(unsigned long vaddr) 30static inline pte_t get_fixmap_pte(unsigned long vaddr)
31{ 31{
32 unsigned long idx = __virt_to_fix(vaddr); 32 pte_t *ptep = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
33 return *(fixmap_page_table + idx); 33
34 return *ptep;
34} 35}
35 36
36void *kmap(struct page *page) 37void *kmap(struct page *page)
@@ -84,7 +85,7 @@ void *kmap_atomic(struct page *page)
84 * With debugging enabled, kunmap_atomic forces that entry to 0. 85 * With debugging enabled, kunmap_atomic forces that entry to 0.
85 * Make sure it was indeed properly unmapped. 86 * Make sure it was indeed properly unmapped.
86 */ 87 */
87 BUG_ON(!pte_none(*(fixmap_page_table + idx))); 88 BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
88#endif 89#endif
89 /* 90 /*
90 * When debugging is off, kunmap_atomic leaves the previous mapping 91 * When debugging is off, kunmap_atomic leaves the previous mapping
@@ -137,7 +138,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
137 idx = type + KM_TYPE_NR * smp_processor_id(); 138 idx = type + KM_TYPE_NR * smp_processor_id();
138 vaddr = __fix_to_virt(idx); 139 vaddr = __fix_to_virt(idx);
139#ifdef CONFIG_DEBUG_HIGHMEM 140#ifdef CONFIG_DEBUG_HIGHMEM
140 BUG_ON(!pte_none(*(fixmap_page_table + idx))); 141 BUG_ON(!pte_none(get_fixmap_pte(vaddr)));
141#endif 142#endif
142 set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot)); 143 set_fixmap_pte(idx, pfn_pte(pfn, kmap_prot));
143 144
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 9481f85c56e6..98ad9c79ea0e 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -29,6 +29,7 @@
29#include <asm/prom.h> 29#include <asm/prom.h>
30#include <asm/sections.h> 30#include <asm/sections.h>
31#include <asm/setup.h> 31#include <asm/setup.h>
32#include <asm/system_info.h>
32#include <asm/tlb.h> 33#include <asm/tlb.h>
33#include <asm/fixmap.h> 34#include <asm/fixmap.h>
34 35
@@ -67,7 +68,7 @@ early_param("initrd", early_initrd);
67 68
68static int __init parse_tag_initrd(const struct tag *tag) 69static int __init parse_tag_initrd(const struct tag *tag)
69{ 70{
70 printk(KERN_WARNING "ATAG_INITRD is deprecated; " 71 pr_warn("ATAG_INITRD is deprecated; "
71 "please update your bootloader.\n"); 72 "please update your bootloader.\n");
72 phys_initrd_start = __virt_to_phys(tag->u.initrd.start); 73 phys_initrd_start = __virt_to_phys(tag->u.initrd.start);
73 phys_initrd_size = tag->u.initrd.size; 74 phys_initrd_size = tag->u.initrd.size;
@@ -544,7 +545,7 @@ void __init mem_init(void)
544#define MLM(b, t) b, t, ((t) - (b)) >> 20 545#define MLM(b, t) b, t, ((t) - (b)) >> 20
545#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K) 546#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)
546 547
547 printk(KERN_NOTICE "Virtual kernel memory layout:\n" 548 pr_notice("Virtual kernel memory layout:\n"
548 " vector : 0x%08lx - 0x%08lx (%4ld kB)\n" 549 " vector : 0x%08lx - 0x%08lx (%4ld kB)\n"
549#ifdef CONFIG_HAVE_TCM 550#ifdef CONFIG_HAVE_TCM
550 " DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n" 551 " DTCM : 0x%08lx - 0x%08lx (%4ld kB)\n"
@@ -570,7 +571,7 @@ void __init mem_init(void)
570 MLK(DTCM_OFFSET, (unsigned long) dtcm_end), 571 MLK(DTCM_OFFSET, (unsigned long) dtcm_end),
571 MLK(ITCM_OFFSET, (unsigned long) itcm_end), 572 MLK(ITCM_OFFSET, (unsigned long) itcm_end),
572#endif 573#endif
573 MLK(FIXADDR_START, FIXADDR_TOP), 574 MLK(FIXADDR_START, FIXADDR_END),
574 MLM(VMALLOC_START, VMALLOC_END), 575 MLM(VMALLOC_START, VMALLOC_END),
575 MLM(PAGE_OFFSET, (unsigned long)high_memory), 576 MLM(PAGE_OFFSET, (unsigned long)high_memory),
576#ifdef CONFIG_HIGHMEM 577#ifdef CONFIG_HIGHMEM
@@ -615,7 +616,145 @@ void __init mem_init(void)
615 } 616 }
616} 617}
617 618
618void free_initmem(void) 619#ifdef CONFIG_ARM_KERNMEM_PERMS
620struct section_perm {
621 unsigned long start;
622 unsigned long end;
623 pmdval_t mask;
624 pmdval_t prot;
625 pmdval_t clear;
626};
627
628static struct section_perm nx_perms[] = {
 629 /* Make page tables, etc. before _stext RW (set NX). */
630 {
631 .start = PAGE_OFFSET,
632 .end = (unsigned long)_stext,
633 .mask = ~PMD_SECT_XN,
634 .prot = PMD_SECT_XN,
635 },
636 /* Make init RW (set NX). */
637 {
638 .start = (unsigned long)__init_begin,
639 .end = (unsigned long)_sdata,
640 .mask = ~PMD_SECT_XN,
641 .prot = PMD_SECT_XN,
642 },
643#ifdef CONFIG_DEBUG_RODATA
644 /* Make rodata NX (set RO in ro_perms below). */
645 {
646 .start = (unsigned long)__start_rodata,
647 .end = (unsigned long)__init_begin,
648 .mask = ~PMD_SECT_XN,
649 .prot = PMD_SECT_XN,
650 },
651#endif
652};
653
654#ifdef CONFIG_DEBUG_RODATA
655static struct section_perm ro_perms[] = {
656 /* Make kernel code and rodata RX (set RO). */
657 {
658 .start = (unsigned long)_stext,
659 .end = (unsigned long)__init_begin,
660#ifdef CONFIG_ARM_LPAE
661 .mask = ~PMD_SECT_RDONLY,
662 .prot = PMD_SECT_RDONLY,
663#else
664 .mask = ~(PMD_SECT_APX | PMD_SECT_AP_WRITE),
665 .prot = PMD_SECT_APX | PMD_SECT_AP_WRITE,
666 .clear = PMD_SECT_AP_WRITE,
667#endif
668 },
669};
670#endif
671
672/*
673 * Updates section permissions only for the current mm (sections are
 674 * copied into each mm). During startup, this is the init_mm. It is
 675 * only safe to call this with preemption disabled, as under stop_machine().
676 */
677static inline void section_update(unsigned long addr, pmdval_t mask,
678 pmdval_t prot)
679{
680 struct mm_struct *mm;
681 pmd_t *pmd;
682
683 mm = current->active_mm;
684 pmd = pmd_offset(pud_offset(pgd_offset(mm, addr), addr), addr);
685
686#ifdef CONFIG_ARM_LPAE
687 pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
688#else
689 if (addr & SECTION_SIZE)
690 pmd[1] = __pmd((pmd_val(pmd[1]) & mask) | prot);
691 else
692 pmd[0] = __pmd((pmd_val(pmd[0]) & mask) | prot);
693#endif
694 flush_pmd_entry(pmd);
695 local_flush_tlb_kernel_range(addr, addr + SECTION_SIZE);
696}
697
698/* Make sure extended page tables are in use. */
699static inline bool arch_has_strict_perms(void)
700{
701 if (cpu_architecture() < CPU_ARCH_ARMv6)
702 return false;
703
704 return !!(get_cr() & CR_XP);
705}
706
707#define set_section_perms(perms, field) { \
708 size_t i; \
709 unsigned long addr; \
710 \
711 if (!arch_has_strict_perms()) \
712 return; \
713 \
714 for (i = 0; i < ARRAY_SIZE(perms); i++) { \
715 if (!IS_ALIGNED(perms[i].start, SECTION_SIZE) || \
716 !IS_ALIGNED(perms[i].end, SECTION_SIZE)) { \
717 pr_err("BUG: section %lx-%lx not aligned to %lx\n", \
718 perms[i].start, perms[i].end, \
719 SECTION_SIZE); \
720 continue; \
721 } \
722 \
723 for (addr = perms[i].start; \
724 addr < perms[i].end; \
725 addr += SECTION_SIZE) \
726 section_update(addr, perms[i].mask, \
727 perms[i].field); \
728 } \
729}
730
731static inline void fix_kernmem_perms(void)
732{
733 set_section_perms(nx_perms, prot);
734}
735
736#ifdef CONFIG_DEBUG_RODATA
737void mark_rodata_ro(void)
738{
739 set_section_perms(ro_perms, prot);
740}
741
742void set_kernel_text_rw(void)
743{
744 set_section_perms(ro_perms, clear);
745}
746
747void set_kernel_text_ro(void)
748{
749 set_section_perms(ro_perms, prot);
750}
751#endif /* CONFIG_DEBUG_RODATA */
752
753#else
754static inline void fix_kernmem_perms(void) { }
755#endif /* CONFIG_ARM_KERNMEM_PERMS */
756
757void free_tcmmem(void)
619{ 758{
620#ifdef CONFIG_HAVE_TCM 759#ifdef CONFIG_HAVE_TCM
621 extern char __tcm_start, __tcm_end; 760 extern char __tcm_start, __tcm_end;
@@ -623,6 +762,12 @@ void free_initmem(void)
623 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start); 762 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
624 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link"); 763 free_reserved_area(&__tcm_start, &__tcm_end, -1, "TCM link");
625#endif 764#endif
765}
766
767void free_initmem(void)
768{
769 fix_kernmem_perms();
770 free_tcmmem();
626 771
627 poison_init_mem(__init_begin, __init_end - __init_begin); 772 poison_init_mem(__init_begin, __init_end - __init_begin);
628 if (!machine_is_integrator() && !machine_is_cintegrator()) 773 if (!machine_is_integrator() && !machine_is_cintegrator())
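With CONFIG_DEBUG_RODATA enabled, code that must legitimately write to kernel text is expected to bracket the write with the helpers added above, with the caller providing serialisation (e.g. under stop_machine(), since only the current mm's section entries are updated). A hedged caller sketch; the declarations are assumed to live in asm/cacheflush.h, which this series also touches:

    #include <linux/kernel.h>
    #include <asm/cacheflush.h>     /* assumed home of set_kernel_text_*() */

    static void example_patch_text(u32 *insn, u32 new_insn)
    {
            set_kernel_text_rw();   /* lift the read-only mapping          */
            *insn = new_insn;       /* the actual text patch               */
            flush_icache_range((unsigned long)insn,
                               (unsigned long)(insn + 1));
            set_kernel_text_ro();   /* restore protection                  */
    }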
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 9f98cec7fe1e..cda7c40999b6 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -22,6 +22,7 @@
22#include <asm/cputype.h> 22#include <asm/cputype.h>
23#include <asm/sections.h> 23#include <asm/sections.h>
24#include <asm/cachetype.h> 24#include <asm/cachetype.h>
25#include <asm/fixmap.h>
25#include <asm/sections.h> 26#include <asm/sections.h>
26#include <asm/setup.h> 27#include <asm/setup.h>
27#include <asm/smp_plat.h> 28#include <asm/smp_plat.h>
@@ -52,6 +53,8 @@ EXPORT_SYMBOL(empty_zero_page);
52 */ 53 */
53pmd_t *top_pmd; 54pmd_t *top_pmd;
54 55
56pmdval_t user_pmd_table = _PAGE_USER_TABLE;
57
55#define CPOLICY_UNCACHED 0 58#define CPOLICY_UNCACHED 0
56#define CPOLICY_BUFFERED 1 59#define CPOLICY_BUFFERED 1
57#define CPOLICY_WRITETHROUGH 2 60#define CPOLICY_WRITETHROUGH 2
@@ -192,7 +195,7 @@ early_param("cachepolicy", early_cachepolicy);
192static int __init early_nocache(char *__unused) 195static int __init early_nocache(char *__unused)
193{ 196{
194 char *p = "buffered"; 197 char *p = "buffered";
195 printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p); 198 pr_warn("nocache is deprecated; use cachepolicy=%s\n", p);
196 early_cachepolicy(p); 199 early_cachepolicy(p);
197 return 0; 200 return 0;
198} 201}
@@ -201,7 +204,7 @@ early_param("nocache", early_nocache);
201static int __init early_nowrite(char *__unused) 204static int __init early_nowrite(char *__unused)
202{ 205{
203 char *p = "uncached"; 206 char *p = "uncached";
204 printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p); 207 pr_warn("nowb is deprecated; use cachepolicy=%s\n", p);
205 early_cachepolicy(p); 208 early_cachepolicy(p);
206 return 0; 209 return 0;
207} 210}
@@ -354,43 +357,28 @@ const struct mem_type *get_mem_type(unsigned int type)
354} 357}
355EXPORT_SYMBOL(get_mem_type); 358EXPORT_SYMBOL(get_mem_type);
356 359
357#define PTE_SET_FN(_name, pteop) \ 360/*
358static int pte_set_##_name(pte_t *ptep, pgtable_t token, unsigned long addr, \ 361 * To avoid TLB flush broadcasts, this uses local_flush_tlb_kernel_range().
359 void *data) \ 362 * As a result, this can only be called with preemption disabled, as under
360{ \ 363 * stop_machine().
361 pte_t pte = pteop(*ptep); \ 364 */
362\ 365void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
363 set_pte_ext(ptep, pte, 0); \ 366{
364 return 0; \ 367 unsigned long vaddr = __fix_to_virt(idx);
365} \ 368 pte_t *pte = pte_offset_kernel(pmd_off_k(vaddr), vaddr);
366
367#define SET_MEMORY_FN(_name, callback) \
368int set_memory_##_name(unsigned long addr, int numpages) \
369{ \
370 unsigned long start = addr; \
371 unsigned long size = PAGE_SIZE*numpages; \
372 unsigned end = start + size; \
373\
374 if (start < MODULES_VADDR || start >= MODULES_END) \
375 return -EINVAL;\
376\
377 if (end < MODULES_VADDR || end >= MODULES_END) \
378 return -EINVAL; \
379\
380 apply_to_page_range(&init_mm, start, size, callback, NULL); \
381 flush_tlb_kernel_range(start, end); \
382 return 0;\
383}
384 369
385PTE_SET_FN(ro, pte_wrprotect) 370 /* Make sure fixmap region does not exceed available allocation. */
386PTE_SET_FN(rw, pte_mkwrite) 371 BUILD_BUG_ON(FIXADDR_START + (__end_of_fixed_addresses * PAGE_SIZE) >
387PTE_SET_FN(x, pte_mkexec) 372 FIXADDR_END);
388PTE_SET_FN(nx, pte_mknexec) 373 BUG_ON(idx >= __end_of_fixed_addresses);
389 374
390SET_MEMORY_FN(ro, pte_set_ro) 375 if (pgprot_val(prot))
391SET_MEMORY_FN(rw, pte_set_rw) 376 set_pte_at(NULL, vaddr, pte,
392SET_MEMORY_FN(x, pte_set_x) 377 pfn_pte(phys >> PAGE_SHIFT, prot));
393SET_MEMORY_FN(nx, pte_set_nx) 378 else
379 pte_clear(NULL, vaddr, pte);
380 local_flush_tlb_kernel_range(vaddr, vaddr + PAGE_SIZE);
381}
394 382
395/* 383/*
396 * Adjust the PMD section entries according to the CPU in use. 384 * Adjust the PMD section entries according to the CPU in use.
@@ -528,14 +516,23 @@ static void __init build_mem_type_table(void)
528 hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte; 516 hyp_device_pgprot = mem_types[MT_DEVICE].prot_pte;
529 s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2; 517 s2_device_pgprot = mem_types[MT_DEVICE].prot_pte_s2;
530 518
519#ifndef CONFIG_ARM_LPAE
531 /* 520 /*
532 * We don't use domains on ARMv6 (since this causes problems with 521 * We don't use domains on ARMv6 (since this causes problems with
533 * v6/v7 kernels), so we must use a separate memory type for user 522 * v6/v7 kernels), so we must use a separate memory type for user
534 * r/o, kernel r/w to map the vectors page. 523 * r/o, kernel r/w to map the vectors page.
535 */ 524 */
536#ifndef CONFIG_ARM_LPAE
537 if (cpu_arch == CPU_ARCH_ARMv6) 525 if (cpu_arch == CPU_ARCH_ARMv6)
538 vecs_pgprot |= L_PTE_MT_VECTORS; 526 vecs_pgprot |= L_PTE_MT_VECTORS;
527
528 /*
 529 * Check whether the PXN bit is supported in the Short-descriptor
 530 * translation table format descriptors.
531 */
532 if (cpu_arch == CPU_ARCH_ARMv7 &&
533 (read_cpuid_ext(CPUID_EXT_MMFR0) & 0xF) == 4) {
534 user_pmd_table |= PMD_PXNTABLE;
535 }
539#endif 536#endif
540 537
541 /* 538 /*
@@ -605,6 +602,11 @@ static void __init build_mem_type_table(void)
605 } 602 }
606 kern_pgprot |= PTE_EXT_AF; 603 kern_pgprot |= PTE_EXT_AF;
607 vecs_pgprot |= PTE_EXT_AF; 604 vecs_pgprot |= PTE_EXT_AF;
605
606 /*
607 * Set PXN for user mappings
608 */
609 user_pgprot |= PTE_EXT_PXN;
608#endif 610#endif
609 611
610 for (i = 0; i < 16; i++) { 612 for (i = 0; i < 16; i++) {
@@ -786,8 +788,7 @@ static void __init create_36bit_mapping(struct map_desc *md,
786 length = PAGE_ALIGN(md->length); 788 length = PAGE_ALIGN(md->length);
787 789
788 if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) { 790 if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
789 printk(KERN_ERR "MM: CPU does not support supersection " 791 pr_err("MM: CPU does not support supersection mapping for 0x%08llx at 0x%08lx\n",
790 "mapping for 0x%08llx at 0x%08lx\n",
791 (long long)__pfn_to_phys((u64)md->pfn), addr); 792 (long long)__pfn_to_phys((u64)md->pfn), addr);
792 return; 793 return;
793 } 794 }
@@ -799,15 +800,13 @@ static void __init create_36bit_mapping(struct map_desc *md,
799 * of the actual domain assignments in use. 800 * of the actual domain assignments in use.
800 */ 801 */
801 if (type->domain) { 802 if (type->domain) {
802 printk(KERN_ERR "MM: invalid domain in supersection " 803 pr_err("MM: invalid domain in supersection mapping for 0x%08llx at 0x%08lx\n",
803 "mapping for 0x%08llx at 0x%08lx\n",
804 (long long)__pfn_to_phys((u64)md->pfn), addr); 804 (long long)__pfn_to_phys((u64)md->pfn), addr);
805 return; 805 return;
806 } 806 }
807 807
808 if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) { 808 if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
809 printk(KERN_ERR "MM: cannot create mapping for 0x%08llx" 809 pr_err("MM: cannot create mapping for 0x%08llx at 0x%08lx invalid alignment\n",
810 " at 0x%08lx invalid alignment\n",
811 (long long)__pfn_to_phys((u64)md->pfn), addr); 810 (long long)__pfn_to_phys((u64)md->pfn), addr);
812 return; 811 return;
813 } 812 }
@@ -850,18 +849,16 @@ static void __init create_mapping(struct map_desc *md)
850 pgd_t *pgd; 849 pgd_t *pgd;
851 850
852 if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) { 851 if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
853 printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx" 852 pr_warn("BUG: not creating mapping for 0x%08llx at 0x%08lx in user region\n",
854 " at 0x%08lx in user region\n", 853 (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
855 (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
856 return; 854 return;
857 } 855 }
858 856
859 if ((md->type == MT_DEVICE || md->type == MT_ROM) && 857 if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
860 md->virtual >= PAGE_OFFSET && 858 md->virtual >= PAGE_OFFSET &&
861 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) { 859 (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
862 printk(KERN_WARNING "BUG: mapping for 0x%08llx" 860 pr_warn("BUG: mapping for 0x%08llx at 0x%08lx out of vmalloc space\n",
863 " at 0x%08lx out of vmalloc space\n", 861 (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
864 (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
865 } 862 }
866 863
867 type = &mem_types[md->type]; 864 type = &mem_types[md->type];
@@ -881,9 +878,8 @@ static void __init create_mapping(struct map_desc *md)
881 length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); 878 length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
882 879
883 if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) { 880 if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
884 printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not " 881 pr_warn("BUG: map for 0x%08llx at 0x%08lx can not be mapped using pages, ignoring.\n",
885 "be mapped using pages, ignoring.\n", 882 (long long)__pfn_to_phys(md->pfn), addr);
886 (long long)__pfn_to_phys(md->pfn), addr);
887 return; 883 return;
888 } 884 }
889 885
@@ -1053,15 +1049,13 @@ static int __init early_vmalloc(char *arg)
1053 1049
1054 if (vmalloc_reserve < SZ_16M) { 1050 if (vmalloc_reserve < SZ_16M) {
1055 vmalloc_reserve = SZ_16M; 1051 vmalloc_reserve = SZ_16M;
1056 printk(KERN_WARNING 1052 pr_warn("vmalloc area too small, limiting to %luMB\n",
1057 "vmalloc area too small, limiting to %luMB\n",
1058 vmalloc_reserve >> 20); 1053 vmalloc_reserve >> 20);
1059 } 1054 }
1060 1055
1061 if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) { 1056 if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
1062 vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M); 1057 vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
1063 printk(KERN_WARNING 1058 pr_warn("vmalloc area is too big, limiting to %luMB\n",
1064 "vmalloc area is too big, limiting to %luMB\n",
1065 vmalloc_reserve >> 20); 1059 vmalloc_reserve >> 20);
1066 } 1060 }
1067 1061
@@ -1094,7 +1088,7 @@ void __init sanity_check_meminfo(void)
1094 1088
1095 if (highmem) { 1089 if (highmem) {
1096 pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n", 1090 pr_notice("Ignoring RAM at %pa-%pa (!CONFIG_HIGHMEM)\n",
1097 &block_start, &block_end); 1091 &block_start, &block_end);
1098 memblock_remove(reg->base, reg->size); 1092 memblock_remove(reg->base, reg->size);
1099 continue; 1093 continue;
1100 } 1094 }
@@ -1103,7 +1097,7 @@ void __init sanity_check_meminfo(void)
1103 phys_addr_t overlap_size = reg->size - size_limit; 1097 phys_addr_t overlap_size = reg->size - size_limit;
1104 1098
1105 pr_notice("Truncating RAM at %pa-%pa to -%pa", 1099 pr_notice("Truncating RAM at %pa-%pa to -%pa",
1106 &block_start, &block_end, &vmalloc_limit); 1100 &block_start, &block_end, &vmalloc_limit);
1107 memblock_remove(vmalloc_limit, overlap_size); 1101 memblock_remove(vmalloc_limit, overlap_size);
1108 block_end = vmalloc_limit; 1102 block_end = vmalloc_limit;
1109 } 1103 }
@@ -1326,10 +1320,10 @@ static void __init kmap_init(void)
1326#ifdef CONFIG_HIGHMEM 1320#ifdef CONFIG_HIGHMEM
1327 pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE), 1321 pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
1328 PKMAP_BASE, _PAGE_KERNEL_TABLE); 1322 PKMAP_BASE, _PAGE_KERNEL_TABLE);
1329
1330 fixmap_page_table = early_pte_alloc(pmd_off_k(FIXADDR_START),
1331 FIXADDR_START, _PAGE_KERNEL_TABLE);
1332#endif 1323#endif
1324
1325 early_pte_alloc(pmd_off_k(FIXADDR_START), FIXADDR_START,
1326 _PAGE_KERNEL_TABLE);
1333} 1327}
1334 1328
1335static void __init map_lowmem(void) 1329static void __init map_lowmem(void)
@@ -1349,13 +1343,20 @@ static void __init map_lowmem(void)
1349 if (start >= end) 1343 if (start >= end)
1350 break; 1344 break;
1351 1345
1352 if (end < kernel_x_start || start >= kernel_x_end) { 1346 if (end < kernel_x_start) {
1353 map.pfn = __phys_to_pfn(start); 1347 map.pfn = __phys_to_pfn(start);
1354 map.virtual = __phys_to_virt(start); 1348 map.virtual = __phys_to_virt(start);
1355 map.length = end - start; 1349 map.length = end - start;
1356 map.type = MT_MEMORY_RWX; 1350 map.type = MT_MEMORY_RWX;
1357 1351
1358 create_mapping(&map); 1352 create_mapping(&map);
1353 } else if (start >= kernel_x_end) {
1354 map.pfn = __phys_to_pfn(start);
1355 map.virtual = __phys_to_virt(start);
1356 map.length = end - start;
1357 map.type = MT_MEMORY_RW;
1358
1359 create_mapping(&map);
1359 } else { 1360 } else {
1360 /* This better cover the entire kernel */ 1361 /* This better cover the entire kernel */
1361 if (start < kernel_x_start) { 1362 if (start < kernel_x_start) {
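The __set_fixmap() added above installs (or, with an empty pgprot, clears) a single fixmap PTE using only local TLB maintenance, which is why it must run with preemption disabled. A hedged usage sketch; FIX_TEXT_POKE0 and the set_fixmap()/clear_fixmap() wrappers from asm-generic/fixmap.h are assumptions about the rest of the tree, not shown in this hunk:

    #include <asm/fixmap.h>         /* fix_to_virt(), set_fixmap(), ...    */

    /* Map a physical page at an assumed text-poke slot, then tear it down. */
    static void *poke_map(phys_addr_t phys)
    {
            set_fixmap(FIX_TEXT_POKE0, phys);       /* FIXMAP_PAGE_NORMAL  */
            return (void *)fix_to_virt(FIX_TEXT_POKE0);
    }

    static void poke_unmap(void)
    {
            clear_fixmap(FIX_TEXT_POKE0);   /* pte_clear + local TLB flush */
    }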
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
new file mode 100644
index 000000000000..004e35cdcfff
--- /dev/null
+++ b/arch/arm/mm/pageattr.c
@@ -0,0 +1,91 @@
1/*
2 * Copyright (c) 2014, The Linux Foundation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 and
6 * only version 2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 */
13#include <linux/mm.h>
14#include <linux/module.h>
15
16#include <asm/pgtable.h>
17#include <asm/tlbflush.h>
18
19struct page_change_data {
20 pgprot_t set_mask;
21 pgprot_t clear_mask;
22};
23
24static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
25 void *data)
26{
27 struct page_change_data *cdata = data;
28 pte_t pte = *ptep;
29
30 pte = clear_pte_bit(pte, cdata->clear_mask);
31 pte = set_pte_bit(pte, cdata->set_mask);
32
33 set_pte_ext(ptep, pte, 0);
34 return 0;
35}
36
37static int change_memory_common(unsigned long addr, int numpages,
38 pgprot_t set_mask, pgprot_t clear_mask)
39{
40 unsigned long start = addr;
41 unsigned long size = PAGE_SIZE*numpages;
42 unsigned long end = start + size;
43 int ret;
44 struct page_change_data data;
45
46 if (!IS_ALIGNED(addr, PAGE_SIZE)) {
47 start &= PAGE_MASK;
48 end = start + size;
49 WARN_ON_ONCE(1);
50 }
51
52 if (!is_module_address(start) || !is_module_address(end - 1))
53 return -EINVAL;
54
55 data.set_mask = set_mask;
56 data.clear_mask = clear_mask;
57
58 ret = apply_to_page_range(&init_mm, start, size, change_page_range,
59 &data);
60
61 flush_tlb_kernel_range(start, end);
62 return ret;
63}
64
65int set_memory_ro(unsigned long addr, int numpages)
66{
67 return change_memory_common(addr, numpages,
68 __pgprot(L_PTE_RDONLY),
69 __pgprot(0));
70}
71
72int set_memory_rw(unsigned long addr, int numpages)
73{
74 return change_memory_common(addr, numpages,
75 __pgprot(0),
76 __pgprot(L_PTE_RDONLY));
77}
78
79int set_memory_nx(unsigned long addr, int numpages)
80{
81 return change_memory_common(addr, numpages,
82 __pgprot(L_PTE_XN),
83 __pgprot(0));
84}
85
86int set_memory_x(unsigned long addr, int numpages)
87{
88 return change_memory_common(addr, numpages,
89 __pgprot(0),
90 __pgprot(L_PTE_XN));
91}
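Like the macro-generated versions they replace, these helpers accept only module-area addresses (the is_module_address() checks above). A hypothetical caller sketch, assuming the set_memory_*() declarations in asm/cacheflush.h:

    #include <linux/mm.h>           /* PAGE_SIZE                           */
    #include <linux/moduleloader.h> /* module_alloc()                      */
    #include <asm/cacheflush.h>     /* assumed decls of set_memory_*()     */

    static void *alloc_ro_page(void)
    {
            void *p = module_alloc(PAGE_SIZE);      /* module-range memory */

            if (p)
                    set_memory_ro((unsigned long)p, 1); /* one RO page     */
            return p;       /* call set_memory_rw() again before freeing   */
    }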
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index 22ac2a6fbfe3..8b4ee5e81c14 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -591,9 +591,10 @@ __krait_proc_info:
591 /* 591 /*
592 * Some Krait processors don't indicate support for SDIV and UDIV 592 * Some Krait processors don't indicate support for SDIV and UDIV
593 * instructions in the ARM instruction set, even though they actually 593 * instructions in the ARM instruction set, even though they actually
594 * do support them. 594 * do support them. They also don't indicate support for fused multiply
595 * instructions even though they actually do support them.
595 */ 596 */
596 __v7_proc __v7_setup, hwcaps = HWCAP_IDIV 597 __v7_proc __v7_setup, hwcaps = HWCAP_IDIV | HWCAP_VFPv4
597 .size __krait_proc_info, . - __krait_proc_info 598 .size __krait_proc_info, . - __krait_proc_info
598 599
599 /* 600 /*
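The extra HWCAP_VFPv4 bit is what userspace sees in its auxiliary vector; a hedged userspace check (plain Linux/glibc API, nothing from this patch):

    #include <stdio.h>
    #include <sys/auxv.h>           /* getauxval()                 */
    #include <asm/hwcap.h>          /* HWCAP_IDIV, HWCAP_VFPv4     */

    int main(void)
    {
            unsigned long hwcap = getauxval(AT_HWCAP);

            printf("idiv:  %s\n", (hwcap & HWCAP_IDIV)  ? "yes" : "no");
            printf("vfpv4: %s\n", (hwcap & HWCAP_VFPv4) ? "yes" : "no");
            return 0;
    }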
diff --git a/arch/arm/nwfpe/fpmodule.c b/arch/arm/nwfpe/fpmodule.c
index 4e729f055a81..ec717c190e2c 100644
--- a/arch/arm/nwfpe/fpmodule.c
+++ b/arch/arm/nwfpe/fpmodule.c
@@ -86,20 +86,20 @@ extern void nwfpe_enter(void);
86static int __init fpe_init(void) 86static int __init fpe_init(void)
87{ 87{
88 if (sizeof(FPA11) > sizeof(union fp_state)) { 88 if (sizeof(FPA11) > sizeof(union fp_state)) {
89 printk(KERN_ERR "nwfpe: bad structure size\n"); 89 pr_err("nwfpe: bad structure size\n");
90 return -EINVAL; 90 return -EINVAL;
91 } 91 }
92 92
93 if (sizeof(FPREG) != 12) { 93 if (sizeof(FPREG) != 12) {
94 printk(KERN_ERR "nwfpe: bad register size\n"); 94 pr_err("nwfpe: bad register size\n");
95 return -EINVAL; 95 return -EINVAL;
96 } 96 }
97 if (fpe_type[0] && strcmp(fpe_type, "nwfpe")) 97 if (fpe_type[0] && strcmp(fpe_type, "nwfpe"))
98 return 0; 98 return 0;
99 99
100 /* Display title, version and copyright information. */ 100 /* Display title, version and copyright information. */
101 printk(KERN_WARNING "NetWinder Floating Point Emulator V0.97 (" 101 pr_info("NetWinder Floating Point Emulator V0.97 ("
102 NWFPE_BITS " precision)\n"); 102 NWFPE_BITS " precision)\n");
103 103
104 thread_register_notifier(&nwfpe_notifier_block); 104 thread_register_notifier(&nwfpe_notifier_block);
105 105
diff --git a/arch/arm/vfp/vfphw.S b/arch/arm/vfp/vfphw.S
index cda654cbf2c2..f74a8f7e5f84 100644
--- a/arch/arm/vfp/vfphw.S
+++ b/arch/arm/vfp/vfphw.S
@@ -197,6 +197,12 @@ look_for_VFP_exceptions:
197 tst r5, #FPSCR_IXE 197 tst r5, #FPSCR_IXE
198 bne process_exception 198 bne process_exception
199 199
200 tst r5, #FPSCR_LENGTH_MASK
201 beq skip
202 orr r1, r1, #FPEXC_DEX
203 b process_exception
204skip:
205
 200 @ Fall through - hand on to the next handler - appropriate coproc instr 206
201 @ not recognised by VFP 207 @ not recognised by VFP
202 208
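
The added instructions handle short-vector operations: if the FPSCR LEN field is non-zero, the operation cannot be completed by hardware without vector support, so FPEXC_DEX is set and the instruction is routed to the emulation path instead of falling through. A C restatement of the added assembly (illustrative only; in the code above r5 holds FPSCR and r1 the FPEXC value being built up):

if (fpscr & FPSCR_LENGTH_MASK) {	/* short-vector instruction? */
	fpexc |= FPEXC_DEX;		/* mark it for emulation */
	goto process_exception;
}
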
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 2f37e1d6cb45..f6e4d56eda00 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -738,63 +738,73 @@ static int __init vfp_init(void)
738 vfp_vector = vfp_null_entry; 738 vfp_vector = vfp_null_entry;
739 739
740 pr_info("VFP support v0.3: "); 740 pr_info("VFP support v0.3: ");
741 if (VFP_arch) 741 if (VFP_arch) {
742 pr_cont("not present\n"); 742 pr_cont("not present\n");
743 else if (vfpsid & FPSID_NODOUBLE) { 743 return 0;
744 pr_cont("no double precision support\n"); 744 /* Extract the architecture on CPUID scheme */
745 } else { 745 } else if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
746 hotcpu_notifier(vfp_hotplug, 0); 746 VFP_arch = vfpsid & FPSID_CPUID_ARCH_MASK;
747 747 VFP_arch >>= FPSID_ARCH_BIT;
748 VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT; /* Extract the architecture version */
749 pr_cont("implementor %02x architecture %d part %02x variant %x rev %x\n",
750 (vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
751 (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
752 (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
753 (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
754 (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);
755
756 vfp_vector = vfp_support_entry;
757
758 thread_register_notifier(&vfp_notifier_block);
759 vfp_pm_init();
760
761 /*
762 * We detected VFP, and the support code is
763 * in place; report VFP support to userspace.
764 */
765 elf_hwcap |= HWCAP_VFP;
766#ifdef CONFIG_VFPv3
767 if (VFP_arch >= 2) {
768 elf_hwcap |= HWCAP_VFPv3;
769
770 /*
771 * Check for VFPv3 D16 and VFPv4 D16. CPUs in
772 * this configuration only have 16 x 64bit
773 * registers.
774 */
775 if (((fmrx(MVFR0) & MVFR0_A_SIMD_MASK)) == 1)
776 elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */
777 else
778 elf_hwcap |= HWCAP_VFPD32;
779 }
780#endif
781 /* 748 /*
782 * Check for the presence of the Advanced SIMD 749 * Check for the presence of the Advanced SIMD
783 * load/store instructions, integer and single 750 * load/store instructions, integer and single
784 * precision floating point operations. Only check 751 * precision floating point operations. Only check
785 * for NEON if the hardware has the MVFR registers. 752 * for NEON if the hardware has the MVFR registers.
786 */ 753 */
787 if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) { 754 if (IS_ENABLED(CONFIG_NEON) &&
788#ifdef CONFIG_NEON 755 (fmrx(MVFR1) & 0x000fff00) == 0x00011100)
789 if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100) 756 elf_hwcap |= HWCAP_NEON;
790 elf_hwcap |= HWCAP_NEON; 757
791#endif 758 if (IS_ENABLED(CONFIG_VFPv3)) {
792#ifdef CONFIG_VFPv3 759 u32 mvfr0 = fmrx(MVFR0);
760 if (((mvfr0 & MVFR0_DP_MASK) >> MVFR0_DP_BIT) == 0x2 ||
761 ((mvfr0 & MVFR0_SP_MASK) >> MVFR0_SP_BIT) == 0x2) {
762 elf_hwcap |= HWCAP_VFPv3;
763 /*
764 * Check for VFPv3 D16 and VFPv4 D16. CPUs in
765 * this configuration only have 16 x 64bit
766 * registers.
767 */
768 if ((mvfr0 & MVFR0_A_SIMD_MASK) == 1)
769 /* also v4-D16 */
770 elf_hwcap |= HWCAP_VFPv3D16;
771 else
772 elf_hwcap |= HWCAP_VFPD32;
773 }
774
793 if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000) 775 if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000)
794 elf_hwcap |= HWCAP_VFPv4; 776 elf_hwcap |= HWCAP_VFPv4;
795#endif
796 } 777 }
778 /* Extract the architecture version on pre-cpuid scheme */
779 } else {
780 if (vfpsid & FPSID_NODOUBLE) {
781 pr_cont("no double precision support\n");
782 return 0;
783 }
784
785 VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;
797 } 786 }
787
788 hotcpu_notifier(vfp_hotplug, 0);
789
790 vfp_vector = vfp_support_entry;
791
792 thread_register_notifier(&vfp_notifier_block);
793 vfp_pm_init();
794
795 /*
796 * We detected VFP, and the support code is
797 * in place; report VFP support to userspace.
798 */
799 elf_hwcap |= HWCAP_VFP;
800
801 pr_cont("implementor %02x architecture %d part %02x variant %x rev %x\n",
802 (vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
803 VFP_arch,
804 (vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
805 (vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
806 (vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);
807
798 return 0; 808 return 0;
799} 809}
800 810
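
After this restructuring, vfp_init() takes one of three paths before the common registration tail: bail out if no VFP is present, derive the architecture and hwcaps from FPSID plus MVFR0/MVFR1 on CPUID-scheme CPUs, or fall back to the FPSID-only checks on pre-CPUID CPUs. A condensed sketch of the new control flow; the helper names are hypothetical, not the literal code:

if (VFP_arch)
	return 0;				/* no VFP present */
if (cpu_uses_cpuid_scheme()) {
	VFP_arch = fpsid_arch_field(vfpsid);
	set_hwcaps_from_mvfr_regs();		/* NEON/VFPv3/D16/VFPv4 bits */
} else {
	if (vfpsid & FPSID_NODOUBLE)
		return 0;			/* single precision only */
	VFP_arch = fpsid_arch_field(vfpsid);
}
register_vfp_support();			/* notifiers, PM init, HWCAP_VFP */
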
diff --git a/arch/arm/vfp/vfpsingle.c b/arch/arm/vfp/vfpsingle.c
index 4f96c1617aae..f0465ba0f221 100644
--- a/arch/arm/vfp/vfpsingle.c
+++ b/arch/arm/vfp/vfpsingle.c
@@ -290,7 +290,7 @@ u32 vfp_estimate_sqrt_significand(u32 exponent, u32 significand)
290 u32 z, a; 290 u32 z, a;
291 291
292 if ((significand & 0xc0000000) != 0x40000000) { 292 if ((significand & 0xc0000000) != 0x40000000) {
293 printk(KERN_WARNING "VFP: estimate_sqrt: invalid significand\n"); 293 pr_warn("VFP: estimate_sqrt: invalid significand\n");
294 } 294 }
295 295
296 a = significand << 1; 296 a = significand << 1;
diff --git a/arch/arm/xen/Makefile b/arch/arm/xen/Makefile
index 1f85bfe6b470..12969523414c 100644
--- a/arch/arm/xen/Makefile
+++ b/arch/arm/xen/Makefile
@@ -1 +1 @@
obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o mm32.o obj-y := enlighten.o hypercall.o grant-table.o p2m.o mm.o
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 0e15f011f9c8..c7ca936ebd99 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -261,11 +261,6 @@ static int __init xen_guest_init(void)
261 261
262 xen_setup_features(); 262 xen_setup_features();
263 263
264 if (!xen_feature(XENFEAT_grant_map_identity)) {
265 pr_warn("Please upgrade your Xen.\n"
266 "If your platform has any non-coherent DMA devices, they won't work properly.\n");
267 }
268
269 if (xen_feature(XENFEAT_dom0)) 264 if (xen_feature(XENFEAT_dom0))
270 xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED; 265 xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
271 else 266 else
diff --git a/arch/arm/xen/mm.c b/arch/arm/xen/mm.c
index b0e77de99148..351b24a979d4 100644
--- a/arch/arm/xen/mm.c
+++ b/arch/arm/xen/mm.c
@@ -1,6 +1,10 @@
1#include <linux/cpu.h>
2#include <linux/dma-mapping.h>
1#include <linux/bootmem.h> 3#include <linux/bootmem.h>
2#include <linux/gfp.h> 4#include <linux/gfp.h>
5#include <linux/highmem.h>
3#include <linux/export.h> 6#include <linux/export.h>
7#include <linux/of_address.h>
4#include <linux/slab.h> 8#include <linux/slab.h>
5#include <linux/types.h> 9#include <linux/types.h>
6#include <linux/dma-mapping.h> 10#include <linux/dma-mapping.h>
@@ -8,6 +12,7 @@
8#include <linux/swiotlb.h> 12#include <linux/swiotlb.h>
9 13
10#include <xen/xen.h> 14#include <xen/xen.h>
15#include <xen/interface/grant_table.h>
11#include <xen/interface/memory.h> 16#include <xen/interface/memory.h>
12#include <xen/swiotlb-xen.h> 17#include <xen/swiotlb-xen.h>
13 18
@@ -16,6 +21,114 @@
16#include <asm/xen/hypercall.h> 21#include <asm/xen/hypercall.h>
17#include <asm/xen/interface.h> 22#include <asm/xen/interface.h>
18 23
24enum dma_cache_op {
25 DMA_UNMAP,
26 DMA_MAP,
27};
28static bool hypercall_cflush = false;
29
30/* functions called by SWIOTLB */
31
32static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
33 size_t size, enum dma_data_direction dir, enum dma_cache_op op)
34{
35 struct gnttab_cache_flush cflush;
36 unsigned long pfn;
37 size_t left = size;
38
39 pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
40 offset %= PAGE_SIZE;
41
42 do {
43 size_t len = left;
44
45 /* buffers in highmem or foreign pages cannot cross page
46 * boundaries */
47 if (len + offset > PAGE_SIZE)
48 len = PAGE_SIZE - offset;
49
50 cflush.op = 0;
51 cflush.a.dev_bus_addr = pfn << PAGE_SHIFT;
52 cflush.offset = offset;
53 cflush.length = len;
54
55 if (op == DMA_UNMAP && dir != DMA_TO_DEVICE)
56 cflush.op = GNTTAB_CACHE_INVAL;
57 if (op == DMA_MAP) {
58 if (dir == DMA_FROM_DEVICE)
59 cflush.op = GNTTAB_CACHE_INVAL;
60 else
61 cflush.op = GNTTAB_CACHE_CLEAN;
62 }
63 if (cflush.op)
64 HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);
65
66 offset = 0;
67 pfn++;
68 left -= len;
69 } while (left);
70}
71
72static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
73 size_t size, enum dma_data_direction dir)
74{
75 dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_UNMAP);
76}
77
78static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
79 size_t size, enum dma_data_direction dir)
80{
81 dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, DMA_MAP);
82}
83
84void __xen_dma_map_page(struct device *hwdev, struct page *page,
85 dma_addr_t dev_addr, unsigned long offset, size_t size,
86 enum dma_data_direction dir, struct dma_attrs *attrs)
87{
88 if (is_device_dma_coherent(hwdev))
89 return;
90 if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
91 return;
92
93 __xen_dma_page_cpu_to_dev(hwdev, dev_addr, size, dir);
94}
95
96void __xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
97 size_t size, enum dma_data_direction dir,
98 struct dma_attrs *attrs)
99
100{
101 if (is_device_dma_coherent(hwdev))
102 return;
103 if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
104 return;
105
106 __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
107}
108
109void __xen_dma_sync_single_for_cpu(struct device *hwdev,
110 dma_addr_t handle, size_t size, enum dma_data_direction dir)
111{
112 if (is_device_dma_coherent(hwdev))
113 return;
114 __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
115}
116
117void __xen_dma_sync_single_for_device(struct device *hwdev,
118 dma_addr_t handle, size_t size, enum dma_data_direction dir)
119{
120 if (is_device_dma_coherent(hwdev))
121 return;
122 __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
123}
124
125bool xen_arch_need_swiotlb(struct device *dev,
126 unsigned long pfn,
127 unsigned long mfn)
128{
129 return (!hypercall_cflush && (pfn != mfn) && !is_device_dma_coherent(dev));
130}
131
19int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, 132int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
20 unsigned int address_bits, 133 unsigned int address_bits,
21 dma_addr_t *dma_handle) 134 dma_addr_t *dma_handle)
@@ -56,10 +169,18 @@ static struct dma_map_ops xen_swiotlb_dma_ops = {
56 169
57int __init xen_mm_init(void) 170int __init xen_mm_init(void)
58{ 171{
172 struct gnttab_cache_flush cflush;
59 if (!xen_initial_domain()) 173 if (!xen_initial_domain())
60 return 0; 174 return 0;
61 xen_swiotlb_init(1, false); 175 xen_swiotlb_init(1, false);
62 xen_dma_ops = &xen_swiotlb_dma_ops; 176 xen_dma_ops = &xen_swiotlb_dma_ops;
177
178 cflush.op = 0;
179 cflush.a.dev_bus_addr = 0;
180 cflush.offset = 0;
181 cflush.length = 0;
182 if (HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1) != -ENOSYS)
183 hypercall_cflush = true;
63 return 0; 184 return 0;
64} 185}
65arch_initcall(xen_mm_init); 186arch_initcall(xen_mm_init);
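
Two details of the new mm.c are worth spelling out. First, the init-time probe issues a zero-length GNTTABOP_cache_flush, so only the return code matters: anything other than -ENOSYS means the hypervisor implements the op. Second, dma_cache_maint() never lets a cache-flush hypercall cross a page boundary: the first iteration is clamped to the end of the page containing the starting offset, and later iterations start at offset 0. A standalone model of the clamping, assuming 4 KiB pages (illustrative only):

/* e.g. size = 0x600 at page offset 0xe00 yields two hypercalls:
 *   pfn N:   offset 0xe00, length 0x200	(clamped to page end)
 *   pfn N+1: offset 0x000, length 0x400 */
size_t left = size, off = offset % 4096;
while (left) {
	size_t len = left;

	if (len + off > 4096)
		len = 4096 - off;	/* stay within one page */
	/* issue one GNTTABOP_cache_flush for [off, off + len) */
	off = 0;
	left -= len;
}
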
diff --git a/arch/arm/xen/mm32.c b/arch/arm/xen/mm32.c
deleted file mode 100644
index 3b99860fd7ae..000000000000
--- a/arch/arm/xen/mm32.c
+++ /dev/null
@@ -1,202 +0,0 @@
1#include <linux/cpu.h>
2#include <linux/dma-mapping.h>
3#include <linux/gfp.h>
4#include <linux/highmem.h>
5
6#include <xen/features.h>
7
8static DEFINE_PER_CPU(unsigned long, xen_mm32_scratch_virt);
9static DEFINE_PER_CPU(pte_t *, xen_mm32_scratch_ptep);
10
11static int alloc_xen_mm32_scratch_page(int cpu)
12{
13 struct page *page;
14 unsigned long virt;
15 pmd_t *pmdp;
16 pte_t *ptep;
17
18 if (per_cpu(xen_mm32_scratch_ptep, cpu) != NULL)
19 return 0;
20
21 page = alloc_page(GFP_KERNEL);
22 if (page == NULL) {
23 pr_warn("Failed to allocate xen_mm32_scratch_page for cpu %d\n", cpu);
24 return -ENOMEM;
25 }
26
27 virt = (unsigned long)__va(page_to_phys(page));
28 pmdp = pmd_offset(pud_offset(pgd_offset_k(virt), virt), virt);
29 ptep = pte_offset_kernel(pmdp, virt);
30
31 per_cpu(xen_mm32_scratch_virt, cpu) = virt;
32 per_cpu(xen_mm32_scratch_ptep, cpu) = ptep;
33
34 return 0;
35}
36
37static int xen_mm32_cpu_notify(struct notifier_block *self,
38 unsigned long action, void *hcpu)
39{
40 int cpu = (long)hcpu;
41 switch (action) {
42 case CPU_UP_PREPARE:
43 if (alloc_xen_mm32_scratch_page(cpu))
44 return NOTIFY_BAD;
45 break;
46 default:
47 break;
48 }
49 return NOTIFY_OK;
50}
51
52static struct notifier_block xen_mm32_cpu_notifier = {
53 .notifier_call = xen_mm32_cpu_notify,
54};
55
56static void* xen_mm32_remap_page(dma_addr_t handle)
57{
58 unsigned long virt = get_cpu_var(xen_mm32_scratch_virt);
59 pte_t *ptep = __get_cpu_var(xen_mm32_scratch_ptep);
60
61 *ptep = pfn_pte(handle >> PAGE_SHIFT, PAGE_KERNEL);
62 local_flush_tlb_kernel_page(virt);
63
64 return (void*)virt;
65}
66
67static void xen_mm32_unmap(void *vaddr)
68{
69 put_cpu_var(xen_mm32_scratch_virt);
70}
71
72
73/* functions called by SWIOTLB */
74
75static void dma_cache_maint(dma_addr_t handle, unsigned long offset,
76 size_t size, enum dma_data_direction dir,
77 void (*op)(const void *, size_t, int))
78{
79 unsigned long pfn;
80 size_t left = size;
81
82 pfn = (handle >> PAGE_SHIFT) + offset / PAGE_SIZE;
83 offset %= PAGE_SIZE;
84
85 do {
86 size_t len = left;
87 void *vaddr;
88
89 if (!pfn_valid(pfn))
90 {
91 /* Cannot map the page, we don't know its physical address.
92 * Return and hope for the best */
93 if (!xen_feature(XENFEAT_grant_map_identity))
94 return;
95 vaddr = xen_mm32_remap_page(handle) + offset;
96 op(vaddr, len, dir);
97 xen_mm32_unmap(vaddr - offset);
98 } else {
99 struct page *page = pfn_to_page(pfn);
100
101 if (PageHighMem(page)) {
102 if (len + offset > PAGE_SIZE)
103 len = PAGE_SIZE - offset;
104
105 if (cache_is_vipt_nonaliasing()) {
106 vaddr = kmap_atomic(page);
107 op(vaddr + offset, len, dir);
108 kunmap_atomic(vaddr);
109 } else {
110 vaddr = kmap_high_get(page);
111 if (vaddr) {
112 op(vaddr + offset, len, dir);
113 kunmap_high(page);
114 }
115 }
116 } else {
117 vaddr = page_address(page) + offset;
118 op(vaddr, len, dir);
119 }
120 }
121
122 offset = 0;
123 pfn++;
124 left -= len;
125 } while (left);
126}
127
128static void __xen_dma_page_dev_to_cpu(struct device *hwdev, dma_addr_t handle,
129 size_t size, enum dma_data_direction dir)
130{
131 /* Cannot use __dma_page_dev_to_cpu because we don't have a
132 * struct page for handle */
133
134 if (dir != DMA_TO_DEVICE)
135 outer_inv_range(handle, handle + size);
136
137 dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_unmap_area);
138}
139
140static void __xen_dma_page_cpu_to_dev(struct device *hwdev, dma_addr_t handle,
141 size_t size, enum dma_data_direction dir)
142{
143
144 dma_cache_maint(handle & PAGE_MASK, handle & ~PAGE_MASK, size, dir, dmac_map_area);
145
146 if (dir == DMA_FROM_DEVICE) {
147 outer_inv_range(handle, handle + size);
148 } else {
149 outer_clean_range(handle, handle + size);
150 }
151}
152
153void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
154 size_t size, enum dma_data_direction dir,
155 struct dma_attrs *attrs)
156
157{
158 if (!__generic_dma_ops(hwdev)->unmap_page)
159 return;
160 if (dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
161 return;
162
163 __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
164}
165
166void xen_dma_sync_single_for_cpu(struct device *hwdev,
167 dma_addr_t handle, size_t size, enum dma_data_direction dir)
168{
169 if (!__generic_dma_ops(hwdev)->sync_single_for_cpu)
170 return;
171 __xen_dma_page_dev_to_cpu(hwdev, handle, size, dir);
172}
173
174void xen_dma_sync_single_for_device(struct device *hwdev,
175 dma_addr_t handle, size_t size, enum dma_data_direction dir)
176{
177 if (!__generic_dma_ops(hwdev)->sync_single_for_device)
178 return;
179 __xen_dma_page_cpu_to_dev(hwdev, handle, size, dir);
180}
181
182int __init xen_mm32_init(void)
183{
184 int cpu;
185
186 if (!xen_initial_domain())
187 return 0;
188
189 register_cpu_notifier(&xen_mm32_cpu_notifier);
190 get_online_cpus();
191 for_each_online_cpu(cpu) {
192 if (alloc_xen_mm32_scratch_page(cpu)) {
193 put_online_cpus();
194 unregister_cpu_notifier(&xen_mm32_cpu_notifier);
195 return -ENOMEM;
196 }
197 }
198 put_online_cpus();
199
200 return 0;
201}
202arch_initcall(xen_mm32_init);
diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h
index cf98b362094b..243ef256b8c9 100644
--- a/arch/arm64/include/asm/device.h
+++ b/arch/arm64/include/asm/device.h
@@ -21,6 +21,7 @@ struct dev_archdata {
21#ifdef CONFIG_IOMMU_API 21#ifdef CONFIG_IOMMU_API
22 void *iommu; /* private IOMMU data */ 22 void *iommu; /* private IOMMU data */
23#endif 23#endif
24 bool dma_coherent;
24}; 25};
25 26
26struct pdev_archdata { 27struct pdev_archdata {
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index adeae3f6f0fc..d34189bceff7 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -54,11 +54,18 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
54 54
55static inline int set_arch_dma_coherent_ops(struct device *dev) 55static inline int set_arch_dma_coherent_ops(struct device *dev)
56{ 56{
57 dev->archdata.dma_coherent = true;
57 set_dma_ops(dev, &coherent_swiotlb_dma_ops); 58 set_dma_ops(dev, &coherent_swiotlb_dma_ops);
58 return 0; 59 return 0;
59} 60}
60#define set_arch_dma_coherent_ops set_arch_dma_coherent_ops 61#define set_arch_dma_coherent_ops set_arch_dma_coherent_ops
61 62
63/* do not use this function in a driver */
64static inline bool is_device_dma_coherent(struct device *dev)
65{
66 return dev->archdata.dma_coherent;
67}
68
62#include <asm-generic/dma-mapping-common.h> 69#include <asm-generic/dma-mapping-common.h>
63 70
64static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 71static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
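
The new archdata.dma_coherent flag is recorded when the coherent DMA ops are chosen, so arch-level code (the comment explicitly forbids driver use) can later ask whether cache maintenance is needed at all. A hypothetical arch-side helper showing the intended query pattern, not part of this patch:

static bool arch_needs_cache_maint(struct device *dev)
{
	/* coherent devices snoop the caches; nothing to clean/invalidate */
	return !is_device_dma_coherent(dev);
}
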
diff --git a/arch/arm64/include/asm/xen/page-coherent.h b/arch/arm64/include/asm/xen/page-coherent.h
index dde3fc9c49f0..2052102b4e02 100644
--- a/arch/arm64/include/asm/xen/page-coherent.h
+++ b/arch/arm64/include/asm/xen/page-coherent.h
@@ -1,43 +1 @@
1#ifndef _ASM_ARM64_XEN_PAGE_COHERENT_H #include <../../arm/include/asm/xen/page-coherent.h>
2#define _ASM_ARM64_XEN_PAGE_COHERENT_H
3
4#include <asm/page.h>
5#include <linux/dma-attrs.h>
6#include <linux/dma-mapping.h>
7
8static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
9 dma_addr_t *dma_handle, gfp_t flags,
10 struct dma_attrs *attrs)
11{
12 return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
13}
14
15static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
16 void *cpu_addr, dma_addr_t dma_handle,
17 struct dma_attrs *attrs)
18{
19 __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
20}
21
22static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
23 unsigned long offset, size_t size, enum dma_data_direction dir,
24 struct dma_attrs *attrs)
25{
26}
27
28static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
29 size_t size, enum dma_data_direction dir,
30 struct dma_attrs *attrs)
31{
32}
33
34static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
35 dma_addr_t handle, size_t size, enum dma_data_direction dir)
36{
37}
38
39static inline void xen_dma_sync_single_for_device(struct device *hwdev,
40 dma_addr_t handle, size_t size, enum dma_data_direction dir)
41{
42}
43#endif /* _ASM_ARM64_XEN_PAGE_COHERENT_H */
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 663da771580a..3425f311c49e 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -511,7 +511,7 @@ static int cpu_psci_cpu_kill(unsigned int cpu)
511 511
512static int psci_suspend_finisher(unsigned long index) 512static int psci_suspend_finisher(unsigned long index)
513{ 513{
514 struct psci_power_state *state = __get_cpu_var(psci_power_state); 514 struct psci_power_state *state = __this_cpu_read(psci_power_state);
515 515
516 return psci_ops.cpu_suspend(state[index - 1], 516 return psci_ops.cpu_suspend(state[index - 1],
517 virt_to_phys(cpu_resume)); 517 virt_to_phys(cpu_resume));
@@ -520,7 +520,7 @@ static int psci_suspend_finisher(unsigned long index)
520static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index) 520static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
521{ 521{
522 int ret; 522 int ret;
523 struct psci_power_state *state = __get_cpu_var(psci_power_state); 523 struct psci_power_state *state = __this_cpu_read(psci_power_state);
524 /* 524 /*
525 * idle state index 0 corresponds to wfi, should never be called 525 * idle state index 0 corresponds to wfi, should never be called
526 * from the cpu_suspend operations 526 * from the cpu_suspend operations
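
__get_cpu_var() is being phased out across the tree; __this_cpu_read() fetches this CPU's instance of a per-cpu variable directly, without first forming its address. A before/after sketch with a hypothetical per-cpu pointer:

static DEFINE_PER_CPU(struct psci_power_state *, example_state);

static struct psci_power_state *get_state(void)
{
	/* old, removed idiom:
	 *   return __get_cpu_var(example_state);
	 * new idiom - read this CPU's copy directly: */
	return __this_cpu_read(example_state);
}
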
diff --git a/arch/mips/Kbuild.platforms b/arch/mips/Kbuild.platforms
index f5e18bf3275e..e5fc463b36d0 100644
--- a/arch/mips/Kbuild.platforms
+++ b/arch/mips/Kbuild.platforms
@@ -2,7 +2,9 @@
2 2
3platforms += alchemy 3platforms += alchemy
4platforms += ar7 4platforms += ar7
5platforms += ath25
5platforms += ath79 6platforms += ath79
7platforms += bcm3384
6platforms += bcm47xx 8platforms += bcm47xx
7platforms += bcm63xx 9platforms += bcm63xx
8platforms += cavium-octeon 10platforms += cavium-octeon
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 9536ef912f59..3289969ee423 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -53,6 +53,7 @@ config MIPS
53 select HAVE_CC_STACKPROTECTOR 53 select HAVE_CC_STACKPROTECTOR
54 select CPU_PM if CPU_IDLE 54 select CPU_PM if CPU_IDLE
55 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST 55 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
56 select ARCH_BINFMT_ELF_STATE
56 57
57menu "Machine selection" 58menu "Machine selection"
58 59
@@ -62,7 +63,7 @@ choice
62 63
63config MIPS_ALCHEMY 64config MIPS_ALCHEMY
64 bool "Alchemy processor based machines" 65 bool "Alchemy processor based machines"
65 select 64BIT_PHYS_ADDR 66 select ARCH_PHYS_ADDR_T_64BIT
66 select CEVT_R4K 67 select CEVT_R4K
67 select CSRC_R4K 68 select CSRC_R4K
68 select IRQ_CPU 69 select IRQ_CPU
@@ -96,6 +97,20 @@ config AR7
96 Support for the Texas Instruments AR7 System-on-a-Chip 97 Support for the Texas Instruments AR7 System-on-a-Chip
97 family: TNETD7100, 7200 and 7300. 98 family: TNETD7100, 7200 and 7300.
98 99
100config ATH25
101 bool "Atheros AR231x/AR531x SoC support"
102 select CEVT_R4K
103 select CSRC_R4K
104 select DMA_NONCOHERENT
105 select IRQ_CPU
106 select IRQ_DOMAIN
107 select SYS_HAS_CPU_MIPS32_R1
108 select SYS_SUPPORTS_BIG_ENDIAN
109 select SYS_SUPPORTS_32BIT_KERNEL
110 select SYS_HAS_EARLY_PRINTK
111 help
112 Support for Atheros AR231x and Atheros AR531x based boards
113
99config ATH79 114config ATH79
100 bool "Atheros AR71XX/AR724X/AR913X based boards" 115 bool "Atheros AR71XX/AR724X/AR913X based boards"
101 select ARCH_REQUIRE_GPIOLIB 116 select ARCH_REQUIRE_GPIOLIB
@@ -115,6 +130,32 @@ config ATH79
115 help 130 help
116 Support for the Atheros AR71XX/AR724X/AR913X SoCs. 131 Support for the Atheros AR71XX/AR724X/AR913X SoCs.
117 132
133config BCM3384
134 bool "Broadcom BCM3384 based boards"
135 select BOOT_RAW
136 select NO_EXCEPT_FILL
137 select USE_OF
138 select CEVT_R4K
139 select CSRC_R4K
140 select SYNC_R4K
141 select COMMON_CLK
142 select DMA_NONCOHERENT
143 select IRQ_CPU
144 select SYS_SUPPORTS_32BIT_KERNEL
145 select SYS_SUPPORTS_BIG_ENDIAN
146 select SYS_SUPPORTS_HIGHMEM
147 select SYS_HAS_CPU_BMIPS5000
148 select SWAP_IO_SPACE
149 select USB_EHCI_BIG_ENDIAN_DESC
150 select USB_EHCI_BIG_ENDIAN_MMIO
151 select USB_OHCI_BIG_ENDIAN_DESC
152 select USB_OHCI_BIG_ENDIAN_MMIO
153 help
154 Support for BCM3384 based boards. BCM3384/BCM33843 is a cable modem
155 chipset with a Linux application processor that is often used to
156 provide Samba services, a CUPS print server, and/or advanced routing
157 features.
158
118config BCM47XX 159config BCM47XX
119 bool "Broadcom BCM47XX based boards" 160 bool "Broadcom BCM47XX based boards"
120 select ARCH_WANT_OPTIONAL_GPIOLIB 161 select ARCH_WANT_OPTIONAL_GPIOLIB
@@ -269,6 +310,8 @@ config LANTIQ
269 select USE_OF 310 select USE_OF
270 select PINCTRL 311 select PINCTRL
271 select PINCTRL_LANTIQ 312 select PINCTRL_LANTIQ
313 select ARCH_HAS_RESET_CONTROLLER
314 select RESET_CONTROLLER
272 315
273config LASAT 316config LASAT
274 bool "LASAT Networks platforms" 317 bool "LASAT Networks platforms"
@@ -315,17 +358,18 @@ config MIPS_MALTA
315 select BOOT_RAW 358 select BOOT_RAW
316 select CEVT_R4K 359 select CEVT_R4K
317 select CSRC_R4K 360 select CSRC_R4K
318 select CSRC_GIC 361 select CLKSRC_MIPS_GIC
319 select DMA_MAYBE_COHERENT 362 select DMA_MAYBE_COHERENT
320 select GENERIC_ISA_DMA 363 select GENERIC_ISA_DMA
321 select HAVE_PCSPKR_PLATFORM 364 select HAVE_PCSPKR_PLATFORM
322 select IRQ_CPU 365 select IRQ_CPU
323 select IRQ_GIC 366 select MIPS_GIC
324 select HW_HAS_PCI 367 select HW_HAS_PCI
325 select I8253 368 select I8253
326 select I8259 369 select I8259
327 select MIPS_BONITO64 370 select MIPS_BONITO64
328 select MIPS_CPU_SCACHE 371 select MIPS_CPU_SCACHE
372 select MIPS_L1_CACHE_SHIFT_6
329 select PCI_GT64XXX_PCI0 373 select PCI_GT64XXX_PCI0
330 select MIPS_MSC 374 select MIPS_MSC
331 select SWAP_IO_SPACE 375 select SWAP_IO_SPACE
@@ -340,6 +384,7 @@ config MIPS_MALTA
340 select SYS_SUPPORTS_64BIT_KERNEL 384 select SYS_SUPPORTS_64BIT_KERNEL
341 select SYS_SUPPORTS_BIG_ENDIAN 385 select SYS_SUPPORTS_BIG_ENDIAN
342 select SYS_SUPPORTS_LITTLE_ENDIAN 386 select SYS_SUPPORTS_LITTLE_ENDIAN
387 select SYS_SUPPORTS_MICROMIPS
343 select SYS_SUPPORTS_MIPS_CMP 388 select SYS_SUPPORTS_MIPS_CMP
344 select SYS_SUPPORTS_MIPS_CPS 389 select SYS_SUPPORTS_MIPS_CPS
345 select SYS_SUPPORTS_MIPS16 390 select SYS_SUPPORTS_MIPS16
@@ -357,12 +402,12 @@ config MIPS_SEAD3
357 select BUILTIN_DTB 402 select BUILTIN_DTB
358 select CEVT_R4K 403 select CEVT_R4K
359 select CSRC_R4K 404 select CSRC_R4K
360 select CSRC_GIC 405 select CLKSRC_MIPS_GIC
361 select CPU_MIPSR2_IRQ_VI 406 select CPU_MIPSR2_IRQ_VI
362 select CPU_MIPSR2_IRQ_EI 407 select CPU_MIPSR2_IRQ_EI
363 select DMA_NONCOHERENT 408 select DMA_NONCOHERENT
364 select IRQ_CPU 409 select IRQ_CPU
365 select IRQ_GIC 410 select MIPS_GIC
366 select LIBFDT 411 select LIBFDT
367 select MIPS_MSC 412 select MIPS_MSC
368 select SYS_HAS_CPU_MIPS32_R1 413 select SYS_HAS_CPU_MIPS32_R1
@@ -726,7 +771,7 @@ config MIKROTIK_RB532
726config CAVIUM_OCTEON_SOC 771config CAVIUM_OCTEON_SOC
727 bool "Cavium Networks Octeon SoC based boards" 772 bool "Cavium Networks Octeon SoC based boards"
728 select CEVT_R4K 773 select CEVT_R4K
729 select 64BIT_PHYS_ADDR 774 select ARCH_PHYS_ADDR_T_64BIT
730 select DMA_COHERENT 775 select DMA_COHERENT
731 select SYS_SUPPORTS_64BIT_KERNEL 776 select SYS_SUPPORTS_64BIT_KERNEL
732 select SYS_SUPPORTS_BIG_ENDIAN 777 select SYS_SUPPORTS_BIG_ENDIAN
@@ -768,7 +813,7 @@ config NLM_XLR_BOARD
768 select SWAP_IO_SPACE 813 select SWAP_IO_SPACE
769 select SYS_SUPPORTS_32BIT_KERNEL 814 select SYS_SUPPORTS_32BIT_KERNEL
770 select SYS_SUPPORTS_64BIT_KERNEL 815 select SYS_SUPPORTS_64BIT_KERNEL
771 select 64BIT_PHYS_ADDR 816 select ARCH_PHYS_ADDR_T_64BIT
772 select SYS_SUPPORTS_BIG_ENDIAN 817 select SYS_SUPPORTS_BIG_ENDIAN
773 select SYS_SUPPORTS_HIGHMEM 818 select SYS_SUPPORTS_HIGHMEM
774 select DMA_COHERENT 819 select DMA_COHERENT
@@ -794,7 +839,7 @@ config NLM_XLP_BOARD
794 select HW_HAS_PCI 839 select HW_HAS_PCI
795 select SYS_SUPPORTS_32BIT_KERNEL 840 select SYS_SUPPORTS_32BIT_KERNEL
796 select SYS_SUPPORTS_64BIT_KERNEL 841 select SYS_SUPPORTS_64BIT_KERNEL
797 select 64BIT_PHYS_ADDR 842 select ARCH_PHYS_ADDR_T_64BIT
798 select SYS_SUPPORTS_BIG_ENDIAN 843 select SYS_SUPPORTS_BIG_ENDIAN
799 select SYS_SUPPORTS_LITTLE_ENDIAN 844 select SYS_SUPPORTS_LITTLE_ENDIAN
800 select SYS_SUPPORTS_HIGHMEM 845 select SYS_SUPPORTS_HIGHMEM
@@ -835,6 +880,7 @@ config MIPS_PARAVIRT
835endchoice 880endchoice
836 881
837source "arch/mips/alchemy/Kconfig" 882source "arch/mips/alchemy/Kconfig"
883source "arch/mips/ath25/Kconfig"
838source "arch/mips/ath79/Kconfig" 884source "arch/mips/ath79/Kconfig"
839source "arch/mips/bcm47xx/Kconfig" 885source "arch/mips/bcm47xx/Kconfig"
840source "arch/mips/bcm63xx/Kconfig" 886source "arch/mips/bcm63xx/Kconfig"
@@ -907,10 +953,6 @@ config CEVT_GT641XX
907config CEVT_R4K 953config CEVT_R4K
908 bool 954 bool
909 955
910config CEVT_GIC
911 select MIPS_CM
912 bool
913
914config CEVT_SB1250 956config CEVT_SB1250
915 bool 957 bool
916 958
@@ -926,10 +968,6 @@ config CSRC_IOASIC
926config CSRC_R4K 968config CSRC_R4K
927 bool 969 bool
928 970
929config CSRC_GIC
930 select MIPS_CM
931 bool
932
933config CSRC_SB1250 971config CSRC_SB1250
934 bool 972 bool
935 973
@@ -941,7 +979,7 @@ config FW_CFE
941 bool 979 bool
942 980
943config ARCH_DMA_ADDR_T_64BIT 981config ARCH_DMA_ADDR_T_64BIT
944 def_bool (HIGHMEM && 64BIT_PHYS_ADDR) || 64BIT 982 def_bool (HIGHMEM && ARCH_PHYS_ADDR_T_64BIT) || 64BIT
945 983
946config DMA_MAYBE_COHERENT 984config DMA_MAYBE_COHERENT
947 select DMA_NONCOHERENT 985 select DMA_NONCOHERENT
@@ -975,6 +1013,7 @@ config SYS_SUPPORTS_HOTPLUG_CPU
975 1013
976config I8259 1014config I8259
977 bool 1015 bool
1016 select IRQ_DOMAIN
978 1017
979config MIPS_BONITO64 1018config MIPS_BONITO64
980 bool 1019 bool
@@ -1055,6 +1094,7 @@ config MIPS_HUGE_TLB_SUPPORT
1055 1094
1056config IRQ_CPU 1095config IRQ_CPU
1057 bool 1096 bool
1097 select IRQ_DOMAIN
1058 1098
1059config IRQ_CPU_RM7K 1099config IRQ_CPU_RM7K
1060 bool 1100 bool
@@ -1071,10 +1111,6 @@ config IRQ_TXX9
1071config IRQ_GT641XX 1111config IRQ_GT641XX
1072 bool 1112 bool
1073 1113
1074config IRQ_GIC
1075 select MIPS_CM
1076 bool
1077
1078config PCI_GT64XXX_PCI0 1114config PCI_GT64XXX_PCI0
1079 bool 1115 bool
1080 1116
@@ -1574,6 +1610,7 @@ config CPU_LOONGSON1
1574 select CPU_HAS_PREFETCH 1610 select CPU_HAS_PREFETCH
1575 select CPU_SUPPORTS_32BIT_KERNEL 1611 select CPU_SUPPORTS_32BIT_KERNEL
1576 select CPU_SUPPORTS_HIGHMEM 1612 select CPU_SUPPORTS_HIGHMEM
1613 select CPU_SUPPORTS_CPUFREQ
1577 1614
1578config CPU_BMIPS32_3300 1615config CPU_BMIPS32_3300
1579 select SMP_UP if SMP 1616 select SMP_UP if SMP
@@ -1586,12 +1623,14 @@ config CPU_BMIPS4350
1586 1623
1587config CPU_BMIPS4380 1624config CPU_BMIPS4380
1588 bool 1625 bool
1626 select MIPS_L1_CACHE_SHIFT_6
1589 select SYS_SUPPORTS_SMP 1627 select SYS_SUPPORTS_SMP
1590 select SYS_SUPPORTS_HOTPLUG_CPU 1628 select SYS_SUPPORTS_HOTPLUG_CPU
1591 1629
1592config CPU_BMIPS5000 1630config CPU_BMIPS5000
1593 bool 1631 bool
1594 select MIPS_CPU_SCACHE 1632 select MIPS_CPU_SCACHE
1633 select MIPS_L1_CACHE_SHIFT_7
1595 select SYS_SUPPORTS_SMP 1634 select SYS_SUPPORTS_SMP
1596 select SYS_SUPPORTS_HOTPLUG_CPU 1635 select SYS_SUPPORTS_HOTPLUG_CPU
1597 1636
@@ -1886,15 +1925,6 @@ config FORCE_MAX_ZONEORDER
1886 The page size is not necessarily 4KB. Keep this in mind 1925 The page size is not necessarily 4KB. Keep this in mind
1887 when choosing a value for this option. 1926 when choosing a value for this option.
1888 1927
1889config CEVT_GIC
1890 bool "Use GIC global counter for clock events"
1891 depends on IRQ_GIC && !MIPS_SEAD3
1892 help
1893 Use the GIC global counter for the clock events. The R4K clock
1894 event driver is always present, so if the platform ends up not
1895 detecting a GIC, it will fall back to the R4K timer for the
1896 generation of clock events.
1897
1898config BOARD_SCACHE 1928config BOARD_SCACHE
1899 bool 1929 bool
1900 1930
@@ -1908,7 +1938,6 @@ config IP22_CPU_SCACHE
1908config MIPS_CPU_SCACHE 1938config MIPS_CPU_SCACHE
1909 bool 1939 bool
1910 select BOARD_SCACHE 1940 select BOARD_SCACHE
1911 select MIPS_L1_CACHE_SHIFT_6
1912 1941
1913config R5000_CPU_SCACHE 1942config R5000_CPU_SCACHE
1914 bool 1943 bool
@@ -2095,11 +2124,8 @@ config SB1_PASS_2_1_WORKAROUNDS
2095 default y 2124 default y
2096 2125
2097 2126
2098config 64BIT_PHYS_ADDR
2099 bool
2100
2101config ARCH_PHYS_ADDR_T_64BIT 2127config ARCH_PHYS_ADDR_T_64BIT
2102 def_bool 64BIT_PHYS_ADDR 2128 bool
2103 2129
2104choice 2130choice
2105 prompt "SmartMIPS or microMIPS ASE support" 2131 prompt "SmartMIPS or microMIPS ASE support"
@@ -2122,7 +2148,7 @@ config CPU_HAS_SMARTMIPS
2122 here. 2148 here.
2123 2149
2124config CPU_MICROMIPS 2150config CPU_MICROMIPS
2125 depends on SYS_SUPPORTS_MICROMIPS 2151 depends on 32BIT && SYS_SUPPORTS_MICROMIPS
2126 bool "microMIPS" 2152 bool "microMIPS"
2127 help 2153 help
2128 When this option is enabled the kernel will be built using the 2154 When this option is enabled the kernel will be built using the
diff --git a/arch/mips/Kconfig.debug b/arch/mips/Kconfig.debug
index 3a2b775e8458..88a9f433f6fc 100644
--- a/arch/mips/Kconfig.debug
+++ b/arch/mips/Kconfig.debug
@@ -122,4 +122,17 @@ config SPINLOCK_TEST
122 help 122 help
123 Add several files to the debugfs to test spinlock speed. 123 Add several files to the debugfs to test spinlock speed.
124 124
125config FP32XX_HYBRID_FPRS
126 bool "Run FP32 & FPXX code with hybrid FPRs"
127 depends on MIPS_O32_FP64_SUPPORT
128 help
129 The hybrid FPR scheme is normally used only when a program needs to
130 execute a mix of FP32 & FP64A code, since the trapping & emulation
131 that it entails is expensive. When enabled, this option will lead
132 to the kernel running programs which use the FP32 & FPXX FP ABIs
133 using the hybrid FPR scheme, which can be useful for debugging
134 purposes.
135
136 If unsure, say N.
137
125endmenu 138endmenu
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 58076472bdd8..2563a088d3b8 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -380,6 +380,7 @@ define archhelp
380 echo ' vmlinux.ecoff - ECOFF boot image' 380 echo ' vmlinux.ecoff - ECOFF boot image'
381 echo ' vmlinux.bin - Raw binary boot image' 381 echo ' vmlinux.bin - Raw binary boot image'
382 echo ' vmlinux.srec - SREC boot image' 382 echo ' vmlinux.srec - SREC boot image'
383 echo ' vmlinux.32 - 64-bit boot image wrapped in 32bits (IP22/IP32)'
383 echo ' vmlinuz - Compressed boot(zboot) image' 384 echo ' vmlinuz - Compressed boot(zboot) image'
384 echo ' vmlinuz.ecoff - ECOFF zboot image' 385 echo ' vmlinuz.ecoff - ECOFF zboot image'
385 echo ' vmlinuz.bin - Raw binary zboot image' 386 echo ' vmlinuz.bin - Raw binary zboot image'
diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c
index d7557cde271a..203e4403c366 100644
--- a/arch/mips/alchemy/common/clock.c
+++ b/arch/mips/alchemy/common/clock.c
@@ -37,7 +37,6 @@
37#include <linux/io.h> 37#include <linux/io.h>
38#include <linux/clk-provider.h> 38#include <linux/clk-provider.h>
39#include <linux/clkdev.h> 39#include <linux/clkdev.h>
40#include <linux/clk-private.h>
41#include <linux/slab.h> 40#include <linux/slab.h>
42#include <linux/spinlock.h> 41#include <linux/spinlock.h>
43#include <linux/types.h> 42#include <linux/types.h>
@@ -397,10 +396,10 @@ static long alchemy_clk_fgcs_detr(struct clk_hw *hw, unsigned long rate,
397 break; 396 break;
398 397
399 /* if this parent is currently unused, remember it. 398 /* if this parent is currently unused, remember it.
400 * XXX: I know it's a layering violation, but it works 399 * XXX: we would actually want clk_has_active_children()
401 * so well.. (if (!clk_has_active_children(pc)) ) 400 * but this is a good-enough approximation for now.
402 */ 401 */
403 if (pc->prepare_count == 0) { 402 if (!__clk_is_prepared(pc)) {
404 if (!free) 403 if (!free)
405 free = pc; 404 free = pc;
406 } 405 }
diff --git a/arch/mips/alchemy/common/setup.c b/arch/mips/alchemy/common/setup.c
index ea8f41869e56..4e72daf12c32 100644
--- a/arch/mips/alchemy/common/setup.c
+++ b/arch/mips/alchemy/common/setup.c
@@ -70,9 +70,9 @@ void __init plat_mem_setup(void)
70 iomem_resource.end = IOMEM_RESOURCE_END; 70 iomem_resource.end = IOMEM_RESOURCE_END;
71} 71}
72 72
73#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_PCI) 73#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_PCI)
74/* This routine should be valid for all Au1x based boards */ 74/* This routine should be valid for all Au1x based boards */
75phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size) 75phys_addr_t __fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size)
76{ 76{
77 unsigned long start = ALCHEMY_PCI_MEMWIN_START; 77 unsigned long start = ALCHEMY_PCI_MEMWIN_START;
78 unsigned long end = ALCHEMY_PCI_MEMWIN_END; 78 unsigned long end = ALCHEMY_PCI_MEMWIN_END;
@@ -83,7 +83,7 @@ phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size)
83 83
84 /* Check for PCI memory window */ 84 /* Check for PCI memory window */
85 if (phys_addr >= start && (phys_addr + size - 1) <= end) 85 if (phys_addr >= start && (phys_addr + size - 1) <= end)
86 return (phys_t)(AU1500_PCI_MEM_PHYS_ADDR + phys_addr); 86 return (phys_addr_t)(AU1500_PCI_MEM_PHYS_ADDR + phys_addr);
87 87
88 /* default nop */ 88 /* default nop */
89 return phys_addr; 89 return phys_addr;
diff --git a/arch/mips/ar7/platform.c b/arch/mips/ar7/platform.c
index 7e2356fd5fd6..af2441dbfc12 100644
--- a/arch/mips/ar7/platform.c
+++ b/arch/mips/ar7/platform.c
@@ -311,8 +311,7 @@ static void __init cpmac_get_mac(int instance, unsigned char *dev_addr)
311 &dev_addr[0], &dev_addr[1], 311 &dev_addr[0], &dev_addr[1],
312 &dev_addr[2], &dev_addr[3], 312 &dev_addr[2], &dev_addr[3],
313 &dev_addr[4], &dev_addr[5]) != 6) { 313 &dev_addr[4], &dev_addr[5]) != 6) {
314 pr_warning("cannot parse mac address, " 314 pr_warn("cannot parse mac address, using random address\n");
315 "using random address\n");
316 eth_random_addr(dev_addr); 315 eth_random_addr(dev_addr);
317 } 316 }
318 } else 317 } else
@@ -665,7 +664,7 @@ static int __init ar7_register_devices(void)
665 664
666 res = platform_device_register(&physmap_flash); 665 res = platform_device_register(&physmap_flash);
667 if (res) 666 if (res)
668 pr_warning("unable to register physmap-flash: %d\n", res); 667 pr_warn("unable to register physmap-flash: %d\n", res);
669 668
670 if (ar7_is_titan()) 669 if (ar7_is_titan())
671 titan_fixup_devices(); 670 titan_fixup_devices();
@@ -673,13 +672,13 @@ static int __init ar7_register_devices(void)
673 ar7_device_disable(vlynq_low_data.reset_bit); 672 ar7_device_disable(vlynq_low_data.reset_bit);
674 res = platform_device_register(&vlynq_low); 673 res = platform_device_register(&vlynq_low);
675 if (res) 674 if (res)
676 pr_warning("unable to register vlynq-low: %d\n", res); 675 pr_warn("unable to register vlynq-low: %d\n", res);
677 676
678 if (ar7_has_high_vlynq()) { 677 if (ar7_has_high_vlynq()) {
679 ar7_device_disable(vlynq_high_data.reset_bit); 678 ar7_device_disable(vlynq_high_data.reset_bit);
680 res = platform_device_register(&vlynq_high); 679 res = platform_device_register(&vlynq_high);
681 if (res) 680 if (res)
682 pr_warning("unable to register vlynq-high: %d\n", res); 681 pr_warn("unable to register vlynq-high: %d\n", res);
683 } 682 }
684 683
685 if (ar7_has_high_cpmac()) { 684 if (ar7_has_high_cpmac()) {
@@ -689,9 +688,10 @@ static int __init ar7_register_devices(void)
689 688
690 res = platform_device_register(&cpmac_high); 689 res = platform_device_register(&cpmac_high);
691 if (res) 690 if (res)
692 pr_warning("unable to register cpmac-high: %d\n", res); 691 pr_warn("unable to register cpmac-high: %d\n",
692 res);
693 } else 693 } else
694 pr_warning("unable to add cpmac-high phy: %d\n", res); 694 pr_warn("unable to add cpmac-high phy: %d\n", res);
695 } else 695 } else
696 cpmac_low_data.phy_mask = 0xffffffff; 696 cpmac_low_data.phy_mask = 0xffffffff;
697 697
@@ -700,18 +700,18 @@ static int __init ar7_register_devices(void)
700 cpmac_get_mac(0, cpmac_low_data.dev_addr); 700 cpmac_get_mac(0, cpmac_low_data.dev_addr);
701 res = platform_device_register(&cpmac_low); 701 res = platform_device_register(&cpmac_low);
702 if (res) 702 if (res)
703 pr_warning("unable to register cpmac-low: %d\n", res); 703 pr_warn("unable to register cpmac-low: %d\n", res);
704 } else 704 } else
705 pr_warning("unable to add cpmac-low phy: %d\n", res); 705 pr_warn("unable to add cpmac-low phy: %d\n", res);
706 706
707 detect_leds(); 707 detect_leds();
708 res = platform_device_register(&ar7_gpio_leds); 708 res = platform_device_register(&ar7_gpio_leds);
709 if (res) 709 if (res)
710 pr_warning("unable to register leds: %d\n", res); 710 pr_warn("unable to register leds: %d\n", res);
711 711
712 res = platform_device_register(&ar7_udc); 712 res = platform_device_register(&ar7_udc);
713 if (res) 713 if (res)
714 pr_warning("unable to register usb slave: %d\n", res); 714 pr_warn("unable to register usb slave: %d\n", res);
715 715
716 /* Register watchdog only if enabled in hardware */ 716 /* Register watchdog only if enabled in hardware */
717 bootcr = ioremap_nocache(AR7_REGS_DCL, 4); 717 bootcr = ioremap_nocache(AR7_REGS_DCL, 4);
@@ -726,7 +726,7 @@ static int __init ar7_register_devices(void)
726 ar7_wdt_res.end = ar7_wdt_res.start + 0x20; 726 ar7_wdt_res.end = ar7_wdt_res.start + 0x20;
727 res = platform_device_register(&ar7_wdt); 727 res = platform_device_register(&ar7_wdt);
728 if (res) 728 if (res)
729 pr_warning("unable to register watchdog: %d\n", res); 729 pr_warn("unable to register watchdog: %d\n", res);
730 } 730 }
731 731
732 return 0; 732 return 0;
diff --git a/arch/mips/ath25/Kconfig b/arch/mips/ath25/Kconfig
new file mode 100644
index 000000000000..fc19dd57e42d
--- /dev/null
+++ b/arch/mips/ath25/Kconfig
@@ -0,0 +1,16 @@
1config SOC_AR5312
2 bool "Atheros AR5312/AR2312+ SoC support"
3 depends on ATH25
4 default y
5
6config SOC_AR2315
7 bool "Atheros AR2315+ SoC support"
8 depends on ATH25
9 default y
10
11config PCI_AR2315
12 bool "Atheros AR2315 PCI controller support"
13 depends on SOC_AR2315
14 select HW_HAS_PCI
15 select PCI
16 default y
diff --git a/arch/mips/ath25/Makefile b/arch/mips/ath25/Makefile
new file mode 100644
index 000000000000..eabad7da446a
--- /dev/null
+++ b/arch/mips/ath25/Makefile
@@ -0,0 +1,16 @@
1#
2# This file is subject to the terms and conditions of the GNU General Public
3# License. See the file "COPYING" in the main directory of this archive
4# for more details.
5#
6# Copyright (C) 2006 FON Technology, SL.
7# Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
8# Copyright (C) 2006-2009 Felix Fietkau <nbd@openwrt.org>
9#
10
11obj-y += board.o prom.o devices.o
12
13obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
14
15obj-$(CONFIG_SOC_AR5312) += ar5312.o
16obj-$(CONFIG_SOC_AR2315) += ar2315.o
diff --git a/arch/mips/ath25/Platform b/arch/mips/ath25/Platform
new file mode 100644
index 000000000000..ef3f81fa080b
--- /dev/null
+++ b/arch/mips/ath25/Platform
@@ -0,0 +1,6 @@
1#
2# Atheros AR531X/AR231X WiSoC
3#
4platform-$(CONFIG_ATH25) += ath25/
5cflags-$(CONFIG_ATH25) += -I$(srctree)/arch/mips/include/asm/mach-ath25
6load-$(CONFIG_ATH25) += 0xffffffff80041000
diff --git a/arch/mips/ath25/ar2315.c b/arch/mips/ath25/ar2315.c
new file mode 100644
index 000000000000..2befa7d766a6
--- /dev/null
+++ b/arch/mips/ath25/ar2315.c
@@ -0,0 +1,364 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved.
7 * Copyright (C) 2006 FON Technology, SL.
8 * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
9 * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
10 * Copyright (C) 2012 Alexandros C. Couloumbis <alex@ozo.com>
11 */
12
13/*
14 * Platform devices for Atheros AR2315 SoCs
15 */
16
17#include <linux/init.h>
18#include <linux/kernel.h>
19#include <linux/bitops.h>
20#include <linux/irqdomain.h>
21#include <linux/interrupt.h>
22#include <linux/platform_device.h>
23#include <linux/reboot.h>
24#include <asm/bootinfo.h>
25#include <asm/reboot.h>
26#include <asm/time.h>
27
28#include <ath25_platform.h>
29
30#include "devices.h"
31#include "ar2315.h"
32#include "ar2315_regs.h"
33
34static void __iomem *ar2315_rst_base;
35static struct irq_domain *ar2315_misc_irq_domain;
36
37static inline u32 ar2315_rst_reg_read(u32 reg)
38{
39 return __raw_readl(ar2315_rst_base + reg);
40}
41
42static inline void ar2315_rst_reg_write(u32 reg, u32 val)
43{
44 __raw_writel(val, ar2315_rst_base + reg);
45}
46
47static inline void ar2315_rst_reg_mask(u32 reg, u32 mask, u32 val)
48{
49 u32 ret = ar2315_rst_reg_read(reg);
50
51 ret &= ~mask;
52 ret |= val;
53 ar2315_rst_reg_write(reg, ret);
54}
55
56static irqreturn_t ar2315_ahb_err_handler(int cpl, void *dev_id)
57{
58 ar2315_rst_reg_write(AR2315_AHB_ERR0, AR2315_AHB_ERROR_DET);
59 ar2315_rst_reg_read(AR2315_AHB_ERR1);
60
61 pr_emerg("AHB fatal error\n");
62 machine_restart("AHB error"); /* Catastrophic failure */
63
64 return IRQ_HANDLED;
65}
66
67static struct irqaction ar2315_ahb_err_interrupt = {
68 .handler = ar2315_ahb_err_handler,
69 .name = "ar2315-ahb-error",
70};
71
72static void ar2315_misc_irq_handler(unsigned irq, struct irq_desc *desc)
73{
74 u32 pending = ar2315_rst_reg_read(AR2315_ISR) &
75 ar2315_rst_reg_read(AR2315_IMR);
76 unsigned nr, misc_irq = 0;
77
78 if (pending) {
79 struct irq_domain *domain = irq_get_handler_data(irq);
80
81 nr = __ffs(pending);
82 misc_irq = irq_find_mapping(domain, nr);
83 }
84
85 if (misc_irq) {
86 if (nr == AR2315_MISC_IRQ_GPIO)
87 ar2315_rst_reg_write(AR2315_ISR, AR2315_ISR_GPIO);
88 else if (nr == AR2315_MISC_IRQ_WATCHDOG)
89 ar2315_rst_reg_write(AR2315_ISR, AR2315_ISR_WD);
90 generic_handle_irq(misc_irq);
91 } else {
92 spurious_interrupt();
93 }
94}
95
96static void ar2315_misc_irq_unmask(struct irq_data *d)
97{
98 ar2315_rst_reg_mask(AR2315_IMR, 0, BIT(d->hwirq));
99}
100
101static void ar2315_misc_irq_mask(struct irq_data *d)
102{
103 ar2315_rst_reg_mask(AR2315_IMR, BIT(d->hwirq), 0);
104}
105
106static struct irq_chip ar2315_misc_irq_chip = {
107 .name = "ar2315-misc",
108 .irq_unmask = ar2315_misc_irq_unmask,
109 .irq_mask = ar2315_misc_irq_mask,
110};
111
112static int ar2315_misc_irq_map(struct irq_domain *d, unsigned irq,
113 irq_hw_number_t hw)
114{
115 irq_set_chip_and_handler(irq, &ar2315_misc_irq_chip, handle_level_irq);
116 return 0;
117}
118
119static struct irq_domain_ops ar2315_misc_irq_domain_ops = {
120 .map = ar2315_misc_irq_map,
121};
122
123/*
124 * Called when an interrupt is received, this function
125 * determines exactly which interrupt it was, and it
126 * invokes the appropriate handler.
127 *
128 * Implicitly, we also define interrupt priority by
129 * choosing which to dispatch first.
130 */
131static void ar2315_irq_dispatch(void)
132{
133 u32 pending = read_c0_status() & read_c0_cause();
134
135 if (pending & CAUSEF_IP3)
136 do_IRQ(AR2315_IRQ_WLAN0);
137#ifdef CONFIG_PCI_AR2315
138 else if (pending & CAUSEF_IP5)
139 do_IRQ(AR2315_IRQ_LCBUS_PCI);
140#endif
141 else if (pending & CAUSEF_IP2)
142 do_IRQ(AR2315_IRQ_MISC);
143 else if (pending & CAUSEF_IP7)
144 do_IRQ(ATH25_IRQ_CPU_CLOCK);
145 else
146 spurious_interrupt();
147}
148
149void __init ar2315_arch_init_irq(void)
150{
151 struct irq_domain *domain;
152 unsigned irq;
153
154 ath25_irq_dispatch = ar2315_irq_dispatch;
155
156 domain = irq_domain_add_linear(NULL, AR2315_MISC_IRQ_COUNT,
157 &ar2315_misc_irq_domain_ops, NULL);
158 if (!domain)
159 panic("Failed to add IRQ domain");
160
161 irq = irq_create_mapping(domain, AR2315_MISC_IRQ_AHB);
162 setup_irq(irq, &ar2315_ahb_err_interrupt);
163
164 irq_set_chained_handler(AR2315_IRQ_MISC, ar2315_misc_irq_handler);
165 irq_set_handler_data(AR2315_IRQ_MISC, domain);
166
167 ar2315_misc_irq_domain = domain;
168}
169
170void __init ar2315_init_devices(void)
171{
172 /* Find board configuration */
173 ath25_find_config(AR2315_SPI_READ_BASE, AR2315_SPI_READ_SIZE);
174
175 ath25_add_wmac(0, AR2315_WLAN0_BASE, AR2315_IRQ_WLAN0);
176}
177
178static void ar2315_restart(char *command)
179{
180 void (*mips_reset_vec)(void) = (void *)0xbfc00000;
181
182 local_irq_disable();
183
 184	/* try to reset the system via reset control */
185 ar2315_rst_reg_write(AR2315_COLD_RESET, AR2317_RESET_SYSTEM);
186
 187	/* Cold reset does not work on the AR2315/6; use the GPIO reset bits
 188	 * as a workaround. Give it some time to attempt a GPIO-based hardware
 189	 * reset (Atheros reference design workaround). */
190
191 /* TODO: implement the GPIO reset workaround */
192
193 /* Some boards (e.g. Senao EOC-2610) don't implement the reset logic
194 * workaround. Attempt to jump to the mips reset location -
195 * the boot loader itself might be able to recover the system */
196 mips_reset_vec();
197}
198
199/*
200 * This table is indexed by bits 5..4 of the CLOCKCTL1 register
 201 * to determine the predivisor value.
202 */
203static int clockctl1_predivide_table[4] __initdata = { 1, 2, 4, 5 };
204static int pllc_divide_table[5] __initdata = { 2, 3, 4, 6, 3 };
205
206static unsigned __init ar2315_sys_clk(u32 clock_ctl)
207{
208 unsigned int pllc_ctrl, cpu_div;
209 unsigned int pllc_out, refdiv, fdiv, divby2;
210 unsigned int clk_div;
211
212 pllc_ctrl = ar2315_rst_reg_read(AR2315_PLLC_CTL);
213 refdiv = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_REF_DIV);
214 refdiv = clockctl1_predivide_table[refdiv];
215 fdiv = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_FDBACK_DIV);
216 divby2 = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_ADD_FDBACK_DIV) + 1;
217 pllc_out = (40000000 / refdiv) * (2 * divby2) * fdiv;
218
219 /* clkm input selected */
220 switch (clock_ctl & AR2315_CPUCLK_CLK_SEL_M) {
221 case 0:
222 case 1:
223 clk_div = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_CLKM_DIV);
224 clk_div = pllc_divide_table[clk_div];
225 break;
226 case 2:
227 clk_div = ATH25_REG_MS(pllc_ctrl, AR2315_PLLC_CLKC_DIV);
228 clk_div = pllc_divide_table[clk_div];
229 break;
230 default:
231 pllc_out = 40000000;
232 clk_div = 1;
233 break;
234 }
235
236 cpu_div = ATH25_REG_MS(clock_ctl, AR2315_CPUCLK_CLK_DIV);
237 cpu_div = cpu_div * 2 ?: 1;
238
239 return pllc_out / (clk_div * cpu_div);
240}
241
242static inline unsigned ar2315_cpu_frequency(void)
243{
244 return ar2315_sys_clk(ar2315_rst_reg_read(AR2315_CPUCLK));
245}
246
247static inline unsigned ar2315_apb_frequency(void)
248{
249 return ar2315_sys_clk(ar2315_rst_reg_read(AR2315_AMBACLK));
250}
251
252void __init ar2315_plat_time_init(void)
253{
254 mips_hpt_frequency = ar2315_cpu_frequency() / 2;
255}
256
257void __init ar2315_plat_mem_setup(void)
258{
259 void __iomem *sdram_base;
260 u32 memsize, memcfg;
261 u32 devid;
262 u32 config;
263
264 /* Detect memory size */
265 sdram_base = ioremap_nocache(AR2315_SDRAMCTL_BASE,
266 AR2315_SDRAMCTL_SIZE);
267 memcfg = __raw_readl(sdram_base + AR2315_MEM_CFG);
268 memsize = 1 + ATH25_REG_MS(memcfg, AR2315_MEM_CFG_DATA_WIDTH);
269 memsize <<= 1 + ATH25_REG_MS(memcfg, AR2315_MEM_CFG_COL_WIDTH);
270 memsize <<= 1 + ATH25_REG_MS(memcfg, AR2315_MEM_CFG_ROW_WIDTH);
271 memsize <<= 3;
272 add_memory_region(0, memsize, BOOT_MEM_RAM);
273 iounmap(sdram_base);
274
275 ar2315_rst_base = ioremap_nocache(AR2315_RST_BASE, AR2315_RST_SIZE);
276
277 /* Detect the hardware based on the device ID */
278 devid = ar2315_rst_reg_read(AR2315_SREV) & AR2315_REV_CHIP;
279 switch (devid) {
280 case 0x91: /* Need to check */
281 ath25_soc = ATH25_SOC_AR2318;
282 break;
283 case 0x90:
284 ath25_soc = ATH25_SOC_AR2317;
285 break;
286 case 0x87:
287 ath25_soc = ATH25_SOC_AR2316;
288 break;
289 case 0x86:
290 default:
291 ath25_soc = ATH25_SOC_AR2315;
292 break;
293 }
294 ath25_board.devid = devid;
295
296 /* Clear any lingering AHB errors */
297 config = read_c0_config();
298 write_c0_config(config & ~0x3);
299 ar2315_rst_reg_write(AR2315_AHB_ERR0, AR2315_AHB_ERROR_DET);
300 ar2315_rst_reg_read(AR2315_AHB_ERR1);
301 ar2315_rst_reg_write(AR2315_WDT_CTRL, AR2315_WDT_CTRL_IGNORE);
302
303 _machine_restart = ar2315_restart;
304}
305
306#ifdef CONFIG_PCI_AR2315
307static struct resource ar2315_pci_res[] = {
308 {
309 .name = "ar2315-pci-ctrl",
310 .flags = IORESOURCE_MEM,
311 .start = AR2315_PCI_BASE,
312 .end = AR2315_PCI_BASE + AR2315_PCI_SIZE - 1,
313 },
314 {
315 .name = "ar2315-pci-ext",
316 .flags = IORESOURCE_MEM,
317 .start = AR2315_PCI_EXT_BASE,
318 .end = AR2315_PCI_EXT_BASE + AR2315_PCI_EXT_SIZE - 1,
319 },
320 {
321 .name = "ar2315-pci",
322 .flags = IORESOURCE_IRQ,
323 .start = AR2315_IRQ_LCBUS_PCI,
324 .end = AR2315_IRQ_LCBUS_PCI,
325 },
326};
327#endif
328
329void __init ar2315_arch_init(void)
330{
331 unsigned irq = irq_create_mapping(ar2315_misc_irq_domain,
332 AR2315_MISC_IRQ_UART0);
333
334 ath25_serial_setup(AR2315_UART0_BASE, irq, ar2315_apb_frequency());
335
336#ifdef CONFIG_PCI_AR2315
337 if (ath25_soc == ATH25_SOC_AR2315) {
338 /* Reset PCI DMA logic */
339 ar2315_rst_reg_mask(AR2315_RESET, 0, AR2315_RESET_PCIDMA);
340 msleep(20);
341 ar2315_rst_reg_mask(AR2315_RESET, AR2315_RESET_PCIDMA, 0);
342 msleep(20);
343
344 /* Configure endians */
345 ar2315_rst_reg_mask(AR2315_ENDIAN_CTL, 0, AR2315_CONFIG_PCIAHB |
346 AR2315_CONFIG_PCIAHB_BRIDGE);
347
348 /* Configure as PCI host with DMA */
349 ar2315_rst_reg_write(AR2315_PCICLK, AR2315_PCICLK_PLLC_CLKM |
350 (AR2315_PCICLK_IN_FREQ_DIV_6 <<
351 AR2315_PCICLK_DIV_S));
352 ar2315_rst_reg_mask(AR2315_AHB_ARB_CTL, 0, AR2315_ARB_PCI);
353 ar2315_rst_reg_mask(AR2315_IF_CTL, AR2315_IF_PCI_CLK_MASK |
354 AR2315_IF_MASK, AR2315_IF_PCI |
355 AR2315_IF_PCI_HOST | AR2315_IF_PCI_INTR |
356 (AR2315_IF_PCI_CLK_OUTPUT_CLK <<
357 AR2315_IF_PCI_CLK_SHIFT));
358
359 platform_device_register_simple("ar2315-pci", -1,
360 ar2315_pci_res,
361 ARRAY_SIZE(ar2315_pci_res));
362 }
363#endif
364}
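
To make the PLL arithmetic in ar2315_sys_clk() concrete, here is a worked example with purely illustrative register field values, not taken from real hardware:

/* Assume refdiv selects 1, divby2 = 1, fdiv = 12, the clkm input is
 * selected with a divide-table value of 6, and the CPUCLK divider
 * field is 0 (so the `cpu_div * 2 ?: 1` fallback makes cpu_div = 1):
 *
 *   pllc_out = (40 MHz / 1) * (2 * 1) * 12 = 960 MHz
 *   sys clk  = 960 MHz / (6 * 1)           = 160 MHz
 *
 * ar2315_plat_time_init() then sets mips_hpt_frequency to half the
 * CPU clock, i.e. 80 MHz in this example. */
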
diff --git a/arch/mips/ath25/ar2315.h b/arch/mips/ath25/ar2315.h
new file mode 100644
index 000000000000..877afe63eed5
--- /dev/null
+++ b/arch/mips/ath25/ar2315.h
@@ -0,0 +1,22 @@
1#ifndef __AR2315_H
2#define __AR2315_H
3
4#ifdef CONFIG_SOC_AR2315
5
6void ar2315_arch_init_irq(void);
7void ar2315_init_devices(void);
8void ar2315_plat_time_init(void);
9void ar2315_plat_mem_setup(void);
10void ar2315_arch_init(void);
11
12#else
13
14static inline void ar2315_arch_init_irq(void) {}
15static inline void ar2315_init_devices(void) {}
16static inline void ar2315_plat_time_init(void) {}
17static inline void ar2315_plat_mem_setup(void) {}
18static inline void ar2315_arch_init(void) {}
19
20#endif
21
22#endif /* __AR2315_H */
diff --git a/arch/mips/ath25/ar2315_regs.h b/arch/mips/ath25/ar2315_regs.h
new file mode 100644
index 000000000000..16e86149cb74
--- /dev/null
+++ b/arch/mips/ath25/ar2315_regs.h
@@ -0,0 +1,410 @@
1/*
2 * Register definitions for AR2315+
3 *
4 * This file is subject to the terms and conditions of the GNU General Public
5 * License. See the file "COPYING" in the main directory of this archive
6 * for more details.
7 *
8 * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved.
9 * Copyright (C) 2006 FON Technology, SL.
10 * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
11 * Copyright (C) 2006-2008 Felix Fietkau <nbd@openwrt.org>
12 */
13
14#ifndef __ASM_MACH_ATH25_AR2315_REGS_H
15#define __ASM_MACH_ATH25_AR2315_REGS_H
16
17/*
18 * IRQs
19 */
20#define AR2315_IRQ_MISC (MIPS_CPU_IRQ_BASE + 2) /* C0_CAUSE: 0x0400 */
21#define AR2315_IRQ_WLAN0 (MIPS_CPU_IRQ_BASE + 3) /* C0_CAUSE: 0x0800 */
22#define AR2315_IRQ_ENET0 (MIPS_CPU_IRQ_BASE + 4) /* C0_CAUSE: 0x1000 */
23#define AR2315_IRQ_LCBUS_PCI (MIPS_CPU_IRQ_BASE + 5) /* C0_CAUSE: 0x2000 */
24#define AR2315_IRQ_WLAN0_POLL (MIPS_CPU_IRQ_BASE + 6) /* C0_CAUSE: 0x4000 */
25
26/*
27 * Miscellaneous interrupts, which share IP2.
28 */
29#define AR2315_MISC_IRQ_UART0 0
30#define AR2315_MISC_IRQ_I2C_RSVD 1
31#define AR2315_MISC_IRQ_SPI 2
32#define AR2315_MISC_IRQ_AHB 3
33#define AR2315_MISC_IRQ_APB 4
34#define AR2315_MISC_IRQ_TIMER 5
35#define AR2315_MISC_IRQ_GPIO 6
36#define AR2315_MISC_IRQ_WATCHDOG 7
37#define AR2315_MISC_IRQ_IR_RSVD 8
38#define AR2315_MISC_IRQ_COUNT 9
39
40/*
41 * Address map
42 */
43#define AR2315_SPI_READ_BASE 0x08000000 /* SPI flash */
44#define AR2315_SPI_READ_SIZE 0x01000000
45#define AR2315_WLAN0_BASE 0x10000000 /* Wireless MMR */
46#define AR2315_PCI_BASE 0x10100000 /* PCI MMR */
47#define AR2315_PCI_SIZE 0x00001000
48#define AR2315_SDRAMCTL_BASE 0x10300000 /* SDRAM MMR */
49#define AR2315_SDRAMCTL_SIZE 0x00000020
50#define AR2315_LOCAL_BASE 0x10400000 /* Local bus MMR */
51#define AR2315_ENET0_BASE 0x10500000 /* Ethernet MMR */
52#define AR2315_RST_BASE 0x11000000 /* Reset control MMR */
53#define AR2315_RST_SIZE 0x00000100
54#define AR2315_UART0_BASE 0x11100000 /* UART MMR */
55#define AR2315_SPI_MMR_BASE 0x11300000 /* SPI flash MMR */
56#define AR2315_SPI_MMR_SIZE 0x00000010
57#define AR2315_PCI_EXT_BASE 0x80000000 /* PCI external */
58#define AR2315_PCI_EXT_SIZE 0x40000000
59
60/*
61 * Configuration registers
62 */
63
64/* Cold reset register */
65#define AR2315_COLD_RESET 0x0000
66
67#define AR2315_RESET_COLD_AHB 0x00000001
68#define AR2315_RESET_COLD_APB 0x00000002
69#define AR2315_RESET_COLD_CPU 0x00000004
70#define AR2315_RESET_COLD_CPUWARM 0x00000008
 71#define AR2315_RESET_SYSTEM (AR2315_RESET_COLD_CPU |\
 72 AR2315_RESET_COLD_APB |\
 73 AR2315_RESET_COLD_AHB) /* full system */
74#define AR2317_RESET_SYSTEM 0x00000010
75
76/* Reset register */
77#define AR2315_RESET 0x0004
78
79#define AR2315_RESET_WARM_WLAN0_MAC 0x00000001 /* warm reset WLAN0 MAC */
80#define AR2315_RESET_WARM_WLAN0_BB 0x00000002 /* warm reset WLAN0 BB */
81#define AR2315_RESET_MPEGTS_RSVD 0x00000004 /* warm reset MPEG-TS */
82#define AR2315_RESET_PCIDMA 0x00000008 /* warm reset PCI ahb/dma */
83#define AR2315_RESET_MEMCTL 0x00000010 /* warm reset mem control */
84#define AR2315_RESET_LOCAL 0x00000020 /* warm reset local bus */
85#define AR2315_RESET_I2C_RSVD 0x00000040 /* warm reset I2C bus */
86#define AR2315_RESET_SPI 0x00000080 /* warm reset SPI iface */
87#define AR2315_RESET_UART0 0x00000100 /* warm reset UART0 */
88#define AR2315_RESET_IR_RSVD 0x00000200 /* warm reset IR iface */
89#define AR2315_RESET_EPHY0 0x00000400 /* cold reset ENET0 phy */
90#define AR2315_RESET_ENET0 0x00000800 /* cold reset ENET0 MAC */
91
92/* AHB master arbitration control */
93#define AR2315_AHB_ARB_CTL 0x0008
94
95#define AR2315_ARB_CPU 0x00000001 /* CPU, default */
96#define AR2315_ARB_WLAN 0x00000002 /* WLAN */
97#define AR2315_ARB_MPEGTS_RSVD 0x00000004 /* MPEG-TS */
98#define AR2315_ARB_LOCAL 0x00000008 /* Local bus */
99#define AR2315_ARB_PCI 0x00000010 /* PCI bus */
100#define AR2315_ARB_ETHERNET 0x00000020 /* Ethernet */
101#define AR2315_ARB_RETRY 0x00000100 /* Retry policy (debug) */
102
103/* Config Register */
104#define AR2315_ENDIAN_CTL 0x000c
105
106#define AR2315_CONFIG_AHB 0x00000001 /* EC-AHB bridge endian */
107#define AR2315_CONFIG_WLAN 0x00000002 /* WLAN byteswap */
108#define AR2315_CONFIG_MPEGTS_RSVD 0x00000004 /* MPEG-TS byteswap */
109#define AR2315_CONFIG_PCI 0x00000008 /* PCI byteswap */
110#define AR2315_CONFIG_MEMCTL 0x00000010 /* Mem controller endian */
111#define AR2315_CONFIG_LOCAL 0x00000020 /* Local bus byteswap */
112#define AR2315_CONFIG_ETHERNET 0x00000040 /* Ethernet byteswap */
113#define AR2315_CONFIG_MERGE 0x00000200 /* CPU write buffer merge */
114#define AR2315_CONFIG_CPU 0x00000400 /* CPU big endian */
115#define AR2315_CONFIG_BIG 0x00000400
116#define AR2315_CONFIG_PCIAHB 0x00000800
117#define AR2315_CONFIG_PCIAHB_BRIDGE 0x00001000
118#define AR2315_CONFIG_SPI 0x00008000 /* SPI byteswap */
119#define AR2315_CONFIG_CPU_DRAM 0x00010000
120#define AR2315_CONFIG_CPU_PCI 0x00020000
121#define AR2315_CONFIG_CPU_MMR 0x00040000
122
123/* NMI control */
124#define AR2315_NMI_CTL 0x0010
125
126#define AR2315_NMI_EN 1
127
128/* Revision Register - Initial value is 0x3010 (WMAC 3.0, AR231X 1.0). */
129#define AR2315_SREV 0x0014
130
131#define AR2315_REV_MAJ 0x000000f0
132#define AR2315_REV_MAJ_S 4
133#define AR2315_REV_MIN 0x0000000f
134#define AR2315_REV_MIN_S 0
135#define AR2315_REV_CHIP (AR2315_REV_MAJ | AR2315_REV_MIN)
136
137/* Interface Enable */
138#define AR2315_IF_CTL 0x0018
139
140#define AR2315_IF_MASK 0x00000007
141#define AR2315_IF_DISABLED 0 /* Disable all */
142#define AR2315_IF_PCI 1 /* PCI */
143#define AR2315_IF_TS_LOCAL 2 /* Local bus */
144#define AR2315_IF_ALL 3 /* Emulation only */
145#define AR2315_IF_LOCAL_HOST 0x00000008
146#define AR2315_IF_PCI_HOST 0x00000010
147#define AR2315_IF_PCI_INTR 0x00000020
148#define AR2315_IF_PCI_CLK_MASK 0x00030000
149#define AR2315_IF_PCI_CLK_INPUT 0
150#define AR2315_IF_PCI_CLK_OUTPUT_LOW 1
151#define AR2315_IF_PCI_CLK_OUTPUT_CLK 2
152#define AR2315_IF_PCI_CLK_OUTPUT_HIGH 3
153#define AR2315_IF_PCI_CLK_SHIFT 16
154
155/* APB Interrupt control */
156#define AR2315_ISR 0x0020
157#define AR2315_IMR 0x0024
158#define AR2315_GISR 0x0028
159
160#define AR2315_ISR_UART0 0x00000001 /* high speed UART */
161#define AR2315_ISR_I2C_RSVD 0x00000002 /* I2C bus */
162#define AR2315_ISR_SPI 0x00000004 /* SPI bus */
163#define AR2315_ISR_AHB 0x00000008 /* AHB error */
164#define AR2315_ISR_APB 0x00000010 /* APB error */
165#define AR2315_ISR_TIMER 0x00000020 /* Timer */
166#define AR2315_ISR_GPIO 0x00000040 /* GPIO */
167#define AR2315_ISR_WD 0x00000080 /* Watchdog */
168#define AR2315_ISR_IR_RSVD 0x00000100 /* IR */
169
170#define AR2315_GISR_MISC 0x00000001 /* Misc */
171#define AR2315_GISR_WLAN0 0x00000002 /* WLAN0 */
172#define AR2315_GISR_MPEGTS_RSVD 0x00000004 /* MPEG-TS */
173#define AR2315_GISR_LOCALPCI 0x00000008 /* Local/PCI bus */
174#define AR2315_GISR_WMACPOLL 0x00000010
175#define AR2315_GISR_TIMER 0x00000020
176#define AR2315_GISR_ETHERNET 0x00000040 /* Ethernet */
177
178/* Generic timer */
179#define AR2315_TIMER 0x0030
180#define AR2315_RELOAD 0x0034
181
182/* Watchdog timer */
183#define AR2315_WDT_TIMER 0x0038
184#define AR2315_WDT_CTRL 0x003c
185
186#define AR2315_WDT_CTRL_IGNORE 0x00000000 /* ignore expiration */
187#define AR2315_WDT_CTRL_NMI 0x00000001 /* NMI on watchdog */
188#define AR2315_WDT_CTRL_RESET 0x00000002 /* reset on watchdog */
189
190/* CPU Performance Counters */
191#define AR2315_PERFCNT0 0x0048
192#define AR2315_PERFCNT1 0x004c
193
194#define AR2315_PERF0_DATAHIT 0x00000001 /* Count Data Cache Hits */
195#define AR2315_PERF0_DATAMISS 0x00000002 /* Count Data Cache Misses */
196#define AR2315_PERF0_INSTHIT 0x00000004 /* Count Instruction Cache Hits */
197#define AR2315_PERF0_INSTMISS 0x00000008 /* Count Instruction Cache Misses */
198#define AR2315_PERF0_ACTIVE 0x00000010 /* Count Active Processor Cycles */
199#define AR2315_PERF0_WBHIT 0x00000020 /* Count CPU Write Buffer Hits */
200#define AR2315_PERF0_WBMISS 0x00000040 /* Count CPU Write Buffer Misses */
201
202#define AR2315_PERF1_EB_ARDY 0x00000001 /* Count EB_ARdy signal */
203#define AR2315_PERF1_EB_AVALID 0x00000002 /* Count EB_AValid signal */
204#define AR2315_PERF1_EB_WDRDY 0x00000004 /* Count EB_WDRdy signal */
205#define AR2315_PERF1_EB_RDVAL 0x00000008 /* Count EB_RdVal signal */
206#define AR2315_PERF1_VRADDR 0x00000010 /* Count valid read address cycles*/
207#define AR2315_PERF1_VWADDR 0x00000020 /* Count valid write address cycl.*/
208#define AR2315_PERF1_VWDATA 0x00000040 /* Count valid write data cycles */
209
210/* AHB Error Reporting */
211#define AR2315_AHB_ERR0 0x0050 /* error */
212#define AR2315_AHB_ERR1 0x0054 /* haddr */
213#define AR2315_AHB_ERR2 0x0058 /* hwdata */
214#define AR2315_AHB_ERR3 0x005c /* hrdata */
215#define AR2315_AHB_ERR4 0x0060 /* status */
216
217#define AR2315_AHB_ERROR_DET 1 /* AHB Error has been detected, */
218 /* write 1 to clear all bits in ERR0 */
219#define AR2315_AHB_ERROR_OVR 2 /* AHB Error overflow has been detected */
220#define AR2315_AHB_ERROR_WDT 4 /* AHB Error due to wdt instead of hresp */
221
222#define AR2315_PROCERR_HMAST 0x0000000f
223#define AR2315_PROCERR_HMAST_DFLT 0
224#define AR2315_PROCERR_HMAST_WMAC 1
225#define AR2315_PROCERR_HMAST_ENET 2
226#define AR2315_PROCERR_HMAST_PCIENDPT 3
227#define AR2315_PROCERR_HMAST_LOCAL 4
228#define AR2315_PROCERR_HMAST_CPU 5
229#define AR2315_PROCERR_HMAST_PCITGT 6
230#define AR2315_PROCERR_HMAST_S 0
231#define AR2315_PROCERR_HWRITE 0x00000010
232#define AR2315_PROCERR_HSIZE 0x00000060
233#define AR2315_PROCERR_HSIZE_S 5
234#define AR2315_PROCERR_HTRANS 0x00000180
235#define AR2315_PROCERR_HTRANS_S 7
236#define AR2315_PROCERR_HBURST 0x00000e00
237#define AR2315_PROCERR_HBURST_S 9
238
239/* Clock Control */
240#define AR2315_PLLC_CTL 0x0064
241#define AR2315_PLLV_CTL 0x0068
242#define AR2315_CPUCLK 0x006c
243#define AR2315_AMBACLK 0x0070
244#define AR2315_SYNCCLK 0x0074
245#define AR2315_DSL_SLEEP_CTL 0x0080
246#define AR2315_DSL_SLEEP_DUR 0x0084
247
248/* PLLc Control fields */
249#define AR2315_PLLC_REF_DIV_M 0x00000003
250#define AR2315_PLLC_REF_DIV_S 0
251#define AR2315_PLLC_FDBACK_DIV_M 0x0000007c
252#define AR2315_PLLC_FDBACK_DIV_S 2
253#define AR2315_PLLC_ADD_FDBACK_DIV_M 0x00000080
254#define AR2315_PLLC_ADD_FDBACK_DIV_S 7
255#define AR2315_PLLC_CLKC_DIV_M 0x0001c000
256#define AR2315_PLLC_CLKC_DIV_S 14
257#define AR2315_PLLC_CLKM_DIV_M 0x00700000
258#define AR2315_PLLC_CLKM_DIV_S 20
259
260/* CPU CLK Control fields */
261#define AR2315_CPUCLK_CLK_SEL_M 0x00000003
262#define AR2315_CPUCLK_CLK_SEL_S 0
263#define AR2315_CPUCLK_CLK_DIV_M 0x0000000c
264#define AR2315_CPUCLK_CLK_DIV_S 2
265
266/* AMBA CLK Control fields */
267#define AR2315_AMBACLK_CLK_SEL_M 0x00000003
268#define AR2315_AMBACLK_CLK_SEL_S 0
269#define AR2315_AMBACLK_CLK_DIV_M 0x0000000c
270#define AR2315_AMBACLK_CLK_DIV_S 2
271
272/* PCI Clock Control */
273#define AR2315_PCICLK 0x00a4
274
275#define AR2315_PCICLK_INPUT_M 0x00000003
276#define AR2315_PCICLK_INPUT_S 0
277#define AR2315_PCICLK_PLLC_CLKM 0
278#define AR2315_PCICLK_PLLC_CLKM1 1
279#define AR2315_PCICLK_PLLC_CLKC 2
280#define AR2315_PCICLK_REF_CLK 3
281#define AR2315_PCICLK_DIV_M 0x0000000c
282#define AR2315_PCICLK_DIV_S 2
283#define AR2315_PCICLK_IN_FREQ 0
284#define AR2315_PCICLK_IN_FREQ_DIV_6 1
285#define AR2315_PCICLK_IN_FREQ_DIV_8 2
286#define AR2315_PCICLK_IN_FREQ_DIV_10 3
287
288/* Observation Control Register */
289#define AR2315_OCR 0x00b0
290
291#define AR2315_OCR_GPIO0_IRIN 0x00000040
292#define AR2315_OCR_GPIO1_IROUT 0x00000080
293#define AR2315_OCR_GPIO3_RXCLR 0x00000200
294
295/* General Clock Control */
296#define AR2315_MISCCLK 0x00b4
297
298#define AR2315_MISCCLK_PLLBYPASS_EN 0x00000001
299#define AR2315_MISCCLK_PROCREFCLK 0x00000002
300
301/*
302 * SDRAM Controller
303 * - No read or write buffers are included.
304 */
305#define AR2315_MEM_CFG 0x0000
306#define AR2315_MEM_CTRL 0x000c
307#define AR2315_MEM_REF 0x0010
308
309#define AR2315_MEM_CFG_DATA_WIDTH_M 0x00006000
310#define AR2315_MEM_CFG_DATA_WIDTH_S 13
311#define AR2315_MEM_CFG_COL_WIDTH_M 0x00001e00
312#define AR2315_MEM_CFG_COL_WIDTH_S 9
313#define AR2315_MEM_CFG_ROW_WIDTH_M 0x000001e0
314#define AR2315_MEM_CFG_ROW_WIDTH_S 5
315#define AR2315_MEM_CFG_BANKADDR_BITS_M 0x00000018
316#define AR2315_MEM_CFG_BANKADDR_BITS_S 3
317
318/*
319 * Local Bus Interface Registers
320 */
321#define AR2315_LB_CONFIG 0x0000
322
323#define AR2315_LBCONF_OE 0x00000001 /* =1 OE is low-true */
324#define AR2315_LBCONF_CS0 0x00000002 /* =1 first CS is low-true */
325#define AR2315_LBCONF_CS1 0x00000004 /* =1 2nd CS is low-true */
326#define AR2315_LBCONF_RDY 0x00000008 /* =1 RDY is low-true */
327#define AR2315_LBCONF_WE 0x00000010 /* =1 Write En is low-true */
328#define AR2315_LBCONF_WAIT 0x00000020 /* =1 WAIT is low-true */
329#define AR2315_LBCONF_ADS 0x00000040 /* =1 Adr Strobe is low-true */
330#define AR2315_LBCONF_MOT 0x00000080 /* =0 Intel, =1 Motorola */
331#define AR2315_LBCONF_8CS 0x00000100 /* =1 8 bits CS, 0= 16bits */
332#define AR2315_LBCONF_8DS 0x00000200 /* =1 8 bits Data S, 0=16bits */
333#define AR2315_LBCONF_ADS_EN 0x00000400 /* =1 Enable ADS */
334#define AR2315_LBCONF_ADR_OE 0x00000800 /* =1 Adr cap on OE, WE or DS */
335#define AR2315_LBCONF_ADDT_MUX 0x00001000 /* =1 Adr and Data share bus */
336#define AR2315_LBCONF_DATA_OE 0x00002000 /* =1 Data cap on OE, WE, DS */
337#define AR2315_LBCONF_16DATA 0x00004000 /* =1 Data is 16 bits wide */
338#define AR2315_LBCONF_SWAPDT 0x00008000 /* =1 Byte swap data */
339#define AR2315_LBCONF_SYNC 0x00010000 /* =1 Bus synchronous to clk */
340#define AR2315_LBCONF_INT 0x00020000 /* =1 Intr is low true */
341#define AR2315_LBCONF_INT_CTR0 0x00000000 /* GND high-Z, Vdd is high-Z */
342#define AR2315_LBCONF_INT_CTR1 0x00040000 /* GND drive, Vdd is high-Z */
343#define AR2315_LBCONF_INT_CTR2 0x00080000 /* GND high-Z, Vdd drive */
344#define AR2315_LBCONF_INT_CTR3 0x000c0000 /* GND drive, Vdd drive */
345#define AR2315_LBCONF_RDY_WAIT 0x00100000 /* =1 RDY is negative of WAIT */
346#define AR2315_LBCONF_INT_PULSE 0x00200000 /* =1 Interrupt is a pulse */
347#define AR2315_LBCONF_ENABLE 0x00400000 /* =1 Falcon responds to LB */
348
349#define AR2315_LB_CLKSEL 0x0004
350
351#define AR2315_LBCLK_EXT 0x00000001 /* use external clk for lb */
352
353#define AR2315_LB_1MS 0x0008
354
355#define AR2315_LB1MS_MASK 0x0003ffff /* # of AHB clk cycles in 1ms */
356
357#define AR2315_LB_MISCCFG 0x000c
358
359#define AR2315_LBM_TXD_EN 0x00000001 /* Enable TXD for fragments */
360#define AR2315_LBM_RX_INTEN 0x00000002 /* Enable LB ints on RX ready */
361#define AR2315_LBM_MBOXWR_INTEN 0x00000004 /* Enable LB ints on mbox wr */
362#define AR2315_LBM_MBOXRD_INTEN 0x00000008 /* Enable LB ints on mbox rd */
363#define AR2315_LBM_DESCSWAP_EN 0x00000010 /* Byte swap desc enable */
364#define AR2315_LBM_TIMEOUT_M 0x00ffff80
365#define AR2315_LBM_TIMEOUT_S 7
366#define AR2315_LBM_PORTMUX 0x07000000
367
368#define AR2315_LB_RXTSOFF 0x0010
369
370#define AR2315_LB_TX_CHAIN_EN 0x0100
371
372#define AR2315_LB_TXEN_0 0x00000001
373#define AR2315_LB_TXEN_1 0x00000002
374#define AR2315_LB_TXEN_2 0x00000004
375#define AR2315_LB_TXEN_3 0x00000008
376
377#define AR2315_LB_TX_CHAIN_DIS 0x0104
378#define AR2315_LB_TX_DESC_PTR 0x0200
379
380#define AR2315_LB_RX_CHAIN_EN 0x0400
381
382#define AR2315_LB_RXEN 0x00000001
383
384#define AR2315_LB_RX_CHAIN_DIS 0x0404
385#define AR2315_LB_RX_DESC_PTR 0x0408
386
387#define AR2315_LB_INT_STATUS 0x0500
388
389#define AR2315_LB_INT_TX_DESC 0x00000001
390#define AR2315_LB_INT_TX_OK 0x00000002
391#define AR2315_LB_INT_TX_ERR 0x00000004
392#define AR2315_LB_INT_TX_EOF 0x00000008
393#define AR2315_LB_INT_RX_DESC 0x00000010
394#define AR2315_LB_INT_RX_OK 0x00000020
395#define AR2315_LB_INT_RX_ERR 0x00000040
396#define AR2315_LB_INT_RX_EOF 0x00000080
397#define AR2315_LB_INT_TX_TRUNC 0x00000100
398#define AR2315_LB_INT_TX_STARVE 0x00000200
399#define AR2315_LB_INT_LB_TIMEOUT 0x00000400
400#define AR2315_LB_INT_LB_ERR 0x00000800
401#define AR2315_LB_INT_MBOX_WR 0x00001000
402#define AR2315_LB_INT_MBOX_RD 0x00002000
403
404/* Bit definitions for INT MASK are the same as INT_STATUS */
405#define AR2315_LB_INT_MASK 0x0504
406
407#define AR2315_LB_INT_EN 0x0508
408#define AR2315_LB_MBOX 0x0600
409
410#endif /* __ASM_MACH_ATH25_AR2315_REGS_H */
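
The revision fields above follow the header's mask-and-shift convention (a mask such as AR2315_REV_MAJ paired with a _S shift). A minimal standalone sketch of decoding them, using the documented reset value 0x3010 as input:

/* Illustration only: decoding AR2315_SREV with the mask/shift pairs above. */
#include <stdio.h>

#define AR2315_REV_MAJ		0x000000f0
#define AR2315_REV_MAJ_S	4
#define AR2315_REV_MIN		0x0000000f
#define AR2315_REV_MIN_S	0

int main(void)
{
	unsigned int srev = 0x3010;	/* documented power-on value */

	printf("major %u, minor %u\n",
	       (srev & AR2315_REV_MAJ) >> AR2315_REV_MAJ_S,
	       (srev & AR2315_REV_MIN) >> AR2315_REV_MIN_S);
	return 0;			/* prints "major 1, minor 0" */
}
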
diff --git a/arch/mips/ath25/ar5312.c b/arch/mips/ath25/ar5312.c
new file mode 100644
index 000000000000..b6887f75144c
--- /dev/null
+++ b/arch/mips/ath25/ar5312.c
@@ -0,0 +1,393 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved.
7 * Copyright (C) 2006 FON Technology, SL.
8 * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
9 * Copyright (C) 2006-2009 Felix Fietkau <nbd@openwrt.org>
10 * Copyright (C) 2012 Alexandros C. Couloumbis <alex@ozo.com>
11 */
12
13/*
14 * Platform devices for Atheros AR5312 SoCs
15 */
16
17#include <linux/init.h>
18#include <linux/kernel.h>
19#include <linux/bitops.h>
20#include <linux/irqdomain.h>
21#include <linux/interrupt.h>
22#include <linux/platform_device.h>
23#include <linux/mtd/physmap.h>
24#include <linux/reboot.h>
25#include <asm/bootinfo.h>
26#include <asm/reboot.h>
27#include <asm/time.h>
28
29#include <ath25_platform.h>
30
31#include "devices.h"
32#include "ar5312.h"
33#include "ar5312_regs.h"
34
35static void __iomem *ar5312_rst_base;
36static struct irq_domain *ar5312_misc_irq_domain;
37
38static inline u32 ar5312_rst_reg_read(u32 reg)
39{
40 return __raw_readl(ar5312_rst_base + reg);
41}
42
43static inline void ar5312_rst_reg_write(u32 reg, u32 val)
44{
45 __raw_writel(val, ar5312_rst_base + reg);
46}
47
48static inline void ar5312_rst_reg_mask(u32 reg, u32 mask, u32 val)
49{
50 u32 ret = ar5312_rst_reg_read(reg);
51
52 ret &= ~mask;
53 ret |= val;
54 ar5312_rst_reg_write(reg, ret);
55}
56
57static irqreturn_t ar5312_ahb_err_handler(int cpl, void *dev_id)
58{
59 u32 proc1 = ar5312_rst_reg_read(AR5312_PROC1);
60 u32 proc_addr = ar5312_rst_reg_read(AR5312_PROCADDR); /* clears error */
61 u32 dma1 = ar5312_rst_reg_read(AR5312_DMA1);
62 u32 dma_addr = ar5312_rst_reg_read(AR5312_DMAADDR); /* clears error */
63
64 pr_emerg("AHB interrupt: PROCADDR=0x%8.8x PROC1=0x%8.8x DMAADDR=0x%8.8x DMA1=0x%8.8x\n",
65 proc_addr, proc1, dma_addr, dma1);
66
67 machine_restart("AHB error"); /* Catastrophic failure */
68 return IRQ_HANDLED;
69}
70
71static struct irqaction ar5312_ahb_err_interrupt = {
72 .handler = ar5312_ahb_err_handler,
73 .name = "ar5312-ahb-error",
74};
75
76static void ar5312_misc_irq_handler(unsigned irq, struct irq_desc *desc)
77{
78 u32 pending = ar5312_rst_reg_read(AR5312_ISR) &
79 ar5312_rst_reg_read(AR5312_IMR);
80	unsigned nr = 0, misc_irq = 0;
81
82 if (pending) {
83 struct irq_domain *domain = irq_get_handler_data(irq);
84
85 nr = __ffs(pending);
86 misc_irq = irq_find_mapping(domain, nr);
87 }
88
89 if (misc_irq) {
90 generic_handle_irq(misc_irq);
91 if (nr == AR5312_MISC_IRQ_TIMER)
92 ar5312_rst_reg_read(AR5312_TIMER);
93 } else {
94 spurious_interrupt();
95 }
96}
97
98/* Enable the specified AR5312_MISC_IRQ interrupt */
99static void ar5312_misc_irq_unmask(struct irq_data *d)
100{
101 ar5312_rst_reg_mask(AR5312_IMR, 0, BIT(d->hwirq));
102}
103
104/* Disable the specified AR5312_MISC_IRQ interrupt */
105static void ar5312_misc_irq_mask(struct irq_data *d)
106{
107 ar5312_rst_reg_mask(AR5312_IMR, BIT(d->hwirq), 0);
108 ar5312_rst_reg_read(AR5312_IMR); /* flush write buffer */
109}
110
111static struct irq_chip ar5312_misc_irq_chip = {
112 .name = "ar5312-misc",
113 .irq_unmask = ar5312_misc_irq_unmask,
114 .irq_mask = ar5312_misc_irq_mask,
115};
116
117static int ar5312_misc_irq_map(struct irq_domain *d, unsigned irq,
118 irq_hw_number_t hw)
119{
120 irq_set_chip_and_handler(irq, &ar5312_misc_irq_chip, handle_level_irq);
121 return 0;
122}
123
124static struct irq_domain_ops ar5312_misc_irq_domain_ops = {
125 .map = ar5312_misc_irq_map,
126};
127
128static void ar5312_irq_dispatch(void)
129{
130 u32 pending = read_c0_status() & read_c0_cause();
131
132 if (pending & CAUSEF_IP2)
133 do_IRQ(AR5312_IRQ_WLAN0);
134 else if (pending & CAUSEF_IP5)
135 do_IRQ(AR5312_IRQ_WLAN1);
136 else if (pending & CAUSEF_IP6)
137 do_IRQ(AR5312_IRQ_MISC);
138 else if (pending & CAUSEF_IP7)
139 do_IRQ(ATH25_IRQ_CPU_CLOCK);
140 else
141 spurious_interrupt();
142}
143
144void __init ar5312_arch_init_irq(void)
145{
146 struct irq_domain *domain;
147 unsigned irq;
148
149 ath25_irq_dispatch = ar5312_irq_dispatch;
150
151 domain = irq_domain_add_linear(NULL, AR5312_MISC_IRQ_COUNT,
152 &ar5312_misc_irq_domain_ops, NULL);
153 if (!domain)
154 panic("Failed to add IRQ domain");
155
156 irq = irq_create_mapping(domain, AR5312_MISC_IRQ_AHB_PROC);
157 setup_irq(irq, &ar5312_ahb_err_interrupt);
158
159 irq_set_chained_handler(AR5312_IRQ_MISC, ar5312_misc_irq_handler);
160 irq_set_handler_data(AR5312_IRQ_MISC, domain);
161
162 ar5312_misc_irq_domain = domain;
163}
164
165static struct physmap_flash_data ar5312_flash_data = {
166 .width = 2,
167};
168
169static struct resource ar5312_flash_resource = {
170 .start = AR5312_FLASH_BASE,
171 .end = AR5312_FLASH_BASE + AR5312_FLASH_SIZE - 1,
172 .flags = IORESOURCE_MEM,
173};
174
175static struct platform_device ar5312_physmap_flash = {
176 .name = "physmap-flash",
177 .id = 0,
178 .dev.platform_data = &ar5312_flash_data,
179 .resource = &ar5312_flash_resource,
180 .num_resources = 1,
181};
182
183static void __init ar5312_flash_init(void)
184{
185 void __iomem *flashctl_base;
186 u32 ctl;
187
188 flashctl_base = ioremap_nocache(AR5312_FLASHCTL_BASE,
189 AR5312_FLASHCTL_SIZE);
190
191 ctl = __raw_readl(flashctl_base + AR5312_FLASHCTL0);
192 ctl &= AR5312_FLASHCTL_MW;
193
194 /* fixup flash width */
195 switch (ctl) {
196 case AR5312_FLASHCTL_MW16:
197 ar5312_flash_data.width = 2;
198 break;
199 case AR5312_FLASHCTL_MW8:
200 default:
201 ar5312_flash_data.width = 1;
202 break;
203 }
204
205 /*
206 * Configure flash bank 0.
207	 * Assume 8M window size. Flash will be aliased if it's smaller.
208 */
209 ctl |= AR5312_FLASHCTL_E | AR5312_FLASHCTL_AC_8M | AR5312_FLASHCTL_RBLE;
210 ctl |= 0x01 << AR5312_FLASHCTL_IDCY_S;
211 ctl |= 0x07 << AR5312_FLASHCTL_WST1_S;
212 ctl |= 0x07 << AR5312_FLASHCTL_WST2_S;
213 __raw_writel(ctl, flashctl_base + AR5312_FLASHCTL0);
214
215 /* Disable other flash banks */
216 ctl = __raw_readl(flashctl_base + AR5312_FLASHCTL1);
217 ctl &= ~(AR5312_FLASHCTL_E | AR5312_FLASHCTL_AC);
218 __raw_writel(ctl, flashctl_base + AR5312_FLASHCTL1);
219 ctl = __raw_readl(flashctl_base + AR5312_FLASHCTL2);
220 ctl &= ~(AR5312_FLASHCTL_E | AR5312_FLASHCTL_AC);
221 __raw_writel(ctl, flashctl_base + AR5312_FLASHCTL2);
222
223 iounmap(flashctl_base);
224}
225
226void __init ar5312_init_devices(void)
227{
228 struct ath25_boarddata *config;
229
230 ar5312_flash_init();
231
232 /* Locate board/radio config data */
233 ath25_find_config(AR5312_FLASH_BASE, AR5312_FLASH_SIZE);
234 config = ath25_board.config;
235
236 /* AR2313 has CPU minor rev. 10 */
237 if ((current_cpu_data.processor_id & 0xff) == 0x0a)
238 ath25_soc = ATH25_SOC_AR2313;
239
240 /* AR2312 shares the same Silicon ID as AR5312 */
241 else if (config->flags & BD_ISCASPER)
242 ath25_soc = ATH25_SOC_AR2312;
243
244 /* Everything else is probably AR5312 or compatible */
245 else
246 ath25_soc = ATH25_SOC_AR5312;
247
248 platform_device_register(&ar5312_physmap_flash);
249
250 switch (ath25_soc) {
251 case ATH25_SOC_AR5312:
252 if (!ath25_board.radio)
253 return;
254
255 if (!(config->flags & BD_WLAN0))
256 break;
257
258 ath25_add_wmac(0, AR5312_WLAN0_BASE, AR5312_IRQ_WLAN0);
259 break;
260 case ATH25_SOC_AR2312:
261 case ATH25_SOC_AR2313:
262 if (!ath25_board.radio)
263 return;
264 break;
265 default:
266 break;
267 }
268
269 if (config->flags & BD_WLAN1)
270 ath25_add_wmac(1, AR5312_WLAN1_BASE, AR5312_IRQ_WLAN1);
271}
272
273static void ar5312_restart(char *command)
274{
275 /* reset the system */
276 local_irq_disable();
277 while (1)
278 ar5312_rst_reg_write(AR5312_RESET, AR5312_RESET_SYSTEM);
279}
280
281/*
282 * This table is indexed by bits 5..4 of the CLOCKCTL1 register
283 * to determine the predivisor value.
284 */
285static unsigned clockctl1_predivide_table[4] __initdata = { 1, 2, 4, 5 };
286
287static unsigned __init ar5312_cpu_frequency(void)
288{
289 u32 scratch, devid, clock_ctl1;
290 u32 predivide_mask, multiplier_mask, doubler_mask;
291 unsigned predivide_shift, multiplier_shift;
292 unsigned predivide_select, predivisor, multiplier;
293
294 /* Trust the bootrom's idea of cpu frequency. */
295 scratch = ar5312_rst_reg_read(AR5312_SCRATCH);
296 if (scratch)
297 return scratch;
298
299 devid = ar5312_rst_reg_read(AR5312_REV);
300 devid = (devid & AR5312_REV_MAJ) >> AR5312_REV_MAJ_S;
301 if (devid == AR5312_REV_MAJ_AR2313) {
302 predivide_mask = AR2313_CLOCKCTL1_PREDIVIDE_MASK;
303 predivide_shift = AR2313_CLOCKCTL1_PREDIVIDE_SHIFT;
304 multiplier_mask = AR2313_CLOCKCTL1_MULTIPLIER_MASK;
305 multiplier_shift = AR2313_CLOCKCTL1_MULTIPLIER_SHIFT;
306 doubler_mask = AR2313_CLOCKCTL1_DOUBLER_MASK;
307 } else { /* AR5312 and AR2312 */
308 predivide_mask = AR5312_CLOCKCTL1_PREDIVIDE_MASK;
309 predivide_shift = AR5312_CLOCKCTL1_PREDIVIDE_SHIFT;
310 multiplier_mask = AR5312_CLOCKCTL1_MULTIPLIER_MASK;
311 multiplier_shift = AR5312_CLOCKCTL1_MULTIPLIER_SHIFT;
312 doubler_mask = AR5312_CLOCKCTL1_DOUBLER_MASK;
313 }
314
315 /*
316 * Clocking is derived from a fixed 40MHz input clock.
317 *
318 * cpu_freq = input_clock * MULT (where MULT is PLL multiplier)
319 * sys_freq = cpu_freq / 4 (used for APB clock, serial,
320 * flash, Timer, Watchdog Timer)
321 *
322 * cnt_freq = cpu_freq / 2 (use for CPU count/compare)
323 *
324 * So, for example, with a PLL multiplier of 5, we have
325 *
326 * cpu_freq = 200MHz
327 * sys_freq = 50MHz
328 * cnt_freq = 100MHz
329 *
330 * We compute the CPU frequency, based on PLL settings.
331 */
332
333 clock_ctl1 = ar5312_rst_reg_read(AR5312_CLOCKCTL1);
334 predivide_select = (clock_ctl1 & predivide_mask) >> predivide_shift;
335 predivisor = clockctl1_predivide_table[predivide_select];
336 multiplier = (clock_ctl1 & multiplier_mask) >> multiplier_shift;
337
338 if (clock_ctl1 & doubler_mask)
339 multiplier <<= 1;
340
341 return (40000000 / predivisor) * multiplier;
342}
343
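Worked through for one assumed CLOCKCTL1 value, the computation above goes: predivide select 1 gives predivisor 2, so the 40 MHz input becomes 20 MHz, and a multiplier of 5 yields a 100 MHz CPU clock. A self-contained sketch (register value assumed, not taken from hardware):

/*
 * Illustration only: the ar5312_cpu_frequency() arithmetic for an
 * assumed CLOCKCTL1 value, using the AR5312/AR2312 field layout.
 */
#include <stdio.h>

static const unsigned predivide_table[4] = { 1, 2, 4, 5 };

int main(void)
{
	unsigned clock_ctl1 = 0x00000510;	/* assumed: sel 1, mult 5 */
	unsigned predivisor = predivide_table[(clock_ctl1 & 0x30) >> 4];
	unsigned multiplier = (clock_ctl1 & 0x1f00) >> 8;
	unsigned cpu_freq;

	if (clock_ctl1 & 0x10000)		/* doubler bit */
		multiplier <<= 1;

	cpu_freq = (40000000 / predivisor) * multiplier;

	/* (40 MHz / 2) * 5 = 100 MHz CPU clock for this value */
	printf("cpu %u Hz, sys %u Hz, cnt %u Hz\n",
	       cpu_freq, cpu_freq / 4, cpu_freq / 2);
	return 0;
}
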
344static inline unsigned ar5312_sys_frequency(void)
345{
346 return ar5312_cpu_frequency() / 4;
347}
348
349void __init ar5312_plat_time_init(void)
350{
351 mips_hpt_frequency = ar5312_cpu_frequency() / 2;
352}
353
354void __init ar5312_plat_mem_setup(void)
355{
356 void __iomem *sdram_base;
357 u32 memsize, memcfg, bank0_ac, bank1_ac;
358 u32 devid;
359
360 /* Detect memory size */
361 sdram_base = ioremap_nocache(AR5312_SDRAMCTL_BASE,
362 AR5312_SDRAMCTL_SIZE);
363 memcfg = __raw_readl(sdram_base + AR5312_MEM_CFG1);
364 bank0_ac = ATH25_REG_MS(memcfg, AR5312_MEM_CFG1_AC0);
365 bank1_ac = ATH25_REG_MS(memcfg, AR5312_MEM_CFG1_AC1);
366 memsize = (bank0_ac ? (1 << (bank0_ac + 1)) : 0) +
367 (bank1_ac ? (1 << (bank1_ac + 1)) : 0);
368 memsize <<= 20;
369 add_memory_region(0, memsize, BOOT_MEM_RAM);
370 iounmap(sdram_base);
371
372 ar5312_rst_base = ioremap_nocache(AR5312_RST_BASE, AR5312_RST_SIZE);
373
374 devid = ar5312_rst_reg_read(AR5312_REV);
375 devid >>= AR5312_REV_WMAC_MIN_S;
376 devid &= AR5312_REV_CHIP;
377 ath25_board.devid = (u16)devid;
378
379 /* Clear any lingering AHB errors */
380 ar5312_rst_reg_read(AR5312_PROCADDR);
381 ar5312_rst_reg_read(AR5312_DMAADDR);
382 ar5312_rst_reg_write(AR5312_WDT_CTRL, AR5312_WDT_CTRL_IGNORE);
383
384 _machine_restart = ar5312_restart;
385}
386
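The sizing logic above treats each non-zero address-check field as 2^(ac+1) megabytes of SDRAM in that bank. A short standalone sketch with assumed field values:

/*
 * Illustration only: AR5312 memory sizing. Each non-zero AC field
 * contributes 2^(ac + 1) MB; the values below are assumed examples.
 */
#include <stdio.h>

int main(void)
{
	unsigned bank0_ac = 4, bank1_ac = 0;	/* assumed AC fields */
	unsigned memsize = (bank0_ac ? (1 << (bank0_ac + 1)) : 0) +
			   (bank1_ac ? (1 << (bank1_ac + 1)) : 0);

	memsize <<= 20;				/* MB -> bytes */
	printf("%u bytes (%u MB)\n", memsize, memsize >> 20);	/* 32 MB */
	return 0;
}
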
387void __init ar5312_arch_init(void)
388{
389 unsigned irq = irq_create_mapping(ar5312_misc_irq_domain,
390 AR5312_MISC_IRQ_UART0);
391
392 ath25_serial_setup(AR5312_UART0_BASE, irq, ar5312_sys_frequency());
393}
diff --git a/arch/mips/ath25/ar5312.h b/arch/mips/ath25/ar5312.h
new file mode 100644
index 000000000000..470abb0052bd
--- /dev/null
+++ b/arch/mips/ath25/ar5312.h
@@ -0,0 +1,22 @@
1#ifndef __AR5312_H
2#define __AR5312_H
3
4#ifdef CONFIG_SOC_AR5312
5
6void ar5312_arch_init_irq(void);
7void ar5312_init_devices(void);
8void ar5312_plat_time_init(void);
9void ar5312_plat_mem_setup(void);
10void ar5312_arch_init(void);
11
12#else
13
14static inline void ar5312_arch_init_irq(void) {}
15static inline void ar5312_init_devices(void) {}
16static inline void ar5312_plat_time_init(void) {}
17static inline void ar5312_plat_mem_setup(void) {}
18static inline void ar5312_arch_init(void) {}
19
20#endif
21
22#endif /* __AR5312_H */
diff --git a/arch/mips/ath25/ar5312_regs.h b/arch/mips/ath25/ar5312_regs.h
new file mode 100644
index 000000000000..4b947f967439
--- /dev/null
+++ b/arch/mips/ath25/ar5312_regs.h
@@ -0,0 +1,224 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved.
7 * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
8 * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
9 */
10
11#ifndef __ASM_MACH_ATH25_AR5312_REGS_H
12#define __ASM_MACH_ATH25_AR5312_REGS_H
13
14/*
15 * IRQs
16 */
17#define AR5312_IRQ_WLAN0 (MIPS_CPU_IRQ_BASE + 2) /* C0_CAUSE: 0x0400 */
18#define AR5312_IRQ_ENET0 (MIPS_CPU_IRQ_BASE + 3) /* C0_CAUSE: 0x0800 */
19#define AR5312_IRQ_ENET1 (MIPS_CPU_IRQ_BASE + 4) /* C0_CAUSE: 0x1000 */
20#define AR5312_IRQ_WLAN1 (MIPS_CPU_IRQ_BASE + 5) /* C0_CAUSE: 0x2000 */
21#define AR5312_IRQ_MISC (MIPS_CPU_IRQ_BASE + 6) /* C0_CAUSE: 0x4000 */
22
23/*
24 * Miscellaneous interrupts, which share IP6.
25 */
26#define AR5312_MISC_IRQ_TIMER 0
27#define AR5312_MISC_IRQ_AHB_PROC 1
28#define AR5312_MISC_IRQ_AHB_DMA 2
29#define AR5312_MISC_IRQ_GPIO 3
30#define AR5312_MISC_IRQ_UART0 4
31#define AR5312_MISC_IRQ_UART0_DMA 5
32#define AR5312_MISC_IRQ_WATCHDOG 6
33#define AR5312_MISC_IRQ_LOCAL 7
34#define AR5312_MISC_IRQ_SPI 8
35#define AR5312_MISC_IRQ_COUNT 9
36
37/*
38 * Address Map
39 *
40 * The AR5312 supports 2 enet MACs, even though many reference boards only
41 * actually use 1 of them (i.e. only MAC 0 is actually connected to an enet
42 * PHY or PHY switch). The AR2312 supports 1 enet MAC.
43 */
44#define AR5312_WLAN0_BASE 0x18000000
45#define AR5312_ENET0_BASE 0x18100000
46#define AR5312_ENET1_BASE 0x18200000
47#define AR5312_SDRAMCTL_BASE 0x18300000
48#define AR5312_SDRAMCTL_SIZE 0x00000010
49#define AR5312_FLASHCTL_BASE 0x18400000
50#define AR5312_FLASHCTL_SIZE 0x00000010
51#define AR5312_WLAN1_BASE 0x18500000
52#define AR5312_UART0_BASE 0x1c000000 /* UART MMR */
53#define AR5312_GPIO_BASE 0x1c002000
54#define AR5312_GPIO_SIZE 0x00000010
55#define AR5312_RST_BASE 0x1c003000
56#define AR5312_RST_SIZE 0x00000100
57#define AR5312_FLASH_BASE 0x1e000000
58#define AR5312_FLASH_SIZE 0x00800000
59
60/*
61 * Need these defines to determine true number of ethernet MACs
62 */
63#define AR5312_AR5312_REV2 0x0052 /* AR5312 WMAC (AP31) */
64#define AR5312_AR5312_REV7 0x0057 /* AR5312 WMAC (AP30-040) */
65#define AR5312_AR2313_REV8 0x0058 /* AR2313 WMAC (AP43-030) */
66
67/* Reset/Timer Block Address Map */
68#define AR5312_TIMER 0x0000 /* countdown timer */
69#define AR5312_RELOAD 0x0004 /* timer reload value */
70#define AR5312_WDT_CTRL 0x0008 /* watchdog cntrl */
71#define AR5312_WDT_TIMER 0x000c /* watchdog timer */
72#define AR5312_ISR 0x0010 /* Intr Status Reg */
73#define AR5312_IMR 0x0014 /* Intr Mask Reg */
74#define AR5312_RESET 0x0020
75#define AR5312_CLOCKCTL1 0x0064
76#define AR5312_SCRATCH 0x006c
77#define AR5312_PROCADDR 0x0070
78#define AR5312_PROC1 0x0074
79#define AR5312_DMAADDR 0x0078
80#define AR5312_DMA1 0x007c
81#define AR5312_ENABLE 0x0080 /* interface enb */
82#define AR5312_REV 0x0090 /* revision */
83
84/* AR5312_WDT_CTRL register bit field definitions */
85#define AR5312_WDT_CTRL_IGNORE 0x00000000 /* ignore expiration */
86#define AR5312_WDT_CTRL_NMI 0x00000001
87#define AR5312_WDT_CTRL_RESET 0x00000002
88
89/* AR5312_ISR register bit field definitions */
90#define AR5312_ISR_TIMER 0x00000001
91#define AR5312_ISR_AHBPROC 0x00000002
92#define AR5312_ISR_AHBDMA 0x00000004
93#define AR5312_ISR_GPIO 0x00000008
94#define AR5312_ISR_UART0 0x00000010
95#define AR5312_ISR_UART0DMA 0x00000020
96#define AR5312_ISR_WD 0x00000040
97#define AR5312_ISR_LOCAL 0x00000080
98
99/* AR5312_RESET register bit field definitions */
100#define AR5312_RESET_SYSTEM 0x00000001 /* cold reset full system */
101#define AR5312_RESET_PROC 0x00000002 /* cold reset MIPS core */
102#define AR5312_RESET_WLAN0 0x00000004 /* cold reset WLAN MAC/BB */
103#define AR5312_RESET_EPHY0 0x00000008 /* cold reset ENET0 phy */
104#define AR5312_RESET_EPHY1 0x00000010 /* cold reset ENET1 phy */
105#define AR5312_RESET_ENET0 0x00000020 /* cold reset ENET0 MAC */
106#define AR5312_RESET_ENET1 0x00000040 /* cold reset ENET1 MAC */
107#define AR5312_RESET_UART0 0x00000100 /* cold reset UART0 */
108#define AR5312_RESET_WLAN1 0x00000200 /* cold reset WLAN MAC/BB */
109#define AR5312_RESET_APB 0x00000400 /* cold reset APB ar5312 */
110#define AR5312_RESET_WARM_PROC 0x00001000 /* warm reset MIPS core */
111#define AR5312_RESET_WARM_WLAN0_MAC 0x00002000 /* warm reset WLAN0 MAC */
112#define AR5312_RESET_WARM_WLAN0_BB 0x00004000 /* warm reset WLAN0 BB */
113#define AR5312_RESET_NMI 0x00010000 /* send an NMI to the CPU */
114#define AR5312_RESET_WARM_WLAN1_MAC 0x00020000 /* warm reset WLAN1 MAC */
115#define AR5312_RESET_WARM_WLAN1_BB 0x00040000 /* warm reset WLAN1 BB */
116#define AR5312_RESET_LOCAL_BUS 0x00080000 /* reset local bus */
117#define AR5312_RESET_WDOG 0x00100000 /* last reset was a wdt */
118
119#define AR5312_RESET_WMAC0_BITS (AR5312_RESET_WLAN0 |\
120 AR5312_RESET_WARM_WLAN0_MAC |\
121 AR5312_RESET_WARM_WLAN0_BB)
122
123#define AR5312_RESET_WMAC1_BITS (AR5312_RESET_WLAN1 |\
124 AR5312_RESET_WARM_WLAN1_MAC |\
125 AR5312_RESET_WARM_WLAN1_BB)
126
127/* AR5312_CLOCKCTL1 register bit field definitions (valid for AR5312 and AR2312) */
128#define AR5312_CLOCKCTL1_PREDIVIDE_MASK 0x00000030
129#define AR5312_CLOCKCTL1_PREDIVIDE_SHIFT 4
130#define AR5312_CLOCKCTL1_MULTIPLIER_MASK 0x00001f00
131#define AR5312_CLOCKCTL1_MULTIPLIER_SHIFT 8
132#define AR5312_CLOCKCTL1_DOUBLER_MASK 0x00010000
133
141/* Valid for AR2313 */
142#define AR2313_CLOCKCTL1_PREDIVIDE_MASK 0x00003000
143#define AR2313_CLOCKCTL1_PREDIVIDE_SHIFT 12
144#define AR2313_CLOCKCTL1_MULTIPLIER_MASK 0x001f0000
145#define AR2313_CLOCKCTL1_MULTIPLIER_SHIFT 16
146#define AR2313_CLOCKCTL1_DOUBLER_MASK 0x00000000
147
148/* AR5312_ENABLE register bit field definitions */
149#define AR5312_ENABLE_WLAN0 0x00000001
150#define AR5312_ENABLE_ENET0 0x00000002
151#define AR5312_ENABLE_ENET1 0x00000004
152#define AR5312_ENABLE_UART_AND_WLAN1_PIO 0x00000008 /* UART & WLAN1 PIO */
153#define AR5312_ENABLE_WLAN1_DMA 0x00000010 /* WLAN1 DMAs */
154#define AR5312_ENABLE_WLAN1 (AR5312_ENABLE_UART_AND_WLAN1_PIO |\
155 AR5312_ENABLE_WLAN1_DMA)
156
157/* AR5312_REV register bit field definitions */
158#define AR5312_REV_WMAC_MAJ 0x0000f000
159#define AR5312_REV_WMAC_MAJ_S 12
160#define AR5312_REV_WMAC_MIN 0x00000f00
161#define AR5312_REV_WMAC_MIN_S 8
162#define AR5312_REV_MAJ 0x000000f0
163#define AR5312_REV_MAJ_S 4
164#define AR5312_REV_MIN 0x0000000f
165#define AR5312_REV_MIN_S 0
166#define AR5312_REV_CHIP (AR5312_REV_MAJ | AR5312_REV_MIN)
167
168/* Major revision numbers, bits 7..4 of Revision ID register */
169#define AR5312_REV_MAJ_AR5312 0x4
170#define AR5312_REV_MAJ_AR2313 0x5
171
172/* Minor revision numbers, bits 3..0 of Revision ID register */
173#define AR5312_REV_MIN_DUAL 0x0 /* Dual WLAN version */
174#define AR5312_REV_MIN_SINGLE 0x1 /* Single WLAN version */
175
176/*
177 * ARM Flash Controller -- 3 flash banks with either x8 or x16 devices
178 */
179#define AR5312_FLASHCTL0 0x0000
180#define AR5312_FLASHCTL1 0x0004
181#define AR5312_FLASHCTL2 0x0008
182
183/* AR5312_FLASHCTL register bit field definitions */
184#define AR5312_FLASHCTL_IDCY 0x0000000f /* Idle cycle turnaround time */
185#define AR5312_FLASHCTL_IDCY_S 0
186#define AR5312_FLASHCTL_WST1 0x000003e0 /* Wait state 1 */
187#define AR5312_FLASHCTL_WST1_S 5
188#define AR5312_FLASHCTL_RBLE 0x00000400 /* Read byte lane enable */
189#define AR5312_FLASHCTL_WST2 0x0000f800 /* Wait state 2 */
190#define AR5312_FLASHCTL_WST2_S 11
191#define AR5312_FLASHCTL_AC 0x00070000 /* Flash addr check (added) */
192#define AR5312_FLASHCTL_AC_S 16
193#define AR5312_FLASHCTL_AC_128K 0x00000000
194#define AR5312_FLASHCTL_AC_256K 0x00010000
195#define AR5312_FLASHCTL_AC_512K 0x00020000
196#define AR5312_FLASHCTL_AC_1M 0x00030000
197#define AR5312_FLASHCTL_AC_2M 0x00040000
198#define AR5312_FLASHCTL_AC_4M 0x00050000
199#define AR5312_FLASHCTL_AC_8M 0x00060000
200#define AR5312_FLASHCTL_AC_RES 0x00070000 /* 16MB is not supported */
201#define AR5312_FLASHCTL_E 0x00080000 /* Flash bank enable (added) */
202#define AR5312_FLASHCTL_BUSERR 0x01000000 /* Bus transfer error flag */
203#define AR5312_FLASHCTL_WPERR 0x02000000 /* Write protect error flag */
204#define AR5312_FLASHCTL_WP 0x04000000 /* Write protect */
205#define AR5312_FLASHCTL_BM 0x08000000 /* Burst mode */
206#define AR5312_FLASHCTL_MW 0x30000000 /* Mem width */
207#define AR5312_FLASHCTL_MW8 0x00000000 /* Mem width x8 */
208#define AR5312_FLASHCTL_MW16 0x10000000 /* Mem width x16 */
209#define AR5312_FLASHCTL_MW32 0x20000000 /* Mem width x32 (not supp) */
210#define AR5312_FLASHCTL_ATNR 0x00000000 /* Access == no retry */
211#define AR5312_FLASHCTL_ATR 0x80000000 /* Access == retry every */
212#define AR5312_FLASHCTL_ATR4 0xc0000000 /* Access == retry every 4 */
213
214/*
215 * ARM SDRAM Controller -- just enough to determine memory size
216 */
217#define AR5312_MEM_CFG1 0x0004
218
219#define AR5312_MEM_CFG1_AC0_M 0x00000700 /* bank 0: SDRAM addr check */
220#define AR5312_MEM_CFG1_AC0_S 8
221#define AR5312_MEM_CFG1_AC1_M 0x00007000 /* bank 1: SDRAM addr check */
222#define AR5312_MEM_CFG1_AC1_S 12
223
224#endif /* __ASM_MACH_ATH25_AR5312_REGS_H */
diff --git a/arch/mips/ath25/board.c b/arch/mips/ath25/board.c
new file mode 100644
index 000000000000..b8bb78282d6a
--- /dev/null
+++ b/arch/mips/ath25/board.c
@@ -0,0 +1,234 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved.
7 * Copyright (C) 2006 FON Technology, SL.
8 * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
9 * Copyright (C) 2006-2009 Felix Fietkau <nbd@openwrt.org>
10 */
11
12#include <linux/init.h>
13#include <linux/interrupt.h>
14#include <asm/irq_cpu.h>
15#include <asm/reboot.h>
16#include <asm/bootinfo.h>
17#include <asm/time.h>
18
19#include <ath25_platform.h>
20#include "devices.h"
21#include "ar5312.h"
22#include "ar2315.h"
23
24void (*ath25_irq_dispatch)(void);
25
26static inline bool check_radio_magic(const void __iomem *addr)
27{
28 addr += 0x7a; /* offset for flash magic */
29 return (__raw_readb(addr) == 0x5a) && (__raw_readb(addr + 1) == 0xa5);
30}
31
32static inline bool check_notempty(const void __iomem *addr)
33{
34 return __raw_readl(addr) != 0xffffffff;
35}
36
37static inline bool check_board_data(const void __iomem *addr, bool broken)
38{
39 /* config magic found */
40 if (__raw_readl(addr) == ATH25_BD_MAGIC)
41 return true;
42
43 if (!broken)
44 return false;
45
46 /* broken board data detected, use radio data to find the
47 * offset, user will fix this */
48
49 if (check_radio_magic(addr + 0x1000))
50 return true;
51 if (check_radio_magic(addr + 0xf8))
52 return true;
53
54 return false;
55}
56
57static const void __iomem * __init find_board_config(const void __iomem *limit,
58 const bool broken)
59{
60 const void __iomem *addr;
61 const void __iomem *begin = limit - 0x1000;
62 const void __iomem *end = limit - 0x30000;
63
64 for (addr = begin; addr >= end; addr -= 0x1000)
65 if (check_board_data(addr, broken))
66 return addr;
67
68 return NULL;
69}
70
71static const void __iomem * __init find_radio_config(const void __iomem *limit,
72 const void __iomem *bcfg)
73{
74 const void __iomem *rcfg, *begin, *end;
75
76 /*
77 * Now find the start of Radio Configuration data, using heuristics:
78 * Search forward from Board Configuration data by 0x1000 bytes
79 * at a time until we find non-0xffffffff.
80 */
81 begin = bcfg + 0x1000;
82 end = limit;
83 for (rcfg = begin; rcfg < end; rcfg += 0x1000)
84 if (check_notempty(rcfg) && check_radio_magic(rcfg))
85 return rcfg;
86
87 /* AR2316 relocates radio config to new location */
88 begin = bcfg + 0xf8;
89 end = limit - 0x1000 + 0xf8;
90 for (rcfg = begin; rcfg < end; rcfg += 0x1000)
91 if (check_notempty(rcfg) && check_radio_magic(rcfg))
92 return rcfg;
93
94 return NULL;
95}
96
97/*
98 * NB: Search region size could be larger than the actual flash size,
99 * but this shouldn't be a problem here, because the flash
100 * will simply be mapped multiple times.
101 */
102int __init ath25_find_config(phys_addr_t base, unsigned long size)
103{
104 const void __iomem *flash_base, *flash_limit;
105 struct ath25_boarddata *config;
106 unsigned int rcfg_size;
107 int broken_boarddata = 0;
108 const void __iomem *bcfg, *rcfg;
109 u8 *board_data;
110 u8 *radio_data;
111 u8 *mac_addr;
112 u32 offset;
113
114 flash_base = ioremap_nocache(base, size);
115 flash_limit = flash_base + size;
116
117 ath25_board.config = NULL;
118 ath25_board.radio = NULL;
119
120 /* Copy the board and radio data to RAM, because accessing the mapped
121 * memory of the flash directly after booting is not safe */
122
123 /* Try to find valid board and radio data */
124 bcfg = find_board_config(flash_limit, false);
125
126 /* If that fails, try to at least find valid radio data */
127 if (!bcfg) {
128 bcfg = find_board_config(flash_limit, true);
129 broken_boarddata = 1;
130 }
131
132 if (!bcfg) {
133 pr_warn("WARNING: No board configuration data found!\n");
134 goto error;
135 }
136
137 board_data = kzalloc(BOARD_CONFIG_BUFSZ, GFP_KERNEL);
138 ath25_board.config = (struct ath25_boarddata *)board_data;
139 memcpy_fromio(board_data, bcfg, 0x100);
140 if (broken_boarddata) {
141 pr_warn("WARNING: broken board data detected\n");
142 config = ath25_board.config;
143 if (is_zero_ether_addr(config->enet0_mac)) {
144 pr_info("Fixing up empty mac addresses\n");
145 config->reset_config_gpio = 0xffff;
146 config->sys_led_gpio = 0xffff;
147 random_ether_addr(config->wlan0_mac);
148 config->wlan0_mac[0] &= ~0x06;
149 random_ether_addr(config->enet0_mac);
150 random_ether_addr(config->enet1_mac);
151 }
152 }
153
154 /* Radio config starts 0x100 bytes after board config, regardless
155 * of what the physical layout on the flash chip looks like */
156
157 rcfg = find_radio_config(flash_limit, bcfg);
158 if (!rcfg) {
159 pr_warn("WARNING: Could not find Radio Configuration data\n");
160 goto error;
161 }
162
163 radio_data = board_data + 0x100 + ((rcfg - bcfg) & 0xfff);
164 ath25_board.radio = radio_data;
165 offset = radio_data - board_data;
166 pr_info("Radio config found at offset 0x%x (0x%x)\n", rcfg - bcfg,
167 offset);
168 rcfg_size = BOARD_CONFIG_BUFSZ - offset;
169 memcpy_fromio(radio_data, rcfg, rcfg_size);
170
171 mac_addr = &radio_data[0x1d * 2];
172 if (is_broadcast_ether_addr(mac_addr)) {
173 pr_info("Radio MAC is blank; using board-data\n");
174 ether_addr_copy(mac_addr, ath25_board.config->wlan0_mac);
175 }
176
177 iounmap(flash_base);
178
179 return 0;
180
181error:
182 iounmap(flash_base);
183 return -ENODEV;
184}
185
186static void ath25_halt(void)
187{
188 local_irq_disable();
189 unreachable();
190}
191
192void __init plat_mem_setup(void)
193{
194 _machine_halt = ath25_halt;
195 pm_power_off = ath25_halt;
196
197 if (is_ar5312())
198 ar5312_plat_mem_setup();
199 else
200 ar2315_plat_mem_setup();
201
202 /* Disable data watchpoints */
203 write_c0_watchlo0(0);
204}
205
206asmlinkage void plat_irq_dispatch(void)
207{
208 ath25_irq_dispatch();
209}
210
211void __init plat_time_init(void)
212{
213 if (is_ar5312())
214 ar5312_plat_time_init();
215 else
216 ar2315_plat_time_init();
217}
218
219unsigned int __cpuinit get_c0_compare_int(void)
220{
221 return CP0_LEGACY_COMPARE_IRQ;
222}
223
224void __init arch_init_irq(void)
225{
226 clear_c0_status(ST0_IM);
227 mips_cpu_irq_init();
228
229 /* Initialize interrupt controllers */
230 if (is_ar5312())
231 ar5312_arch_init_irq();
232 else
233 ar2315_arch_init_irq();
234}
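
The board-config search in ath25_find_config() probes downward from the top of flash in 4 KB steps across the last 192 KB, i.e. at most 48 candidate blocks. A standalone sketch of that scan geometry (flash size assumed):

/*
 * Illustration only: geometry of the backward config scan in
 * find_board_config(). The flash size is an assumed example.
 */
#include <stdio.h>

int main(void)
{
	unsigned long flash_size = 0x800000;	/* assumed 8 MB part */
	unsigned long limit = flash_size;
	unsigned long addr, probes = 0;

	for (addr = limit - 0x1000; addr >= limit - 0x30000; addr -= 0x1000)
		probes++;			/* check_board_data() here */

	printf("%lu probes\n", probes);		/* 48 candidate blocks */
	return 0;
}
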
diff --git a/arch/mips/ath25/devices.c b/arch/mips/ath25/devices.c
new file mode 100644
index 000000000000..7a64567d1ac3
--- /dev/null
+++ b/arch/mips/ath25/devices.c
@@ -0,0 +1,125 @@
1#include <linux/kernel.h>
2#include <linux/init.h>
3#include <linux/serial_8250.h>
4#include <linux/platform_device.h>
5#include <asm/bootinfo.h>
6
7#include <ath25_platform.h>
8#include "devices.h"
9#include "ar5312.h"
10#include "ar2315.h"
11
12struct ar231x_board_config ath25_board;
13enum ath25_soc_type ath25_soc = ATH25_SOC_UNKNOWN;
14
15static struct resource ath25_wmac0_res[] = {
16 {
17 .name = "wmac0_membase",
18 .flags = IORESOURCE_MEM,
19 },
20 {
21 .name = "wmac0_irq",
22 .flags = IORESOURCE_IRQ,
23 }
24};
25
26static struct resource ath25_wmac1_res[] = {
27 {
28 .name = "wmac1_membase",
29 .flags = IORESOURCE_MEM,
30 },
31 {
32 .name = "wmac1_irq",
33 .flags = IORESOURCE_IRQ,
34 }
35};
36
37static struct platform_device ath25_wmac[] = {
38 {
39 .id = 0,
40 .name = "ar231x-wmac",
41 .resource = ath25_wmac0_res,
42 .num_resources = ARRAY_SIZE(ath25_wmac0_res),
43 .dev.platform_data = &ath25_board,
44 },
45 {
46 .id = 1,
47 .name = "ar231x-wmac",
48 .resource = ath25_wmac1_res,
49 .num_resources = ARRAY_SIZE(ath25_wmac1_res),
50 .dev.platform_data = &ath25_board,
51 },
52};
53
54static const char * const soc_type_strings[] = {
55 [ATH25_SOC_AR5312] = "Atheros AR5312",
56 [ATH25_SOC_AR2312] = "Atheros AR2312",
57 [ATH25_SOC_AR2313] = "Atheros AR2313",
58 [ATH25_SOC_AR2315] = "Atheros AR2315",
59 [ATH25_SOC_AR2316] = "Atheros AR2316",
60 [ATH25_SOC_AR2317] = "Atheros AR2317",
61 [ATH25_SOC_AR2318] = "Atheros AR2318",
62 [ATH25_SOC_UNKNOWN] = "Atheros (unknown)",
63};
64
65const char *get_system_type(void)
66{
67 if ((ath25_soc >= ARRAY_SIZE(soc_type_strings)) ||
68 !soc_type_strings[ath25_soc])
69 return soc_type_strings[ATH25_SOC_UNKNOWN];
70 return soc_type_strings[ath25_soc];
71}
72
73void __init ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk)
74{
75 struct uart_port s;
76
77 memset(&s, 0, sizeof(s));
78
79 s.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP;
80 s.iotype = UPIO_MEM32;
81 s.irq = irq;
82 s.regshift = 2;
83 s.mapbase = mapbase;
84 s.uartclk = uartclk;
85
86 early_serial_setup(&s);
87}
88
89int __init ath25_add_wmac(int nr, u32 base, int irq)
90{
91 struct resource *res;
92
93 ath25_wmac[nr].dev.platform_data = &ath25_board;
94 res = &ath25_wmac[nr].resource[0];
95 res->start = base;
96 res->end = base + 0x10000 - 1;
97 res++;
98 res->start = irq;
99 res->end = irq;
100 return platform_device_register(&ath25_wmac[nr]);
101}
102
103static int __init ath25_register_devices(void)
104{
105 if (is_ar5312())
106 ar5312_init_devices();
107 else
108 ar2315_init_devices();
109
110 return 0;
111}
112
113device_initcall(ath25_register_devices);
114
115static int __init ath25_arch_init(void)
116{
117 if (is_ar5312())
118 ar5312_arch_init();
119 else
120 ar2315_arch_init();
121
122 return 0;
123}
124
125arch_initcall(ath25_arch_init);
diff --git a/arch/mips/ath25/devices.h b/arch/mips/ath25/devices.h
new file mode 100644
index 000000000000..04d414115356
--- /dev/null
+++ b/arch/mips/ath25/devices.h
@@ -0,0 +1,43 @@
1#ifndef __ATH25_DEVICES_H
2#define __ATH25_DEVICES_H
3
4#include <linux/cpu.h>
5
6#define ATH25_REG_MS(_val, _field) (((_val) & _field##_M) >> _field##_S)
7
8#define ATH25_IRQ_CPU_CLOCK (MIPS_CPU_IRQ_BASE + 7) /* C0_CAUSE: 0x8000 */
9
10enum ath25_soc_type {
11 /* handled by ar5312.c */
12 ATH25_SOC_AR2312,
13 ATH25_SOC_AR2313,
14 ATH25_SOC_AR5312,
15
16 /* handled by ar2315.c */
17 ATH25_SOC_AR2315,
18 ATH25_SOC_AR2316,
19 ATH25_SOC_AR2317,
20 ATH25_SOC_AR2318,
21
22 ATH25_SOC_UNKNOWN
23};
24
25extern enum ath25_soc_type ath25_soc;
26extern struct ar231x_board_config ath25_board;
27extern void (*ath25_irq_dispatch)(void);
28
29int ath25_find_config(phys_addr_t base, unsigned long size);
30void ath25_serial_setup(u32 mapbase, int irq, unsigned int uartclk);
31int ath25_add_wmac(int nr, u32 base, int irq);
32
33static inline bool is_ar2315(void)
34{
35 return (current_cpu_data.cputype == CPU_4KEC);
36}
37
38static inline bool is_ar5312(void)
39{
40 return !is_ar2315();
41}
42
43#endif
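
ATH25_REG_MS() pastes the _M and _S suffixes onto a field name to mask and shift it out in one step. A tiny standalone sketch of how it expands (register value assumed):

/* Illustration only: how ATH25_REG_MS() expands for a named field. */
#include <stdio.h>

#define ATH25_REG_MS(_val, _field) (((_val) & _field##_M) >> _field##_S)

#define AR5312_MEM_CFG1_AC0_M	0x00000700	/* bank 0 addr check */
#define AR5312_MEM_CFG1_AC0_S	8

int main(void)
{
	unsigned memcfg = 0x00000400;	/* assumed register value */

	/* Expands to ((memcfg & 0x700) >> 8) == 4 */
	printf("%u\n", ATH25_REG_MS(memcfg, AR5312_MEM_CFG1_AC0));
	return 0;
}
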
diff --git a/arch/mips/ath25/early_printk.c b/arch/mips/ath25/early_printk.c
new file mode 100644
index 000000000000..36035b628161
--- /dev/null
+++ b/arch/mips/ath25/early_printk.c
@@ -0,0 +1,44 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2010 Gabor Juhos <juhosg@openwrt.org>
7 */
8
9#include <linux/mm.h>
10#include <linux/io.h>
11#include <linux/serial_reg.h>
12
13#include "devices.h"
14#include "ar2315_regs.h"
15#include "ar5312_regs.h"
16
17static inline void prom_uart_wr(void __iomem *base, unsigned reg,
18 unsigned char ch)
19{
20 __raw_writel(ch, base + 4 * reg);
21}
22
23static inline unsigned char prom_uart_rr(void __iomem *base, unsigned reg)
24{
25 return __raw_readl(base + 4 * reg);
26}
27
28void prom_putchar(unsigned char ch)
29{
30 static void __iomem *base;
31
32 if (unlikely(base == NULL)) {
33 if (is_ar2315())
34 base = (void __iomem *)(KSEG1ADDR(AR2315_UART0_BASE));
35 else
36 base = (void __iomem *)(KSEG1ADDR(AR5312_UART0_BASE));
37 }
38
39 while ((prom_uart_rr(base, UART_LSR) & UART_LSR_THRE) == 0)
40 ;
41 prom_uart_wr(base, UART_TX, ch);
42 while ((prom_uart_rr(base, UART_LSR) & UART_LSR_THRE) == 0)
43 ;
44}
diff --git a/arch/mips/ath25/prom.c b/arch/mips/ath25/prom.c
new file mode 100644
index 000000000000..edf82be8870d
--- /dev/null
+++ b/arch/mips/ath25/prom.c
@@ -0,0 +1,26 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright MontaVista Software Inc
7 * Copyright (C) 2003 Atheros Communications, Inc., All Rights Reserved.
8 * Copyright (C) 2006 FON Technology, SL.
9 * Copyright (C) 2006 Imre Kaloz <kaloz@openwrt.org>
10 * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
11 */
12
13/*
14 * Prom setup file for AR5312/AR231x SoCs
15 */
16
17#include <linux/init.h>
18#include <asm/bootinfo.h>
19
20void __init prom_init(void)
21{
22}
23
24void __init prom_free_prom_memory(void)
25{
26}
diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c
index 9c0e1761773f..6adae366f11a 100644
--- a/arch/mips/ath79/irq.c
+++ b/arch/mips/ath79/irq.c
@@ -359,7 +359,6 @@ void __init arch_init_irq(void)
359 BUG(); 359 BUG();
360 } 360 }
361 361
362 cp0_perfcount_irq = ATH79_MISC_IRQ(5);
363 mips_cpu_irq_init(); 362 mips_cpu_irq_init();
364 ath79_misc_irq_init(); 363 ath79_misc_irq_init();
365 364
diff --git a/arch/mips/ath79/prom.c b/arch/mips/ath79/prom.c
index e9cbd7c2918f..e1fe63051136 100644
--- a/arch/mips/ath79/prom.c
+++ b/arch/mips/ath79/prom.c
@@ -13,42 +13,24 @@
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/io.h> 14#include <linux/io.h>
15#include <linux/string.h> 15#include <linux/string.h>
16#include <linux/initrd.h>
16 17
17#include <asm/bootinfo.h> 18#include <asm/bootinfo.h>
18#include <asm/addrspace.h> 19#include <asm/addrspace.h>
20#include <asm/fw/fw.h>
19 21
20#include "common.h" 22#include "common.h"
21 23
22static inline int is_valid_ram_addr(void *addr)
23{
24 if (((u32) addr > KSEG0) &&
25 ((u32) addr < (KSEG0 + ATH79_MEM_SIZE_MAX)))
26 return 1;
27
28 if (((u32) addr > KSEG1) &&
29 ((u32) addr < (KSEG1 + ATH79_MEM_SIZE_MAX)))
30 return 1;
31
32 return 0;
33}
34
35static __init void ath79_prom_init_cmdline(int argc, char **argv)
36{
37 int i;
38
39 if (!is_valid_ram_addr(argv))
40 return;
41
42 for (i = 0; i < argc; i++)
43 if (is_valid_ram_addr(argv[i])) {
44 strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline));
45 strlcat(arcs_cmdline, argv[i], sizeof(arcs_cmdline));
46 }
47}
48
49void __init prom_init(void) 24void __init prom_init(void)
50{ 25{
51 ath79_prom_init_cmdline(fw_arg0, (char **)fw_arg1); 26 fw_init_cmdline();
27
28 /* Read the initrd address from the firmware environment */
29 initrd_start = fw_getenvl("initrd_start");
30 if (initrd_start) {
31 initrd_start = KSEG0ADDR(initrd_start);
32 initrd_end = initrd_start + fw_getenvl("initrd_size");
33 }
52} 34}
53 35
54void __init prom_free_prom_memory(void) 36void __init prom_free_prom_memory(void)
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 64807a4809d0..a73c93c3d44a 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -182,6 +182,11 @@ const char *get_system_type(void)
182 return ath79_sys_type; 182 return ath79_sys_type;
183} 183}
184 184
185int get_c0_perfcount_int(void)
186{
187 return ATH79_MISC_IRQ(5);
188}
189
185unsigned int get_c0_compare_int(void) 190unsigned int get_c0_compare_int(void)
186{ 191{
187 return CP0_LEGACY_COMPARE_IRQ; 192 return CP0_LEGACY_COMPARE_IRQ;
diff --git a/arch/mips/bcm3384/Makefile b/arch/mips/bcm3384/Makefile
new file mode 100644
index 000000000000..a393955cba08
--- /dev/null
+++ b/arch/mips/bcm3384/Makefile
@@ -0,0 +1 @@
obj-y += setup.o irq.o dma.o
diff --git a/arch/mips/bcm3384/Platform b/arch/mips/bcm3384/Platform
new file mode 100644
index 000000000000..8e1ca0819e1b
--- /dev/null
+++ b/arch/mips/bcm3384/Platform
@@ -0,0 +1,7 @@
1#
2# Broadcom BCM3384 boards
3#
4platform-$(CONFIG_BCM3384) += bcm3384/
5cflags-$(CONFIG_BCM3384) += \
6 -I$(srctree)/arch/mips/include/asm/mach-bcm3384/
7load-$(CONFIG_BCM3384) := 0xffffffff80010000
diff --git a/arch/mips/bcm3384/dma.c b/arch/mips/bcm3384/dma.c
new file mode 100644
index 000000000000..ea42012fd4f5
--- /dev/null
+++ b/arch/mips/bcm3384/dma.c
@@ -0,0 +1,81 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2014 Kevin Cernekee <cernekee@gmail.com>
7 */
8
9#include <linux/device.h>
10#include <linux/dma-direction.h>
11#include <linux/dma-mapping.h>
12#include <linux/init.h>
13#include <linux/mm.h>
14#include <linux/of.h>
15#include <linux/pci.h>
16#include <linux/types.h>
17#include <dma-coherence.h>
18
19/*
20 * BCM3384 has configurable address translation windows which allow the
21 * peripherals' DMA addresses to be different from the Zephyr-visible
22 * physical addresses. e.g. usb_dma_addr = zephyr_pa ^ 0x08000000
23 *
24 * If our DT "memory" node has a "dma-xor-mask" property we will enable this
25 * translation using the provided mask.
26 */
27static u32 bcm3384_dma_xor_mask;
28static u32 bcm3384_dma_xor_limit = 0xffffffff;
29
30/*
31 * PCI collapses the memory hole at 0x10000000 - 0x1fffffff.
32 * On systems with a dma-xor-mask, this range is guaranteed to live above
33 * the dma-xor-limit.
34 */
35#define BCM3384_MEM_HOLE_PA 0x10000000
36#define BCM3384_MEM_HOLE_SIZE 0x10000000
37
38static dma_addr_t bcm3384_phys_to_dma(struct device *dev, phys_addr_t pa)
39{
40 if (dev && dev_is_pci(dev) &&
41 pa >= (BCM3384_MEM_HOLE_PA + BCM3384_MEM_HOLE_SIZE))
42 return pa - BCM3384_MEM_HOLE_SIZE;
43 if (pa <= bcm3384_dma_xor_limit)
44 return pa ^ bcm3384_dma_xor_mask;
45 return pa;
46}
47
48dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size)
49{
50 return bcm3384_phys_to_dma(dev, virt_to_phys(addr));
51}
52
53dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page)
54{
55 return bcm3384_phys_to_dma(dev, page_to_phys(page));
56}
57
58unsigned long plat_dma_addr_to_phys(struct device *dev, dma_addr_t dma_addr)
59{
60 if (dev && dev_is_pci(dev) &&
61 dma_addr >= BCM3384_MEM_HOLE_PA)
62 return dma_addr + BCM3384_MEM_HOLE_SIZE;
63 if ((dma_addr ^ bcm3384_dma_xor_mask) <= bcm3384_dma_xor_limit)
64 return dma_addr ^ bcm3384_dma_xor_mask;
65 return dma_addr;
66}
67
68static int __init bcm3384_init_dma_xor(void)
69{
70 struct device_node *np = of_find_node_by_type(NULL, "memory");
71
72 if (!np)
73 return 0;
74
75 of_property_read_u32(np, "dma-xor-mask", &bcm3384_dma_xor_mask);
76 of_property_read_u32(np, "dma-xor-limit", &bcm3384_dma_xor_limit);
77
78 of_node_put(np);
79 return 0;
80}
81arch_initcall(bcm3384_init_dma_xor);
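
Because XOR with a fixed mask is its own inverse, the same mask serves both directions of the translation above. A standalone sketch of the round trip (mask and limit values are assumed examples):

/*
 * Illustration only: the XOR-based DMA address translation.
 * Mask/limit values are assumed examples, not taken from hardware.
 */
#include <stdio.h>

static unsigned dma_xor_mask = 0x08000000;
static unsigned dma_xor_limit = 0x0fffffff;

static unsigned phys_to_dma(unsigned pa)
{
	return (pa <= dma_xor_limit) ? pa ^ dma_xor_mask : pa;
}

static unsigned dma_to_phys(unsigned dma)
{
	return ((dma ^ dma_xor_mask) <= dma_xor_limit) ?
		dma ^ dma_xor_mask : dma;
}

int main(void)
{
	unsigned pa = 0x00400000;
	unsigned dma = phys_to_dma(pa);

	/* XOR twice with the same mask returns the original address */
	printf("pa 0x%08x -> dma 0x%08x -> pa 0x%08x\n",
	       pa, dma, dma_to_phys(dma));
	return 0;
}
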
diff --git a/arch/mips/bcm3384/irq.c b/arch/mips/bcm3384/irq.c
new file mode 100644
index 000000000000..0fb5134fb832
--- /dev/null
+++ b/arch/mips/bcm3384/irq.c
@@ -0,0 +1,193 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Partially based on arch/mips/ralink/irq.c
7 *
8 * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
9 * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
10 * Copyright (C) 2014 Kevin Cernekee <cernekee@gmail.com>
11 */
12
13#include <linux/io.h>
14#include <linux/bitops.h>
15#include <linux/of_platform.h>
16#include <linux/of_address.h>
17#include <linux/of_irq.h>
18#include <linux/irqdomain.h>
19#include <linux/interrupt.h>
20#include <linux/slab.h>
21#include <linux/spinlock.h>
22
23#include <asm/bmips.h>
24#include <asm/irq_cpu.h>
25#include <asm/mipsregs.h>
26
27/* INTC register offsets */
28#define INTC_REG_ENABLE 0x00
29#define INTC_REG_STATUS 0x04
30
31#define MAX_WORDS 2
32#define IRQS_PER_WORD 32
33
34struct bcm3384_intc {
35 int n_words;
36 void __iomem *reg[MAX_WORDS];
37 u32 enable[MAX_WORDS];
38 spinlock_t lock;
39};
40
41static void bcm3384_intc_irq_unmask(struct irq_data *d)
42{
43 struct bcm3384_intc *priv = d->domain->host_data;
44 unsigned long flags;
45 int idx = d->hwirq / IRQS_PER_WORD;
46 int bit = d->hwirq % IRQS_PER_WORD;
47
48 spin_lock_irqsave(&priv->lock, flags);
49 priv->enable[idx] |= BIT(bit);
50 __raw_writel(priv->enable[idx], priv->reg[idx] + INTC_REG_ENABLE);
51 spin_unlock_irqrestore(&priv->lock, flags);
52}
53
54static void bcm3384_intc_irq_mask(struct irq_data *d)
55{
56 struct bcm3384_intc *priv = d->domain->host_data;
57 unsigned long flags;
58 int idx = d->hwirq / IRQS_PER_WORD;
59 int bit = d->hwirq % IRQS_PER_WORD;
60
61 spin_lock_irqsave(&priv->lock, flags);
62 priv->enable[idx] &= ~BIT(bit);
63 __raw_writel(priv->enable[idx], priv->reg[idx] + INTC_REG_ENABLE);
64 spin_unlock_irqrestore(&priv->lock, flags);
65}
66
67static struct irq_chip bcm3384_intc_irq_chip = {
68 .name = "INTC",
69 .irq_unmask = bcm3384_intc_irq_unmask,
70 .irq_mask = bcm3384_intc_irq_mask,
71 .irq_mask_ack = bcm3384_intc_irq_mask,
72};
73
74unsigned int get_c0_compare_int(void)
75{
76 return CP0_LEGACY_COMPARE_IRQ;
77}
78
79static void bcm3384_intc_irq_handler(unsigned int irq, struct irq_desc *desc)
80{
81 struct irq_domain *domain = irq_get_handler_data(irq);
82 struct bcm3384_intc *priv = domain->host_data;
83 unsigned long flags;
84 unsigned int idx;
85
86 for (idx = 0; idx < priv->n_words; idx++) {
87 unsigned long pending;
88 int hwirq;
89
90 spin_lock_irqsave(&priv->lock, flags);
91 pending = __raw_readl(priv->reg[idx] + INTC_REG_STATUS) &
92 priv->enable[idx];
93 spin_unlock_irqrestore(&priv->lock, flags);
94
95 for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) {
96 generic_handle_irq(irq_find_mapping(domain,
97 hwirq + idx * IRQS_PER_WORD));
98 }
99 }
100}
101
102asmlinkage void plat_irq_dispatch(void)
103{
104 unsigned long pending =
105 (read_c0_status() & read_c0_cause() & ST0_IM) >> STATUSB_IP0;
106 int bit;
107
108 for_each_set_bit(bit, &pending, 8)
109 do_IRQ(MIPS_CPU_IRQ_BASE + bit);
110}
111
112static int intc_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
113{
114 irq_set_chip_and_handler(irq, &bcm3384_intc_irq_chip, handle_level_irq);
115 return 0;
116}
117
118static const struct irq_domain_ops irq_domain_ops = {
119 .xlate = irq_domain_xlate_onecell,
120 .map = intc_map,
121};
122
123static int __init ioremap_one_pair(struct bcm3384_intc *priv,
124 struct device_node *node,
125 int idx)
126{
127 struct resource res;
128
129 if (of_address_to_resource(node, idx, &res))
130 return 0;
131
132 if (!request_mem_region(res.start, resource_size(&res),
133 res.name))
134 pr_err("Failed to request INTC register region\n");
135
136 priv->reg[idx] = ioremap_nocache(res.start, resource_size(&res));
137 if (!priv->reg[idx])
138 panic("Failed to ioremap INTC register range");
139
140 /* start up with everything masked before we hook the parent IRQ */
141 __raw_writel(0, priv->reg[idx] + INTC_REG_ENABLE);
142 priv->enable[idx] = 0;
143
144 return IRQS_PER_WORD;
145}
146
147static int __init intc_of_init(struct device_node *node,
148 struct device_node *parent)
149{
150 struct irq_domain *domain;
151 unsigned int parent_irq, n_irqs = 0;
152 struct bcm3384_intc *priv;
153
154 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
155 if (!priv)
156 panic("Failed to allocate bcm3384_intc struct");
157
158 spin_lock_init(&priv->lock);
159
160 parent_irq = irq_of_parse_and_map(node, 0);
161 if (!parent_irq)
162 panic("Failed to get INTC IRQ");
163
164 n_irqs += ioremap_one_pair(priv, node, 0);
165 n_irqs += ioremap_one_pair(priv, node, 1);
166
167 if (!n_irqs)
168 panic("Failed to map INTC registers");
169
170 priv->n_words = n_irqs / IRQS_PER_WORD;
171 domain = irq_domain_add_linear(node, n_irqs, &irq_domain_ops, priv);
172 if (!domain)
173 panic("Failed to add irqdomain");
174
175 irq_set_chained_handler(parent_irq, bcm3384_intc_irq_handler);
176 irq_set_handler_data(parent_irq, domain);
177
178 return 0;
179}
180
181static struct of_device_id of_irq_ids[] __initdata = {
182 { .compatible = "mti,cpu-interrupt-controller",
183 .data = mips_cpu_intc_init },
184 { .compatible = "brcm,bcm3384-intc",
185 .data = intc_of_init },
186 {},
187};
188
189void __init arch_init_irq(void)
190{
191 bmips_tp1_irqs = 0;
192 of_irq_init(of_irq_ids);
193}
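
Each INTC instance exposes up to MAX_WORDS enable/status register pairs, and the mask/unmask paths above split a linear hwirq into a word index and a bit. A standalone sketch of that arithmetic, with a hypothetical hwirq value:

#include <stdio.h>

#define IRQS_PER_WORD	32

int main(void)
{
	int hwirq = 37;				/* hypothetical: word 1, bit 5 */
	int idx = hwirq / IRQS_PER_WORD;	/* which ENABLE/STATUS pair */
	int bit = hwirq % IRQS_PER_WORD;	/* which bit inside that word */

	printf("hwirq %d -> reg[%d] bit %d (mask %#x)\n",
	       hwirq, idx, bit, 1u << bit);
	return 0;
}
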
diff --git a/arch/mips/bcm3384/setup.c b/arch/mips/bcm3384/setup.c
new file mode 100644
index 000000000000..d84b8400b874
--- /dev/null
+++ b/arch/mips/bcm3384/setup.c
@@ -0,0 +1,97 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
7 * Copyright (C) 2014 Kevin Cernekee <cernekee@gmail.com>
8 */
9
10#include <linux/init.h>
11#include <linux/bootmem.h>
12#include <linux/clk-provider.h>
13#include <linux/ioport.h>
14#include <linux/of.h>
15#include <linux/of_fdt.h>
16#include <linux/of_platform.h>
17#include <linux/smp.h>
18#include <asm/addrspace.h>
19#include <asm/bmips.h>
20#include <asm/bootinfo.h>
21#include <asm/prom.h>
22#include <asm/smp-ops.h>
23#include <asm/time.h>
24
25void __init prom_init(void)
26{
27 register_bmips_smp_ops();
28}
29
30void __init prom_free_prom_memory(void)
31{
32}
33
34const char *get_system_type(void)
35{
36 return "BCM3384";
37}
38
39void __init plat_time_init(void)
40{
41 struct device_node *np;
42 u32 freq;
43
44 np = of_find_node_by_name(NULL, "cpus");
45 if (!np)
46 panic("missing 'cpus' DT node");
47 if (of_property_read_u32(np, "mips-hpt-frequency", &freq) < 0)
48 panic("missing 'mips-hpt-frequency' property");
49 of_node_put(np);
50
51 mips_hpt_frequency = freq;
52}
53
54void __init plat_mem_setup(void)
55{
56 void *dtb = __dtb_start;
57
58 set_io_port_base(0);
59 ioport_resource.start = 0;
60 ioport_resource.end = ~0;
61
62 /* intended to somewhat resemble ARM; see Documentation/arm/Booting */
63 if (fw_arg0 == 0 && fw_arg1 == 0xffffffff)
64 dtb = phys_to_virt(fw_arg2);
65
66 __dt_setup_arch(dtb);
67
68 strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
69}
70
71void __init device_tree_init(void)
72{
73 struct device_node *np;
74
75 unflatten_and_copy_device_tree();
76
77 /* Disable SMP boot unless both CPUs are listed in DT and !disabled */
78 np = of_find_node_by_name(NULL, "cpus");
79 if (np && of_get_available_child_count(np) <= 1)
80 bmips_smp_enabled = 0;
81 of_node_put(np);
82}
83
84int __init plat_of_setup(void)
85{
86 return __dt_register_buses("brcm,bcm3384", "simple-bus");
87}
88
89arch_initcall(plat_of_setup);
90
91static int __init plat_dev_init(void)
92{
93 of_clk_init(NULL);
94 return 0;
95}
96
97device_initcall(plat_dev_init);
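
plat_mem_setup() above accepts a DTB pointer from the bootloader when fw_arg0 == 0 and fw_arg1 == 0xffffffff. A sketch of the loader-side convention this implies; hypothetical code, inferred only from that check (on MIPS, fw_arg0..fw_arg3 are the incoming a0..a3 registers):

/*
 * Hypothetical loader-side code; not part of this patch.
 */
typedef void (*kernel_entry_t)(unsigned long a0, unsigned long a1,
			       unsigned long a2);

static void boot_with_dtb(kernel_entry_t kernel_entry, unsigned long dtb_phys)
{
	/* a0 == 0 and a1 == 0xffffffff mark a2 as a physical DTB pointer */
	kernel_entry(0, 0xffffffffUL, dtb_phys);
}
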
diff --git a/arch/mips/bcm47xx/bcm47xx_private.h b/arch/mips/bcm47xx/bcm47xx_private.h
index f1cc9d0495d8..ea909a56a3ee 100644
--- a/arch/mips/bcm47xx/bcm47xx_private.h
+++ b/arch/mips/bcm47xx/bcm47xx_private.h
@@ -6,12 +6,18 @@
6/* prom.c */ 6/* prom.c */
7void __init bcm47xx_prom_highmem_init(void); 7void __init bcm47xx_prom_highmem_init(void);
8 8
9/* sprom.c */
10void bcm47xx_sprom_register_fallbacks(void);
11
9/* buttons.c */ 12/* buttons.c */
10int __init bcm47xx_buttons_register(void); 13int __init bcm47xx_buttons_register(void);
11 14
12/* leds.c */ 15/* leds.c */
13void __init bcm47xx_leds_register(void); 16void __init bcm47xx_leds_register(void);
14 17
18/* setup.c */
19void __init bcm47xx_bus_setup(void);
20
15/* workarounds.c */ 21/* workarounds.c */
16void __init bcm47xx_workarounds(void); 22void __init bcm47xx_workarounds(void);
17 23
diff --git a/arch/mips/bcm47xx/irq.c b/arch/mips/bcm47xx/irq.c
index e0585b76ec19..21b4497f09be 100644
--- a/arch/mips/bcm47xx/irq.c
+++ b/arch/mips/bcm47xx/irq.c
@@ -22,6 +22,8 @@
22 * 675 Mass Ave, Cambridge, MA 02139, USA. 22 * 675 Mass Ave, Cambridge, MA 02139, USA.
23 */ 23 */
24 24
25#include "bcm47xx_private.h"
26
25#include <linux/types.h> 27#include <linux/types.h>
26#include <linux/interrupt.h> 28#include <linux/interrupt.h>
27#include <linux/irq.h> 29#include <linux/irq.h>
@@ -65,6 +67,12 @@ DEFINE_HWx_IRQDISPATCH(7)
65 67
66void __init arch_init_irq(void) 68void __init arch_init_irq(void)
67{ 69{
70 /*
71 * This is the first arch callback after mm_init (we can use kmalloc),
72 * so let's finish bus initialization now.
73 */
74 bcm47xx_bus_setup();
75
68#ifdef CONFIG_BCM47XX_BCMA 76#ifdef CONFIG_BCM47XX_BCMA
69 if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) { 77 if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) {
70 bcma_write32(bcm47xx_bus.bcma.bus.drv_mips.core, 78 bcma_write32(bcm47xx_bus.bcma.bus.drv_mips.core,
diff --git a/arch/mips/bcm47xx/nvram.c b/arch/mips/bcm47xx/nvram.c
index 2bed73a684ae..c5c381c43f17 100644
--- a/arch/mips/bcm47xx/nvram.c
+++ b/arch/mips/bcm47xx/nvram.c
@@ -13,24 +13,35 @@
13 13
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/module.h> 15#include <linux/module.h>
16#include <linux/ssb/ssb.h>
17#include <linux/kernel.h> 16#include <linux/kernel.h>
18#include <linux/string.h> 17#include <linux/string.h>
19#include <asm/addrspace.h> 18#include <linux/mtd/mtd.h>
20#include <bcm47xx_nvram.h> 19#include <bcm47xx_nvram.h>
21#include <asm/mach-bcm47xx/bcm47xx.h> 20
21#define NVRAM_MAGIC 0x48534C46 /* 'FLSH' */
22#define NVRAM_SPACE 0x8000
23
24#define FLASH_MIN 0x00020000 /* Minimum flash size */
25
26struct nvram_header {
27 u32 magic;
28 u32 len;
29 u32 crc_ver_init; /* 0:7 crc, 8:15 ver, 16:31 sdram_init */
30 u32 config_refresh; /* 0:15 sdram_config, 16:31 sdram_refresh */
31 u32 config_ncdl; /* ncdl values for memc */
32};
22 33
23static char nvram_buf[NVRAM_SPACE]; 34static char nvram_buf[NVRAM_SPACE];
24static const u32 nvram_sizes[] = {0x8000, 0xF000, 0x10000}; 35static const u32 nvram_sizes[] = {0x8000, 0xF000, 0x10000};
25 36
26static u32 find_nvram_size(u32 end) 37static u32 find_nvram_size(void __iomem *end)
27{ 38{
28 struct nvram_header *header; 39 struct nvram_header __iomem *header;
29 int i; 40 int i;
30 41
31 for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) { 42 for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) {
32 header = (struct nvram_header *)KSEG1ADDR(end - nvram_sizes[i]); 43 header = (struct nvram_header *)(end - nvram_sizes[i]);
33 if (header->magic == NVRAM_HEADER) 44 if (header->magic == NVRAM_MAGIC)
34 return nvram_sizes[i]; 45 return nvram_sizes[i];
35 } 46 }
36 47
@@ -38,36 +49,40 @@ static u32 find_nvram_size(u32 end)
38} 49}
39 50
40/* Probe for NVRAM header */ 51/* Probe for NVRAM header */
41static int nvram_find_and_copy(u32 base, u32 lim) 52static int nvram_find_and_copy(void __iomem *iobase, u32 lim)
42{ 53{
43 struct nvram_header *header; 54 struct nvram_header __iomem *header;
44 int i; 55 int i;
45 u32 off; 56 u32 off;
46 u32 *src, *dst; 57 u32 *src, *dst;
47 u32 size; 58 u32 size;
48 59
60 if (nvram_buf[0]) {
61 pr_warn("nvram already initialized\n");
62 return -EEXIST;
63 }
64
49 /* TODO: when nvram is on nand flash check for bad blocks first. */ 65 /* TODO: when nvram is on nand flash check for bad blocks first. */
50 off = FLASH_MIN; 66 off = FLASH_MIN;
51 while (off <= lim) { 67 while (off <= lim) {
52 /* Windowed flash access */ 68 /* Windowed flash access */
53 size = find_nvram_size(base + off); 69 size = find_nvram_size(iobase + off);
54 if (size) { 70 if (size) {
55 header = (struct nvram_header *)KSEG1ADDR(base + off - 71 header = (struct nvram_header *)(iobase + off - size);
56 size);
57 goto found; 72 goto found;
58 } 73 }
59 off <<= 1; 74 off <<= 1;
60 } 75 }
61 76
62 /* Try embedded NVRAM at 4 KB and 1 KB as last resorts */ 77 /* Try embedded NVRAM at 4 KB and 1 KB as last resorts */
63 header = (struct nvram_header *) KSEG1ADDR(base + 4096); 78 header = (struct nvram_header *)(iobase + 4096);
64 if (header->magic == NVRAM_HEADER) { 79 if (header->magic == NVRAM_MAGIC) {
65 size = NVRAM_SPACE; 80 size = NVRAM_SPACE;
66 goto found; 81 goto found;
67 } 82 }
68 83
69 header = (struct nvram_header *) KSEG1ADDR(base + 1024); 84 header = (struct nvram_header *)(iobase + 1024);
70 if (header->magic == NVRAM_HEADER) { 85 if (header->magic == NVRAM_MAGIC) {
71 size = NVRAM_SPACE; 86 size = NVRAM_SPACE;
72 goto found; 87 goto found;
73 } 88 }
@@ -94,71 +109,73 @@ found:
94 return 0; 109 return 0;
95} 110}
96 111
97#ifdef CONFIG_BCM47XX_SSB 112/*
 98static int nvram_init_ssb(void) 113 * On bcm47xx we need access to the NVRAM very early, so we can't use the
114 * mtd subsystem to access flash. We can't even use a platform device /
115 * driver to store the memory offset.
116 * To handle this we provide the following symbol. It's supposed to be
117 * called as soon as we get info about the flash device, before any NVRAM entry is needed.
118 */
119int bcm47xx_nvram_init_from_mem(u32 base, u32 lim)
99{ 120{
100 struct ssb_mipscore *mcore = &bcm47xx_bus.ssb.mipscore; 121 void __iomem *iobase;
101 u32 base; 122 int err;
102 u32 lim;
103
104 if (mcore->pflash.present) {
105 base = mcore->pflash.window;
106 lim = mcore->pflash.window_size;
107 } else {
108 pr_err("Couldn't find supported flash memory\n");
109 return -ENXIO;
110 }
111 123
112 return nvram_find_and_copy(base, lim); 124 iobase = ioremap_nocache(base, lim);
113} 125 if (!iobase)
114#endif 126 return -ENOMEM;
115 127
116#ifdef CONFIG_BCM47XX_BCMA 128 err = nvram_find_and_copy(iobase, lim);
117static int nvram_init_bcma(void) 129
118{ 130 iounmap(iobase);
119 struct bcma_drv_cc *cc = &bcm47xx_bus.bcma.bus.drv_cc;
120 u32 base;
121 u32 lim;
122
123#ifdef CONFIG_BCMA_NFLASH
124 if (cc->nflash.boot) {
125 base = BCMA_SOC_FLASH1;
126 lim = BCMA_SOC_FLASH1_SZ;
127 } else
128#endif
129 if (cc->pflash.present) {
130 base = cc->pflash.window;
131 lim = cc->pflash.window_size;
132#ifdef CONFIG_BCMA_SFLASH
133 } else if (cc->sflash.present) {
134 base = cc->sflash.window;
135 lim = cc->sflash.size;
136#endif
137 } else {
138 pr_err("Couldn't find supported flash memory\n");
139 return -ENXIO;
140 }
141 131
142 return nvram_find_and_copy(base, lim); 132 return err;
143} 133}
144#endif
145 134
146static int nvram_init(void) 135static int nvram_init(void)
147{ 136{
148 switch (bcm47xx_bus_type) { 137#ifdef CONFIG_MTD
149#ifdef CONFIG_BCM47XX_SSB 138 struct mtd_info *mtd;
150 case BCM47XX_BUS_TYPE_SSB: 139 struct nvram_header header;
151 return nvram_init_ssb(); 140 size_t bytes_read;
152#endif 141 int err, i;
153#ifdef CONFIG_BCM47XX_BCMA 142
154 case BCM47XX_BUS_TYPE_BCMA: 143 mtd = get_mtd_device_nm("nvram");
155 return nvram_init_bcma(); 144 if (IS_ERR(mtd))
156#endif 145 return -ENODEV;
146
147 for (i = 0; i < ARRAY_SIZE(nvram_sizes); i++) {
148 loff_t from = mtd->size - nvram_sizes[i];
149
150 if (from < 0)
151 continue;
152
153 err = mtd_read(mtd, from, sizeof(header), &bytes_read,
154 (uint8_t *)&header);
155 if (!err && header.magic == NVRAM_MAGIC) {
156 u8 *dst = (uint8_t *)nvram_buf;
157 size_t len = header.len;
158
159 if (header.len > NVRAM_SPACE) {
160 pr_err("nvram on flash (%i bytes) is bigger than the reserved space in memory, will just copy the first %i bytes\n",
161 header.len, NVRAM_SPACE);
162 len = NVRAM_SPACE;
163 }
164
165 err = mtd_read(mtd, from, len, &bytes_read, dst);
166 if (err)
167 return err;
168 memset(dst + bytes_read, 0x0, NVRAM_SPACE - bytes_read);
169
170 return 0;
171 }
157 } 172 }
173#endif
174
158 return -ENXIO; 175 return -ENXIO;
159} 176}
160 177
161int bcm47xx_nvram_getenv(char *name, char *val, size_t val_len) 178int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len)
162{ 179{
163 char *var, *value, *end, *eq; 180 char *var, *value, *end, *eq;
164 int err; 181 int err;
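
bcm47xx_nvram_init_from_mem() is meant to be called by whichever code first learns the flash window, before any NVRAM lookup happens. A hedged sketch of such a caller; the window base and size values are made up for illustration:

#include <linux/init.h>
#include <linux/types.h>
#include <bcm47xx_nvram.h>

/* Hypothetical early caller, e.g. flash-detection code; not from this patch. */
static int __init example_flash_probe(void)
{
	u32 base = 0x1c000000;	/* assumed parallel-flash window base */
	u32 lim = 0x02000000;	/* assumed window size */

	/* copies the NVRAM image into nvram_buf before anyone queries it */
	return bcm47xx_nvram_init_from_mem(base, lim);
}
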
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index c00585d915bc..e43b5046cb30 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -102,23 +102,6 @@ static void bcm47xx_machine_halt(void)
102} 102}
103 103
104#ifdef CONFIG_BCM47XX_SSB 104#ifdef CONFIG_BCM47XX_SSB
105static int bcm47xx_get_sprom_ssb(struct ssb_bus *bus, struct ssb_sprom *out)
106{
107 char prefix[10];
108
109 if (bus->bustype == SSB_BUSTYPE_PCI) {
110 memset(out, 0, sizeof(struct ssb_sprom));
111 snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
112 bus->host_pci->bus->number + 1,
113 PCI_SLOT(bus->host_pci->devfn));
114 bcm47xx_fill_sprom(out, prefix, false);
115 return 0;
116 } else {
117 printk(KERN_WARNING "bcm47xx: unable to fill SPROM for given bustype.\n");
118 return -EINVAL;
119 }
120}
121
122static int bcm47xx_get_invariants(struct ssb_bus *bus, 105static int bcm47xx_get_invariants(struct ssb_bus *bus,
123 struct ssb_init_invariants *iv) 106 struct ssb_init_invariants *iv)
124{ 107{
@@ -144,11 +127,6 @@ static void __init bcm47xx_register_ssb(void)
144 char buf[100]; 127 char buf[100];
145 struct ssb_mipscore *mcore; 128 struct ssb_mipscore *mcore;
146 129
147 err = ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom_ssb);
148 if (err)
149 printk(KERN_WARNING "bcm47xx: someone else already registered"
150 " a ssb SPROM callback handler (err %d)\n", err);
151
152 err = ssb_bus_ssbbus_register(&(bcm47xx_bus.ssb), SSB_ENUM_BASE, 130 err = ssb_bus_ssbbus_register(&(bcm47xx_bus.ssb), SSB_ENUM_BASE,
153 bcm47xx_get_invariants); 131 bcm47xx_get_invariants);
154 if (err) 132 if (err)
@@ -171,56 +149,21 @@ static void __init bcm47xx_register_ssb(void)
171#endif 149#endif
172 150
173#ifdef CONFIG_BCM47XX_BCMA 151#ifdef CONFIG_BCM47XX_BCMA
174static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out)
175{
176 char prefix[10];
177 struct bcma_device *core;
178
179 switch (bus->hosttype) {
180 case BCMA_HOSTTYPE_PCI:
181 memset(out, 0, sizeof(struct ssb_sprom));
182 snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
183 bus->host_pci->bus->number + 1,
184 PCI_SLOT(bus->host_pci->devfn));
185 bcm47xx_fill_sprom(out, prefix, false);
186 return 0;
187 case BCMA_HOSTTYPE_SOC:
188 memset(out, 0, sizeof(struct ssb_sprom));
189 core = bcma_find_core(bus, BCMA_CORE_80211);
190 if (core) {
191 snprintf(prefix, sizeof(prefix), "sb/%u/",
192 core->core_index);
193 bcm47xx_fill_sprom(out, prefix, true);
194 } else {
195 bcm47xx_fill_sprom(out, NULL, false);
196 }
197 return 0;
198 default:
199 pr_warn("bcm47xx: unable to fill SPROM for given bustype.\n");
200 return -EINVAL;
201 }
202}
203
204static void __init bcm47xx_register_bcma(void) 152static void __init bcm47xx_register_bcma(void)
205{ 153{
206 int err; 154 int err;
207 155
208 err = bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma);
209 if (err)
210 pr_warn("bcm47xx: someone else already registered a bcma SPROM callback handler (err %d)\n", err);
211
212 err = bcma_host_soc_register(&bcm47xx_bus.bcma); 156 err = bcma_host_soc_register(&bcm47xx_bus.bcma);
213 if (err) 157 if (err)
214 panic("Failed to register BCMA bus (err %d)", err); 158 panic("Failed to register BCMA bus (err %d)", err);
215
216 err = bcma_host_soc_init(&bcm47xx_bus.bcma);
217 if (err)
218 panic("Failed to initialize BCMA bus (err %d)", err);
219
220 bcm47xx_fill_bcma_boardinfo(&bcm47xx_bus.bcma.bus.boardinfo, NULL);
221} 159}
222#endif 160#endif
223 161
162/*
163 * Memory setup is done in the early part of MIPS's arch_mem_init. It's supposed
164 * to detect memory and record it with add_memory_region.
165 * Any extra initialization performed here must not use kmalloc or bootmem.
166 */
224void __init plat_mem_setup(void) 167void __init plat_mem_setup(void)
225{ 168{
226 struct cpuinfo_mips *c = &current_cpu_data; 169 struct cpuinfo_mips *c = &current_cpu_data;
@@ -229,6 +172,7 @@ void __init plat_mem_setup(void)
229 printk(KERN_INFO "bcm47xx: using bcma bus\n"); 172 printk(KERN_INFO "bcm47xx: using bcma bus\n");
230#ifdef CONFIG_BCM47XX_BCMA 173#ifdef CONFIG_BCM47XX_BCMA
231 bcm47xx_bus_type = BCM47XX_BUS_TYPE_BCMA; 174 bcm47xx_bus_type = BCM47XX_BUS_TYPE_BCMA;
175 bcm47xx_sprom_register_fallbacks();
232 bcm47xx_register_bcma(); 176 bcm47xx_register_bcma();
233 bcm47xx_set_system_type(bcm47xx_bus.bcma.bus.chipinfo.id); 177 bcm47xx_set_system_type(bcm47xx_bus.bcma.bus.chipinfo.id);
234#ifdef CONFIG_HIGHMEM 178#ifdef CONFIG_HIGHMEM
@@ -239,6 +183,7 @@ void __init plat_mem_setup(void)
239 printk(KERN_INFO "bcm47xx: using ssb bus\n"); 183 printk(KERN_INFO "bcm47xx: using ssb bus\n");
240#ifdef CONFIG_BCM47XX_SSB 184#ifdef CONFIG_BCM47XX_SSB
241 bcm47xx_bus_type = BCM47XX_BUS_TYPE_SSB; 185 bcm47xx_bus_type = BCM47XX_BUS_TYPE_SSB;
186 bcm47xx_sprom_register_fallbacks();
242 bcm47xx_register_ssb(); 187 bcm47xx_register_ssb();
243 bcm47xx_set_system_type(bcm47xx_bus.ssb.chip_id); 188 bcm47xx_set_system_type(bcm47xx_bus.ssb.chip_id);
244#endif 189#endif
@@ -247,6 +192,28 @@ void __init plat_mem_setup(void)
247 _machine_restart = bcm47xx_machine_restart; 192 _machine_restart = bcm47xx_machine_restart;
248 _machine_halt = bcm47xx_machine_halt; 193 _machine_halt = bcm47xx_machine_halt;
249 pm_power_off = bcm47xx_machine_halt; 194 pm_power_off = bcm47xx_machine_halt;
195}
196
197/*
198 * This finishes bus initialization, doing things that were not possible without
199 * kmalloc. Make sure to call it late enough (after mm_init).
200 */
201void __init bcm47xx_bus_setup(void)
202{
203#ifdef CONFIG_BCM47XX_BCMA
204 if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) {
205 int err;
206
207 err = bcma_host_soc_init(&bcm47xx_bus.bcma);
208 if (err)
209 panic("Failed to initialize BCMA bus (err %d)", err);
210
211 bcm47xx_fill_bcma_boardinfo(&bcm47xx_bus.bcma.bus.boardinfo,
212 NULL);
213 }
214#endif
215
216 /* With the bus initialized we can access NVRAM and detect the board */
250 bcm47xx_board_detect(); 217 bcm47xx_board_detect();
251 mips_set_machine_name(bcm47xx_board_get_name()); 218 mips_set_machine_name(bcm47xx_board_get_name());
252} 219}
diff --git a/arch/mips/bcm47xx/sprom.c b/arch/mips/bcm47xx/sprom.c
index 41226b68de3d..2eff7fe99c6b 100644
--- a/arch/mips/bcm47xx/sprom.c
+++ b/arch/mips/bcm47xx/sprom.c
@@ -136,6 +136,20 @@ static void nvram_read_leddc(const char *prefix, const char *name,
136 *leddc_off_time = (val >> 16) & 0xff; 136 *leddc_off_time = (val >> 16) & 0xff;
137} 137}
138 138
139static void bcm47xx_nvram_parse_macaddr(char *buf, u8 macaddr[6])
140{
141 if (strchr(buf, ':'))
142 sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0],
143 &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4],
144 &macaddr[5]);
145 else if (strchr(buf, '-'))
146 sscanf(buf, "%hhx-%hhx-%hhx-%hhx-%hhx-%hhx", &macaddr[0],
147 &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4],
148 &macaddr[5]);
149 else
150 pr_warn("Can not parse mac address: %s\n", buf);
151}
152
139static void nvram_read_macaddr(const char *prefix, const char *name, 153static void nvram_read_macaddr(const char *prefix, const char *name,
140 u8 val[6], bool fallback) 154 u8 val[6], bool fallback)
141{ 155{
@@ -801,3 +815,71 @@ void bcm47xx_fill_bcma_boardinfo(struct bcma_boardinfo *boardinfo,
801 nvram_read_u16(prefix, NULL, "boardtype", &boardinfo->type, 0, true); 815 nvram_read_u16(prefix, NULL, "boardtype", &boardinfo->type, 0, true);
802} 816}
803#endif 817#endif
818
819#if defined(CONFIG_BCM47XX_SSB)
820static int bcm47xx_get_sprom_ssb(struct ssb_bus *bus, struct ssb_sprom *out)
821{
822 char prefix[10];
823
824 if (bus->bustype == SSB_BUSTYPE_PCI) {
825 memset(out, 0, sizeof(struct ssb_sprom));
826 snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
827 bus->host_pci->bus->number + 1,
828 PCI_SLOT(bus->host_pci->devfn));
829 bcm47xx_fill_sprom(out, prefix, false);
830 return 0;
831 } else {
832 pr_warn("bcm47xx: unable to fill SPROM for given bustype.\n");
833 return -EINVAL;
834 }
835}
836#endif
837
838#if defined(CONFIG_BCM47XX_BCMA)
839static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out)
840{
841 char prefix[10];
842 struct bcma_device *core;
843
844 switch (bus->hosttype) {
845 case BCMA_HOSTTYPE_PCI:
846 memset(out, 0, sizeof(struct ssb_sprom));
847 snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
848 bus->host_pci->bus->number + 1,
849 PCI_SLOT(bus->host_pci->devfn));
850 bcm47xx_fill_sprom(out, prefix, false);
851 return 0;
852 case BCMA_HOSTTYPE_SOC:
853 memset(out, 0, sizeof(struct ssb_sprom));
854 core = bcma_find_core(bus, BCMA_CORE_80211);
855 if (core) {
856 snprintf(prefix, sizeof(prefix), "sb/%u/",
857 core->core_index);
858 bcm47xx_fill_sprom(out, prefix, true);
859 } else {
860 bcm47xx_fill_sprom(out, NULL, false);
861 }
862 return 0;
863 default:
864 pr_warn("bcm47xx: unable to fill SPROM for given bustype.\n");
865 return -EINVAL;
866 }
867}
868#endif
869
870/*
871 * On bcm47xx we need to register the SPROM fallback handler very early, so
872 * we can't use anything like a platform device / driver for this.
873 */
874void bcm47xx_sprom_register_fallbacks(void)
875{
876#if defined(CONFIG_BCM47XX_SSB)
877 if (ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom_ssb))
878 pr_warn("Failed to registered ssb SPROM handler\n");
879#endif
880
881#if defined(CONFIG_BCM47XX_BCMA)
882 if (bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma))
883 pr_warn("Failed to registered bcma SPROM handler\n");
884#endif
885}
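
The prefixes built above ("pci/<bus>/<slot>/", "sb/<core>/") namespace the NVRAM variables that the fill helpers read. A sketch of the resulting key space; "macaddr" is an illustrative variable name, since the names read by bcm47xx_fill_sprom() are not part of this patch:

#include <linux/printk.h>
#include <bcm47xx_nvram.h>

/* "pci/1/1/" is the prefix built above for PCI bus 1, slot 1. */
static void example_sprom_key_lookup(void)
{
	char val[32];

	/* "macaddr" is illustrative, not taken from this patch */
	if (bcm47xx_nvram_getenv("pci/1/1/macaddr", val, sizeof(val)) >= 0)
		pr_info("pci/1/1/macaddr = %s\n", val);
}
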
diff --git a/arch/mips/bcm63xx/cpu.c b/arch/mips/bcm63xx/cpu.c
index 536f64443031..307ec8b8e41c 100644
--- a/arch/mips/bcm63xx/cpu.c
+++ b/arch/mips/bcm63xx/cpu.c
@@ -263,7 +263,7 @@ static unsigned int detect_memory_size(void)
263 263
264 if (BCMCPU_IS_6345()) { 264 if (BCMCPU_IS_6345()) {
265 val = bcm_sdram_readl(SDRAM_MBASE_REG); 265 val = bcm_sdram_readl(SDRAM_MBASE_REG);
266 return (val * 8 * 1024 * 1024); 266 return val * 8 * 1024 * 1024;
267 } 267 }
268 268
269 if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) { 269 if (BCMCPU_IS_6338() || BCMCPU_IS_6348()) {
diff --git a/arch/mips/boot/dts/Makefile b/arch/mips/boot/dts/Makefile
index ca9c90e2cabf..4f49fa477f14 100644
--- a/arch/mips/boot/dts/Makefile
+++ b/arch/mips/boot/dts/Makefile
@@ -1,3 +1,4 @@
1dtb-$(CONFIG_BCM3384) += bcm93384wvg.dtb
1dtb-$(CONFIG_CAVIUM_OCTEON_SOC) += octeon_3xxx.dtb octeon_68xx.dtb 2dtb-$(CONFIG_CAVIUM_OCTEON_SOC) += octeon_3xxx.dtb octeon_68xx.dtb
2dtb-$(CONFIG_DT_EASY50712) += easy50712.dtb 3dtb-$(CONFIG_DT_EASY50712) += easy50712.dtb
3dtb-$(CONFIG_DT_XLP_EVP) += xlp_evp.dtb 4dtb-$(CONFIG_DT_XLP_EVP) += xlp_evp.dtb
diff --git a/arch/mips/boot/dts/bcm3384.dtsi b/arch/mips/boot/dts/bcm3384.dtsi
new file mode 100644
index 000000000000..21b074a99c94
--- /dev/null
+++ b/arch/mips/boot/dts/bcm3384.dtsi
@@ -0,0 +1,109 @@
1/ {
2 #address-cells = <1>;
3 #size-cells = <1>;
4 compatible = "brcm,bcm3384", "brcm,bcm33843";
5
6 cpus {
7 #address-cells = <1>;
8 #size-cells = <0>;
9
10 /* On BMIPS5000 this is 1/8th of the CPU core clock */
11 mips-hpt-frequency = <100000000>;
12
13 cpu@0 {
14 compatible = "brcm,bmips5000";
15 device_type = "cpu";
16 reg = <0>;
17 };
18
19 cpu@1 {
20 compatible = "brcm,bmips5000";
21 device_type = "cpu";
22 reg = <1>;
23 };
24 };
25
26 clocks {
27 #address-cells = <1>;
28 #size-cells = <0>;
29
30 periph_clk: periph_clk@0 {
31 compatible = "fixed-clock";
32 #clock-cells = <0>;
33 clock-frequency = <54000000>;
34 };
35 };
36
37 aliases {
38 uart0 = &uart0;
39 };
40
41 cpu_intc: cpu_intc@0 {
42 #address-cells = <0>;
43 compatible = "mti,cpu-interrupt-controller";
44
45 interrupt-controller;
46 #interrupt-cells = <1>;
47 };
48
49 periph_intc: periph_intc@14e00038 {
50 compatible = "brcm,bcm3384-intc";
51 reg = <0x14e00038 0x8 0x14e00340 0x8>;
52
53 interrupt-controller;
54 #interrupt-cells = <1>;
55
56 interrupt-parent = <&cpu_intc>;
57 interrupts = <4>;
58 };
59
60 zmips_intc: zmips_intc@104b0060 {
61 compatible = "brcm,bcm3384-intc";
62 reg = <0x104b0060 0x8>;
63
64 interrupt-controller;
65 #interrupt-cells = <1>;
66
67 interrupt-parent = <&periph_intc>;
68 interrupts = <29>;
69 };
70
71 iop_intc: iop_intc@14e00058 {
72 compatible = "brcm,bcm3384-intc";
73 reg = <0x14e00058 0x8>;
74
75 interrupt-controller;
76 #interrupt-cells = <1>;
77
78 interrupt-parent = <&cpu_intc>;
79 interrupts = <6>;
80 };
81
82 uart0: serial@14e00520 {
83 compatible = "brcm,bcm6345-uart";
84 reg = <0x14e00520 0x18>;
85 interrupt-parent = <&periph_intc>;
86 interrupts = <2>;
87 clocks = <&periph_clk>;
88 status = "disabled";
89 };
90
91 ehci0: usb@15400300 {
92 compatible = "brcm,bcm3384-ehci", "generic-ehci";
93 reg = <0x15400300 0x100>;
94 big-endian;
95 interrupt-parent = <&periph_intc>;
96 interrupts = <41>;
97 status = "disabled";
98 };
99
100 ohci0: usb@15400400 {
101 compatible = "brcm,bcm3384-ohci", "generic-ohci";
102 reg = <0x15400400 0x100>;
103 big-endian;
104 no-big-frame-no;
105 interrupt-parent = <&periph_intc>;
106 interrupts = <40>;
107 status = "disabled";
108 };
109};
diff --git a/arch/mips/boot/dts/bcm93384wvg.dts b/arch/mips/boot/dts/bcm93384wvg.dts
new file mode 100644
index 000000000000..831741179212
--- /dev/null
+++ b/arch/mips/boot/dts/bcm93384wvg.dts
@@ -0,0 +1,32 @@
1/dts-v1/;
2
3/include/ "bcm3384.dtsi"
4
5/ {
6 compatible = "brcm,bcm93384wvg", "brcm,bcm3384";
7 model = "Broadcom BCM93384WVG";
8
9 chosen {
10 bootargs = "console=ttyS0,115200";
11 stdout-path = &uart0;
12 };
13
14 memory@0 {
15 device_type = "memory";
16 reg = <0x0 0x04000000>;
17 dma-xor-mask = <0x08000000>;
18 dma-xor-limit = <0x0fffffff>;
19 };
20};
21
22&uart0 {
23 status = "okay";
24};
25
26&ehci0 {
27 status = "okay";
28};
29
30&ohci0 {
31 status = "okay";
32};
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index 02f244475207..3778655c4a37 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -262,8 +262,8 @@ char *octeon_swiotlb;
262void __init plat_swiotlb_setup(void) 262void __init plat_swiotlb_setup(void)
263{ 263{
264 int i; 264 int i;
265 phys_t max_addr; 265 phys_addr_t max_addr;
266 phys_t addr_size; 266 phys_addr_t addr_size;
267 size_t swiotlbsize; 267 size_t swiotlbsize;
268 unsigned long swiotlb_nslabs; 268 unsigned long swiotlb_nslabs;
269 269
diff --git a/arch/mips/cavium-octeon/executive/octeon-model.c b/arch/mips/cavium-octeon/executive/octeon-model.c
index f4c1b36fdf65..e15b049b3bd7 100644
--- a/arch/mips/cavium-octeon/executive/octeon-model.c
+++ b/arch/mips/cavium-octeon/executive/octeon-model.c
@@ -28,22 +28,23 @@
28#include <asm/octeon/octeon.h> 28#include <asm/octeon/octeon.h>
29 29
30/** 30/**
31 * Given the chip processor ID from COP0, this function returns a 31 * Read a byte of fuse data
32 * string representing the chip model number. The string is of the 32 * @byte_addr: address to read
33 * form CNXXXXpX.X-FREQ-SUFFIX.
34 * - XXXX = The chip model number
35 * - X.X = Chip pass number
36 * - FREQ = Current frequency in Mhz
37 * - SUFFIX = NSP, EXP, SCP, SSP, or CP
38 *
39 * @chip_id: Chip ID
40 * 33 *
41 * Returns Model string 34 * Returns fuse value: 0 or 1
42 */ 35 */
43const char *octeon_model_get_string(uint32_t chip_id) 36static uint8_t __init cvmx_fuse_read_byte(int byte_addr)
44{ 37{
45 static char buffer[32]; 38 union cvmx_mio_fus_rcmd read_cmd;
46 return octeon_model_get_string_buffer(chip_id, buffer); 39
40 read_cmd.u64 = 0;
41 read_cmd.s.addr = byte_addr;
42 read_cmd.s.pend = 1;
43 cvmx_write_csr(CVMX_MIO_FUS_RCMD, read_cmd.u64);
44 while ((read_cmd.u64 = cvmx_read_csr(CVMX_MIO_FUS_RCMD))
45 && read_cmd.s.pend)
46 ;
47 return read_cmd.s.dat;
47} 48}
48 49
49/* 50/*
@@ -51,7 +52,8 @@ const char *octeon_model_get_string(uint32_t chip_id)
51 * as running early in u-boot static/global variables don't work when 52 * as running early in u-boot static/global variables don't work when
52 * running from flash. 53 * running from flash.
53 */ 54 */
54const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer) 55static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
56 char *buffer)
55{ 57{
56 const char *family; 58 const char *family;
57 const char *core_model; 59 const char *core_model;
@@ -407,3 +409,22 @@ const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer)
407 sprintf(buffer, "CN%s%sp%s-%d-%s", family, core_model, pass, clock_mhz, suffix); 409 sprintf(buffer, "CN%s%sp%s-%d-%s", family, core_model, pass, clock_mhz, suffix);
408 return buffer; 410 return buffer;
409} 411}
412
413/**
414 * Given the chip processor ID from COP0, this function returns a
415 * string representing the chip model number. The string is of the
416 * form CNXXXXpX.X-FREQ-SUFFIX.
417 * - XXXX = The chip model number
418 * - X.X = Chip pass number
419 * - FREQ = Current frequency in MHz
420 * - SUFFIX = NSP, EXP, SCP, SSP, or CP
421 *
422 * @chip_id: Chip ID
423 *
424 * Returns Model string
425 */
426const char *__init octeon_model_get_string(uint32_t chip_id)
427{
428 static char buffer[32];
429 return octeon_model_get_string_buffer(chip_id, buffer);
430}
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 5ebdb32d9a2b..94f888d3384e 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -1092,7 +1092,7 @@ static int __init edac_devinit(void)
1092 name = edac_device_names[i]; 1092 name = edac_device_names[i];
1093 dev = platform_device_register_simple(name, -1, NULL, 0); 1093 dev = platform_device_register_simple(name, -1, NULL, 0);
1094 if (IS_ERR(dev)) { 1094 if (IS_ERR(dev)) {
1095 pr_err("Registation of %s failed!\n", name); 1095 pr_err("Registration of %s failed!\n", name);
1096 err = PTR_ERR(dev); 1096 err = PTR_ERR(dev);
1097 } 1097 }
1098 } 1098 }
@@ -1103,7 +1103,7 @@ static int __init edac_devinit(void)
1103 dev = platform_device_register_simple("octeon_lmc_edac", 1103 dev = platform_device_register_simple("octeon_lmc_edac",
1104 i, NULL, 0); 1104 i, NULL, 0);
1105 if (IS_ERR(dev)) { 1105 if (IS_ERR(dev)) {
1106 pr_err("Registation of octeon_lmc_edac %d failed!\n", i); 1106 pr_err("Registration of octeon_lmc_edac %d failed!\n", i);
1107 err = PTR_ERR(dev); 1107 err = PTR_ERR(dev);
1108 } 1108 }
1109 } 1109 }
diff --git a/arch/mips/configs/bcm3384_defconfig b/arch/mips/configs/bcm3384_defconfig
new file mode 100644
index 000000000000..88711c28ff32
--- /dev/null
+++ b/arch/mips/configs/bcm3384_defconfig
@@ -0,0 +1,78 @@
1CONFIG_BCM3384=y
2CONFIG_HIGHMEM=y
3CONFIG_SMP=y
4CONFIG_NR_CPUS=4
5# CONFIG_SECCOMP is not set
6CONFIG_MIPS_O32_FP64_SUPPORT=y
7# CONFIG_LOCALVERSION_AUTO is not set
8# CONFIG_SWAP is not set
9CONFIG_NO_HZ=y
10CONFIG_BLK_DEV_INITRD=y
11# CONFIG_RD_GZIP is not set
12CONFIG_EXPERT=y
13# CONFIG_VM_EVENT_COUNTERS is not set
14# CONFIG_SLUB_DEBUG is not set
15# CONFIG_BLK_DEV_BSG is not set
16# CONFIG_IOSCHED_DEADLINE is not set
17# CONFIG_IOSCHED_CFQ is not set
18CONFIG_NET=y
19CONFIG_PACKET=y
20CONFIG_PACKET_DIAG=y
21CONFIG_UNIX=y
22CONFIG_INET=y
23# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
24# CONFIG_INET_XFRM_MODE_TUNNEL is not set
25# CONFIG_INET_XFRM_MODE_BEET is not set
26# CONFIG_INET_LRO is not set
27# CONFIG_INET_DIAG is not set
28CONFIG_CFG80211=y
29CONFIG_NL80211_TESTMODE=y
30CONFIG_MAC80211=y
31CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
32CONFIG_DEVTMPFS=y
33CONFIG_DEVTMPFS_MOUNT=y
34# CONFIG_STANDALONE is not set
35# CONFIG_PREVENT_FIRMWARE_BUILD is not set
36CONFIG_MTD=y
37CONFIG_MTD_CFI=y
38CONFIG_MTD_CFI_INTELEXT=y
39CONFIG_MTD_CFI_AMDSTD=y
40CONFIG_MTD_PHYSMAP=y
41# CONFIG_BLK_DEV is not set
42CONFIG_SCSI=y
43CONFIG_BLK_DEV_SD=y
44# CONFIG_SCSI_LOWLEVEL is not set
45CONFIG_NETDEVICES=y
46CONFIG_USB_USBNET=y
47# CONFIG_INPUT is not set
48# CONFIG_SERIO is not set
49# CONFIG_VT is not set
50# CONFIG_DEVKMEM is not set
51CONFIG_SERIAL_EARLYCON_FORCE=y
52CONFIG_SERIAL_BCM63XX=y
53CONFIG_SERIAL_BCM63XX_CONSOLE=y
54# CONFIG_HW_RANDOM is not set
55# CONFIG_HWMON is not set
56CONFIG_USB=y
57CONFIG_USB_EHCI_HCD=y
58# CONFIG_USB_EHCI_TT_NEWSCHED is not set
59CONFIG_USB_EHCI_HCD_PLATFORM=y
60CONFIG_USB_OHCI_HCD=y
61CONFIG_USB_OHCI_HCD_PLATFORM=y
62CONFIG_USB_STORAGE=y
63CONFIG_EXT4_FS=y
64CONFIG_EXT4_FS_POSIX_ACL=y
65CONFIG_EXT4_FS_SECURITY=y
66# CONFIG_DNOTIFY is not set
67CONFIG_FUSE_FS=y
68CONFIG_VFAT_FS=y
69CONFIG_PROC_KCORE=y
70CONFIG_TMPFS=y
71CONFIG_NFS_FS=y
72CONFIG_CIFS=y
73CONFIG_NLS_CODEPAGE_437=y
74CONFIG_NLS_ASCII=y
75CONFIG_NLS_ISO8859_1=y
76CONFIG_DEBUG_FS=y
77CONFIG_MAGIC_SYSRQ=y
78# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/fw/lib/cmdline.c b/arch/mips/fw/lib/cmdline.c
index ffd0345780ae..6ecda64ad184 100644
--- a/arch/mips/fw/lib/cmdline.c
+++ b/arch/mips/fw/lib/cmdline.c
@@ -68,7 +68,7 @@ char *fw_getenv(char *envname)
68 result = fw_envp(index + 1); 68 result = fw_envp(index + 1);
69 break; 69 break;
70 } else if (fw_envp(index)[i] == '=') { 70 } else if (fw_envp(index)[i] == '=') {
71 result = (fw_envp(index + 1) + i); 71 result = fw_envp(index) + i + 1;
72 break; 72 break;
73 } 73 }
74 } 74 }
@@ -88,13 +88,13 @@ unsigned long fw_getenvl(char *envname)
88{ 88{
89 unsigned long envl = 0UL; 89 unsigned long envl = 0UL;
90 char *str; 90 char *str;
91 long val;
92 int tmp; 91 int tmp;
93 92
94 str = fw_getenv(envname); 93 str = fw_getenv(envname);
95 if (str) { 94 if (str) {
96 tmp = kstrtol(str, 0, &val); 95 tmp = kstrtoul(str, 0, &envl);
97 envl = (unsigned long)val; 96 if (tmp)
97 envl = 0;
98 } 98 }
99 99
100 return envl; 100 return envl;
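
The fw_getenv() hunk above fixes which string the value pointer lands in when entries are stored as combined "name=value" strings. A standalone sketch of the off-by-one, with a hypothetical environment:

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* hypothetical firmware environment: one "name=value" string per slot */
	const char *envp[] = { "memsize=0x08000000", "modetty0=38400n8r" };
	int index = 0;
	int i = (int)strcspn(envp[index], "=");	/* i == 7, the '=' */

	printf("old: %s\n", envp[index + 1] + i);	/* "0=38400n8r": wrong string */
	printf("new: %s\n", envp[index] + i + 1);	/* "0x08000000": this entry's value */
	return 0;
}
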
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 6dd6bfc607e9..857da84cfc92 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -17,6 +17,7 @@
17#include <linux/irqflags.h> 17#include <linux/irqflags.h>
18#include <linux/types.h> 18#include <linux/types.h>
19#include <asm/barrier.h> 19#include <asm/barrier.h>
20#include <asm/compiler.h>
20#include <asm/cpu-features.h> 21#include <asm/cpu-features.h>
21#include <asm/cmpxchg.h> 22#include <asm/cmpxchg.h>
22#include <asm/war.h> 23#include <asm/war.h>
@@ -40,95 +41,97 @@
40 */ 41 */
41#define atomic_set(v, i) ((v)->counter = (i)) 42#define atomic_set(v, i) ((v)->counter = (i))
42 43
43#define ATOMIC_OP(op, c_op, asm_op) \ 44#define ATOMIC_OP(op, c_op, asm_op) \
44static __inline__ void atomic_##op(int i, atomic_t * v) \ 45static __inline__ void atomic_##op(int i, atomic_t * v) \
45{ \ 46{ \
46 if (kernel_uses_llsc && R10000_LLSC_WAR) { \ 47 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
47 int temp; \ 48 int temp; \
48 \ 49 \
49 __asm__ __volatile__( \ 50 __asm__ __volatile__( \
50 " .set arch=r4000 \n" \ 51 " .set arch=r4000 \n" \
51 "1: ll %0, %1 # atomic_" #op " \n" \ 52 "1: ll %0, %1 # atomic_" #op " \n" \
52 " " #asm_op " %0, %2 \n" \ 53 " " #asm_op " %0, %2 \n" \
53 " sc %0, %1 \n" \ 54 " sc %0, %1 \n" \
54 " beqzl %0, 1b \n" \ 55 " beqzl %0, 1b \n" \
55 " .set mips0 \n" \ 56 " .set mips0 \n" \
56 : "=&r" (temp), "+m" (v->counter) \ 57 : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
57 : "Ir" (i)); \ 58 : "Ir" (i)); \
58 } else if (kernel_uses_llsc) { \ 59 } else if (kernel_uses_llsc) { \
59 int temp; \ 60 int temp; \
60 \ 61 \
61 do { \ 62 do { \
62 __asm__ __volatile__( \ 63 __asm__ __volatile__( \
63 " .set arch=r4000 \n" \ 64 " .set arch=r4000 \n" \
64 " ll %0, %1 # atomic_" #op "\n" \ 65 " ll %0, %1 # atomic_" #op "\n" \
65 " " #asm_op " %0, %2 \n" \ 66 " " #asm_op " %0, %2 \n" \
66 " sc %0, %1 \n" \ 67 " sc %0, %1 \n" \
67 " .set mips0 \n" \ 68 " .set mips0 \n" \
68 : "=&r" (temp), "+m" (v->counter) \ 69 : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
69 : "Ir" (i)); \ 70 : "Ir" (i)); \
70 } while (unlikely(!temp)); \ 71 } while (unlikely(!temp)); \
71 } else { \ 72 } else { \
72 unsigned long flags; \ 73 unsigned long flags; \
73 \ 74 \
74 raw_local_irq_save(flags); \ 75 raw_local_irq_save(flags); \
75 v->counter c_op i; \ 76 v->counter c_op i; \
76 raw_local_irq_restore(flags); \ 77 raw_local_irq_restore(flags); \
77 } \ 78 } \
78} \
79
80#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
81static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
82{ \
83 int result; \
84 \
85 smp_mb__before_llsc(); \
86 \
87 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
88 int temp; \
89 \
90 __asm__ __volatile__( \
91 " .set arch=r4000 \n" \
92 "1: ll %1, %2 # atomic_" #op "_return \n" \
93 " " #asm_op " %0, %1, %3 \n" \
94 " sc %0, %2 \n" \
95 " beqzl %0, 1b \n" \
96 " " #asm_op " %0, %1, %3 \n" \
97 " .set mips0 \n" \
98 : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
99 : "Ir" (i)); \
100 } else if (kernel_uses_llsc) { \
101 int temp; \
102 \
103 do { \
104 __asm__ __volatile__( \
105 " .set arch=r4000 \n" \
106 " ll %1, %2 # atomic_" #op "_return \n" \
107 " " #asm_op " %0, %1, %3 \n" \
108 " sc %0, %2 \n" \
109 " .set mips0 \n" \
110 : "=&r" (result), "=&r" (temp), "+m" (v->counter) \
111 : "Ir" (i)); \
112 } while (unlikely(!result)); \
113 \
114 result = temp; result c_op i; \
115 } else { \
116 unsigned long flags; \
117 \
118 raw_local_irq_save(flags); \
119 result = v->counter; \
120 result c_op i; \
121 v->counter = result; \
122 raw_local_irq_restore(flags); \
123 } \
124 \
125 smp_llsc_mb(); \
126 \
127 return result; \
128} 79}
129 80
130#define ATOMIC_OPS(op, c_op, asm_op) \ 81#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
131 ATOMIC_OP(op, c_op, asm_op) \ 82static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
83{ \
84 int result; \
85 \
86 smp_mb__before_llsc(); \
87 \
88 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
89 int temp; \
90 \
91 __asm__ __volatile__( \
92 " .set arch=r4000 \n" \
93 "1: ll %1, %2 # atomic_" #op "_return \n" \
94 " " #asm_op " %0, %1, %3 \n" \
95 " sc %0, %2 \n" \
96 " beqzl %0, 1b \n" \
97 " " #asm_op " %0, %1, %3 \n" \
98 " .set mips0 \n" \
99 : "=&r" (result), "=&r" (temp), \
100 "+" GCC_OFF12_ASM() (v->counter) \
101 : "Ir" (i)); \
102 } else if (kernel_uses_llsc) { \
103 int temp; \
104 \
105 do { \
106 __asm__ __volatile__( \
107 " .set arch=r4000 \n" \
108 " ll %1, %2 # atomic_" #op "_return \n" \
109 " " #asm_op " %0, %1, %3 \n" \
110 " sc %0, %2 \n" \
111 " .set mips0 \n" \
112 : "=&r" (result), "=&r" (temp), \
113 "+" GCC_OFF12_ASM() (v->counter) \
114 : "Ir" (i)); \
115 } while (unlikely(!result)); \
116 \
117 result = temp; result c_op i; \
118 } else { \
119 unsigned long flags; \
120 \
121 raw_local_irq_save(flags); \
122 result = v->counter; \
123 result c_op i; \
124 v->counter = result; \
125 raw_local_irq_restore(flags); \
126 } \
127 \
128 smp_llsc_mb(); \
129 \
130 return result; \
131}
132
133#define ATOMIC_OPS(op, c_op, asm_op) \
134 ATOMIC_OP(op, c_op, asm_op) \
132 ATOMIC_OP_RETURN(op, c_op, asm_op) 135 ATOMIC_OP_RETURN(op, c_op, asm_op)
133 136
134ATOMIC_OPS(add, +=, addu) 137ATOMIC_OPS(add, +=, addu)
@@ -167,8 +170,9 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
167 " .set reorder \n" 170 " .set reorder \n"
168 "1: \n" 171 "1: \n"
169 " .set mips0 \n" 172 " .set mips0 \n"
170 : "=&r" (result), "=&r" (temp), "+m" (v->counter) 173 : "=&r" (result), "=&r" (temp),
171 : "Ir" (i), "m" (v->counter) 174 "+" GCC_OFF12_ASM() (v->counter)
175 : "Ir" (i), GCC_OFF12_ASM() (v->counter)
172 : "memory"); 176 : "memory");
173 } else if (kernel_uses_llsc) { 177 } else if (kernel_uses_llsc) {
174 int temp; 178 int temp;
@@ -185,7 +189,8 @@ static __inline__ int atomic_sub_if_positive(int i, atomic_t * v)
185 " .set reorder \n" 189 " .set reorder \n"
186 "1: \n" 190 "1: \n"
187 " .set mips0 \n" 191 " .set mips0 \n"
188 : "=&r" (result), "=&r" (temp), "+m" (v->counter) 192 : "=&r" (result), "=&r" (temp),
193 "+" GCC_OFF12_ASM() (v->counter)
189 : "Ir" (i)); 194 : "Ir" (i));
190 } else { 195 } else {
191 unsigned long flags; 196 unsigned long flags;
@@ -315,96 +320,98 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
315 */ 320 */
316#define atomic64_set(v, i) ((v)->counter = (i)) 321#define atomic64_set(v, i) ((v)->counter = (i))
317 322
318#define ATOMIC64_OP(op, c_op, asm_op) \ 323#define ATOMIC64_OP(op, c_op, asm_op) \
319static __inline__ void atomic64_##op(long i, atomic64_t * v) \ 324static __inline__ void atomic64_##op(long i, atomic64_t * v) \
320{ \ 325{ \
321 if (kernel_uses_llsc && R10000_LLSC_WAR) { \ 326 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
322 long temp; \ 327 long temp; \
323 \ 328 \
324 __asm__ __volatile__( \ 329 __asm__ __volatile__( \
325 " .set arch=r4000 \n" \ 330 " .set arch=r4000 \n" \
326 "1: lld %0, %1 # atomic64_" #op " \n" \ 331 "1: lld %0, %1 # atomic64_" #op " \n" \
327 " " #asm_op " %0, %2 \n" \ 332 " " #asm_op " %0, %2 \n" \
328 " scd %0, %1 \n" \ 333 " scd %0, %1 \n" \
329 " beqzl %0, 1b \n" \ 334 " beqzl %0, 1b \n" \
330 " .set mips0 \n" \ 335 " .set mips0 \n" \
331 : "=&r" (temp), "+m" (v->counter) \ 336 : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
332 : "Ir" (i)); \ 337 : "Ir" (i)); \
333 } else if (kernel_uses_llsc) { \ 338 } else if (kernel_uses_llsc) { \
334 long temp; \ 339 long temp; \
335 \ 340 \
336 do { \ 341 do { \
337 __asm__ __volatile__( \ 342 __asm__ __volatile__( \
338 " .set arch=r4000 \n" \ 343 " .set arch=r4000 \n" \
339 " lld %0, %1 # atomic64_" #op "\n" \ 344 " lld %0, %1 # atomic64_" #op "\n" \
340 " " #asm_op " %0, %2 \n" \ 345 " " #asm_op " %0, %2 \n" \
341 " scd %0, %1 \n" \ 346 " scd %0, %1 \n" \
342 " .set mips0 \n" \ 347 " .set mips0 \n" \
343 : "=&r" (temp), "+m" (v->counter) \ 348 : "=&r" (temp), "+" GCC_OFF12_ASM() (v->counter) \
344 : "Ir" (i)); \ 349 : "Ir" (i)); \
345 } while (unlikely(!temp)); \ 350 } while (unlikely(!temp)); \
346 } else { \ 351 } else { \
347 unsigned long flags; \ 352 unsigned long flags; \
348 \ 353 \
349 raw_local_irq_save(flags); \ 354 raw_local_irq_save(flags); \
350 v->counter c_op i; \ 355 v->counter c_op i; \
351 raw_local_irq_restore(flags); \ 356 raw_local_irq_restore(flags); \
352 } \ 357 } \
353} \ 358}
354 359
355#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \ 360#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
356static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \ 361static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
357{ \ 362{ \
358 long result; \ 363 long result; \
359 \ 364 \
360 smp_mb__before_llsc(); \ 365 smp_mb__before_llsc(); \
361 \ 366 \
362 if (kernel_uses_llsc && R10000_LLSC_WAR) { \ 367 if (kernel_uses_llsc && R10000_LLSC_WAR) { \
363 long temp; \ 368 long temp; \
364 \ 369 \
365 __asm__ __volatile__( \ 370 __asm__ __volatile__( \
366 " .set arch=r4000 \n" \ 371 " .set arch=r4000 \n" \
367 "1: lld %1, %2 # atomic64_" #op "_return\n" \ 372 "1: lld %1, %2 # atomic64_" #op "_return\n" \
368 " " #asm_op " %0, %1, %3 \n" \ 373 " " #asm_op " %0, %1, %3 \n" \
369 " scd %0, %2 \n" \ 374 " scd %0, %2 \n" \
370 " beqzl %0, 1b \n" \ 375 " beqzl %0, 1b \n" \
371 " " #asm_op " %0, %1, %3 \n" \ 376 " " #asm_op " %0, %1, %3 \n" \
372 " .set mips0 \n" \ 377 " .set mips0 \n" \
373 : "=&r" (result), "=&r" (temp), "+m" (v->counter) \ 378 : "=&r" (result), "=&r" (temp), \
374 : "Ir" (i)); \ 379 "+" GCC_OFF12_ASM() (v->counter) \
375 } else if (kernel_uses_llsc) { \ 380 : "Ir" (i)); \
376 long temp; \ 381 } else if (kernel_uses_llsc) { \
377 \ 382 long temp; \
378 do { \ 383 \
379 __asm__ __volatile__( \ 384 do { \
380 " .set arch=r4000 \n" \ 385 __asm__ __volatile__( \
381 " lld %1, %2 # atomic64_" #op "_return\n" \ 386 " .set arch=r4000 \n" \
382 " " #asm_op " %0, %1, %3 \n" \ 387 " lld %1, %2 # atomic64_" #op "_return\n" \
383 " scd %0, %2 \n" \ 388 " " #asm_op " %0, %1, %3 \n" \
384 " .set mips0 \n" \ 389 " scd %0, %2 \n" \
385 : "=&r" (result), "=&r" (temp), "=m" (v->counter) \ 390 " .set mips0 \n" \
386 : "Ir" (i), "m" (v->counter) \ 391 : "=&r" (result), "=&r" (temp), \
387 : "memory"); \ 392 "=" GCC_OFF12_ASM() (v->counter) \
388 } while (unlikely(!result)); \ 393 : "Ir" (i), GCC_OFF12_ASM() (v->counter) \
389 \ 394 : "memory"); \
390 result = temp; result c_op i; \ 395 } while (unlikely(!result)); \
391 } else { \ 396 \
392 unsigned long flags; \ 397 result = temp; result c_op i; \
393 \ 398 } else { \
394 raw_local_irq_save(flags); \ 399 unsigned long flags; \
395 result = v->counter; \ 400 \
396 result c_op i; \ 401 raw_local_irq_save(flags); \
397 v->counter = result; \ 402 result = v->counter; \
398 raw_local_irq_restore(flags); \ 403 result c_op i; \
399 } \ 404 v->counter = result; \
400 \ 405 raw_local_irq_restore(flags); \
401 smp_llsc_mb(); \ 406 } \
402 \ 407 \
403 return result; \ 408 smp_llsc_mb(); \
409 \
410 return result; \
404} 411}
405 412
406#define ATOMIC64_OPS(op, c_op, asm_op) \ 413#define ATOMIC64_OPS(op, c_op, asm_op) \
407 ATOMIC64_OP(op, c_op, asm_op) \ 414 ATOMIC64_OP(op, c_op, asm_op) \
408 ATOMIC64_OP_RETURN(op, c_op, asm_op) 415 ATOMIC64_OP_RETURN(op, c_op, asm_op)
409 416
410ATOMIC64_OPS(add, +=, daddu) 417ATOMIC64_OPS(add, +=, daddu)
@@ -415,7 +422,8 @@ ATOMIC64_OPS(sub, -=, dsubu)
415#undef ATOMIC64_OP 422#undef ATOMIC64_OP
416 423
417/* 424/*
418 * atomic64_sub_if_positive - conditionally subtract integer from atomic variable 425 * atomic64_sub_if_positive - conditionally subtract integer from atomic
426 * variable
419 * @i: integer value to subtract 427 * @i: integer value to subtract
420 * @v: pointer of type atomic64_t 428 * @v: pointer of type atomic64_t
421 * 429 *
@@ -443,8 +451,9 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
443 " .set reorder \n" 451 " .set reorder \n"
444 "1: \n" 452 "1: \n"
445 " .set mips0 \n" 453 " .set mips0 \n"
446 : "=&r" (result), "=&r" (temp), "=m" (v->counter) 454 : "=&r" (result), "=&r" (temp),
447 : "Ir" (i), "m" (v->counter) 455 "=" GCC_OFF12_ASM() (v->counter)
456 : "Ir" (i), GCC_OFF12_ASM() (v->counter)
448 : "memory"); 457 : "memory");
449 } else if (kernel_uses_llsc) { 458 } else if (kernel_uses_llsc) {
450 long temp; 459 long temp;
@@ -461,7 +470,8 @@ static __inline__ long atomic64_sub_if_positive(long i, atomic64_t * v)
461 " .set reorder \n" 470 " .set reorder \n"
462 "1: \n" 471 "1: \n"
463 " .set mips0 \n" 472 " .set mips0 \n"
464 : "=&r" (result), "=&r" (temp), "+m" (v->counter) 473 : "=&r" (result), "=&r" (temp),
474 "+" GCC_OFF12_ASM() (v->counter)
465 : "Ir" (i)); 475 : "Ir" (i));
466 } else { 476 } else {
467 unsigned long flags; 477 unsigned long flags;
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index bae6b0fa8ab5..6663bcca9d0c 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -17,6 +17,7 @@
17#include <linux/types.h> 17#include <linux/types.h>
18#include <asm/barrier.h> 18#include <asm/barrier.h>
19#include <asm/byteorder.h> /* sigh ... */ 19#include <asm/byteorder.h> /* sigh ... */
20#include <asm/compiler.h>
20#include <asm/cpu-features.h> 21#include <asm/cpu-features.h>
21#include <asm/sgidefs.h> 22#include <asm/sgidefs.h>
22#include <asm/war.h> 23#include <asm/war.h>
@@ -78,8 +79,8 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
78 " " __SC "%0, %1 \n" 79 " " __SC "%0, %1 \n"
79 " beqzl %0, 1b \n" 80 " beqzl %0, 1b \n"
80 " .set mips0 \n" 81 " .set mips0 \n"
81 : "=&r" (temp), "=m" (*m) 82 : "=&r" (temp), "=" GCC_OFF12_ASM() (*m)
82 : "ir" (1UL << bit), "m" (*m)); 83 : "ir" (1UL << bit), GCC_OFF12_ASM() (*m));
83#ifdef CONFIG_CPU_MIPSR2 84#ifdef CONFIG_CPU_MIPSR2
84 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { 85 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
85 do { 86 do {
@@ -87,7 +88,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
87 " " __LL "%0, %1 # set_bit \n" 88 " " __LL "%0, %1 # set_bit \n"
88 " " __INS "%0, %3, %2, 1 \n" 89 " " __INS "%0, %3, %2, 1 \n"
89 " " __SC "%0, %1 \n" 90 " " __SC "%0, %1 \n"
90 : "=&r" (temp), "+m" (*m) 91 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
91 : "ir" (bit), "r" (~0)); 92 : "ir" (bit), "r" (~0));
92 } while (unlikely(!temp)); 93 } while (unlikely(!temp));
93#endif /* CONFIG_CPU_MIPSR2 */ 94#endif /* CONFIG_CPU_MIPSR2 */
@@ -99,7 +100,7 @@ static inline void set_bit(unsigned long nr, volatile unsigned long *addr)
99 " or %0, %2 \n" 100 " or %0, %2 \n"
100 " " __SC "%0, %1 \n" 101 " " __SC "%0, %1 \n"
101 " .set mips0 \n" 102 " .set mips0 \n"
102 : "=&r" (temp), "+m" (*m) 103 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
103 : "ir" (1UL << bit)); 104 : "ir" (1UL << bit));
104 } while (unlikely(!temp)); 105 } while (unlikely(!temp));
105 } else 106 } else
@@ -130,7 +131,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
130 " " __SC "%0, %1 \n" 131 " " __SC "%0, %1 \n"
131 " beqzl %0, 1b \n" 132 " beqzl %0, 1b \n"
132 " .set mips0 \n" 133 " .set mips0 \n"
133 : "=&r" (temp), "+m" (*m) 134 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
134 : "ir" (~(1UL << bit))); 135 : "ir" (~(1UL << bit)));
135#ifdef CONFIG_CPU_MIPSR2 136#ifdef CONFIG_CPU_MIPSR2
136 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) { 137 } else if (kernel_uses_llsc && __builtin_constant_p(bit)) {
@@ -139,7 +140,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
139 " " __LL "%0, %1 # clear_bit \n" 140 " " __LL "%0, %1 # clear_bit \n"
140 " " __INS "%0, $0, %2, 1 \n" 141 " " __INS "%0, $0, %2, 1 \n"
141 " " __SC "%0, %1 \n" 142 " " __SC "%0, %1 \n"
142 : "=&r" (temp), "+m" (*m) 143 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
143 : "ir" (bit)); 144 : "ir" (bit));
144 } while (unlikely(!temp)); 145 } while (unlikely(!temp));
145#endif /* CONFIG_CPU_MIPSR2 */ 146#endif /* CONFIG_CPU_MIPSR2 */
@@ -151,7 +152,7 @@ static inline void clear_bit(unsigned long nr, volatile unsigned long *addr)
151 " and %0, %2 \n" 152 " and %0, %2 \n"
152 " " __SC "%0, %1 \n" 153 " " __SC "%0, %1 \n"
153 " .set mips0 \n" 154 " .set mips0 \n"
154 : "=&r" (temp), "+m" (*m) 155 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
155 : "ir" (~(1UL << bit))); 156 : "ir" (~(1UL << bit)));
156 } while (unlikely(!temp)); 157 } while (unlikely(!temp));
157 } else 158 } else
@@ -196,7 +197,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
196 " " __SC "%0, %1 \n" 197 " " __SC "%0, %1 \n"
197 " beqzl %0, 1b \n" 198 " beqzl %0, 1b \n"
198 " .set mips0 \n" 199 " .set mips0 \n"
199 : "=&r" (temp), "+m" (*m) 200 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
200 : "ir" (1UL << bit)); 201 : "ir" (1UL << bit));
201 } else if (kernel_uses_llsc) { 202 } else if (kernel_uses_llsc) {
202 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG); 203 unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -209,7 +210,7 @@ static inline void change_bit(unsigned long nr, volatile unsigned long *addr)
209 " xor %0, %2 \n" 210 " xor %0, %2 \n"
210 " " __SC "%0, %1 \n" 211 " " __SC "%0, %1 \n"
211 " .set mips0 \n" 212 " .set mips0 \n"
212 : "=&r" (temp), "+m" (*m) 213 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
213 : "ir" (1UL << bit)); 214 : "ir" (1UL << bit));
214 } while (unlikely(!temp)); 215 } while (unlikely(!temp));
215 } else 216 } else
@@ -244,7 +245,7 @@ static inline int test_and_set_bit(unsigned long nr,
244 " beqzl %2, 1b \n" 245 " beqzl %2, 1b \n"
245 " and %2, %0, %3 \n" 246 " and %2, %0, %3 \n"
246 " .set mips0 \n" 247 " .set mips0 \n"
247 : "=&r" (temp), "+m" (*m), "=&r" (res) 248 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
248 : "r" (1UL << bit) 249 : "r" (1UL << bit)
249 : "memory"); 250 : "memory");
250 } else if (kernel_uses_llsc) { 251 } else if (kernel_uses_llsc) {
@@ -258,7 +259,7 @@ static inline int test_and_set_bit(unsigned long nr,
258 " or %2, %0, %3 \n" 259 " or %2, %0, %3 \n"
259 " " __SC "%2, %1 \n" 260 " " __SC "%2, %1 \n"
260 " .set mips0 \n" 261 " .set mips0 \n"
261 : "=&r" (temp), "+m" (*m), "=&r" (res) 262 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
262 : "r" (1UL << bit) 263 : "r" (1UL << bit)
263 : "memory"); 264 : "memory");
264 } while (unlikely(!res)); 265 } while (unlikely(!res));
@@ -312,7 +313,7 @@ static inline int test_and_set_bit_lock(unsigned long nr,
312 " or %2, %0, %3 \n" 313 " or %2, %0, %3 \n"
313 " " __SC "%2, %1 \n" 314 " " __SC "%2, %1 \n"
314 " .set mips0 \n" 315 " .set mips0 \n"
315 : "=&r" (temp), "+m" (*m), "=&r" (res) 316 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
316 : "r" (1UL << bit) 317 : "r" (1UL << bit)
317 : "memory"); 318 : "memory");
318 } while (unlikely(!res)); 319 } while (unlikely(!res));
@@ -354,7 +355,7 @@ static inline int test_and_clear_bit(unsigned long nr,
354 " beqzl %2, 1b \n" 355 " beqzl %2, 1b \n"
355 " and %2, %0, %3 \n" 356 " and %2, %0, %3 \n"
356 " .set mips0 \n" 357 " .set mips0 \n"
357 : "=&r" (temp), "+m" (*m), "=&r" (res) 358 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
358 : "r" (1UL << bit) 359 : "r" (1UL << bit)
359 : "memory"); 360 : "memory");
360#ifdef CONFIG_CPU_MIPSR2 361#ifdef CONFIG_CPU_MIPSR2
@@ -368,7 +369,7 @@ static inline int test_and_clear_bit(unsigned long nr,
368 " " __EXT "%2, %0, %3, 1 \n" 369 " " __EXT "%2, %0, %3, 1 \n"
369 " " __INS "%0, $0, %3, 1 \n" 370 " " __INS "%0, $0, %3, 1 \n"
370 " " __SC "%0, %1 \n" 371 " " __SC "%0, %1 \n"
371 : "=&r" (temp), "+m" (*m), "=&r" (res) 372 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
372 : "ir" (bit) 373 : "ir" (bit)
373 : "memory"); 374 : "memory");
374 } while (unlikely(!temp)); 375 } while (unlikely(!temp));
@@ -385,7 +386,7 @@ static inline int test_and_clear_bit(unsigned long nr,
385 " xor %2, %3 \n" 386 " xor %2, %3 \n"
386 " " __SC "%2, %1 \n" 387 " " __SC "%2, %1 \n"
387 " .set mips0 \n" 388 " .set mips0 \n"
388 : "=&r" (temp), "+m" (*m), "=&r" (res) 389 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
389 : "r" (1UL << bit) 390 : "r" (1UL << bit)
390 : "memory"); 391 : "memory");
391 } while (unlikely(!res)); 392 } while (unlikely(!res));
@@ -427,7 +428,7 @@ static inline int test_and_change_bit(unsigned long nr,
427 " beqzl %2, 1b \n" 428 " beqzl %2, 1b \n"
428 " and %2, %0, %3 \n" 429 " and %2, %0, %3 \n"
429 " .set mips0 \n" 430 " .set mips0 \n"
430 : "=&r" (temp), "+m" (*m), "=&r" (res) 431 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
431 : "r" (1UL << bit) 432 : "r" (1UL << bit)
432 : "memory"); 433 : "memory");
433 } else if (kernel_uses_llsc) { 434 } else if (kernel_uses_llsc) {
@@ -441,7 +442,7 @@ static inline int test_and_change_bit(unsigned long nr,
441 " xor %2, %0, %3 \n" 442 " xor %2, %0, %3 \n"
442 " " __SC "\t%2, %1 \n" 443 " " __SC "\t%2, %1 \n"
443 " .set mips0 \n" 444 " .set mips0 \n"
444 : "=&r" (temp), "+m" (*m), "=&r" (res) 445 : "=&r" (temp), "+" GCC_OFF12_ASM() (*m), "=&r" (res)
445 : "r" (1UL << bit) 446 : "r" (1UL << bit)
446 : "memory"); 447 : "memory");
447 } while (unlikely(!res)); 448 } while (unlikely(!res));
diff --git a/arch/mips/include/asm/bmips.h b/arch/mips/include/asm/bmips.h
index cbaccebf5065..30939b02e3ff 100644
--- a/arch/mips/include/asm/bmips.h
+++ b/arch/mips/include/asm/bmips.h
@@ -84,6 +84,7 @@ extern char bmips_smp_int_vec_end;
84extern int bmips_smp_enabled; 84extern int bmips_smp_enabled;
85extern int bmips_cpu_offset; 85extern int bmips_cpu_offset;
86extern cpumask_t bmips_booted_mask; 86extern cpumask_t bmips_booted_mask;
87extern unsigned long bmips_tp1_irqs;
87 88
88extern void bmips_ebase_setup(void); 89extern void bmips_ebase_setup(void);
89extern asmlinkage void plat_wired_tlb_setup(void); 90extern asmlinkage void plat_wired_tlb_setup(void);
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index 1f7ca8b00404..b603804caac5 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -70,10 +70,7 @@ enum loongson_machine_type {
70 MACH_DEXXON_GDIUM2F10, 70 MACH_DEXXON_GDIUM2F10,
71 MACH_LEMOTE_NAS, 71 MACH_LEMOTE_NAS,
72 MACH_LEMOTE_LL2F, 72 MACH_LEMOTE_LL2F,
73 MACH_LEMOTE_A1004, 73 MACH_LOONGSON_GENERIC,
74 MACH_LEMOTE_A1101,
75 MACH_LEMOTE_A1201,
76 MACH_LEMOTE_A1205,
77 MACH_LOONGSON_END 74 MACH_LOONGSON_END
78}; 75};
79 76
@@ -101,16 +98,16 @@ extern unsigned long mips_machtype;
101struct boot_mem_map { 98struct boot_mem_map {
102 int nr_map; 99 int nr_map;
103 struct boot_mem_map_entry { 100 struct boot_mem_map_entry {
104 phys_t addr; /* start of memory segment */ 101 phys_addr_t addr; /* start of memory segment */
105 phys_t size; /* size of memory segment */ 102 phys_addr_t size; /* size of memory segment */
106 long type; /* type of memory segment */ 103 long type; /* type of memory segment */
107 } map[BOOT_MEM_MAP_MAX]; 104 } map[BOOT_MEM_MAP_MAX];
108}; 105};
109 106
110extern struct boot_mem_map boot_mem_map; 107extern struct boot_mem_map boot_mem_map;
111 108
112extern void add_memory_region(phys_t start, phys_t size, long type); 109extern void add_memory_region(phys_addr_t start, phys_addr_t size, long type);
113extern void detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max); 110extern void detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max);
114 111
115extern void prom_init(void); 112extern void prom_init(void);
116extern void prom_free_prom_memory(void); 113extern void prom_free_prom_memory(void);
diff --git a/arch/mips/include/asm/clock.h b/arch/mips/include/asm/clock.h
index 778e32d817bc..4809c29a4890 100644
--- a/arch/mips/include/asm/clock.h
+++ b/arch/mips/include/asm/clock.h
@@ -35,9 +35,6 @@ struct clk {
35#define CLK_ALWAYS_ENABLED (1 << 0) 35#define CLK_ALWAYS_ENABLED (1 << 0)
36#define CLK_RATE_PROPAGATES (1 << 1) 36#define CLK_RATE_PROPAGATES (1 << 1)
37 37
38/* Should be defined by processor-specific code */
39void arch_init_clk_ops(struct clk_ops **, int type);
40
41int clk_init(void); 38int clk_init(void);
42 39
43int __clk_enable(struct clk *); 40int __clk_enable(struct clk *);
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index eefcaa363a87..28b1edf19501 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -10,6 +10,7 @@
10 10
11#include <linux/bug.h> 11#include <linux/bug.h>
12#include <linux/irqflags.h> 12#include <linux/irqflags.h>
13#include <asm/compiler.h>
13#include <asm/war.h> 14#include <asm/war.h>
14 15
15static inline unsigned long __xchg_u32(volatile int * m, unsigned int val) 16static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
@@ -30,8 +31,8 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
30 " sc %2, %1 \n" 31 " sc %2, %1 \n"
31 " beqzl %2, 1b \n" 32 " beqzl %2, 1b \n"
32 " .set mips0 \n" 33 " .set mips0 \n"
33 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 34 : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy)
34 : "R" (*m), "Jr" (val) 35 : GCC_OFF12_ASM() (*m), "Jr" (val)
35 : "memory"); 36 : "memory");
36 } else if (kernel_uses_llsc) { 37 } else if (kernel_uses_llsc) {
37 unsigned long dummy; 38 unsigned long dummy;
@@ -45,8 +46,9 @@ static inline unsigned long __xchg_u32(volatile int * m, unsigned int val)
45 " .set arch=r4000 \n" 46 " .set arch=r4000 \n"
46 " sc %2, %1 \n" 47 " sc %2, %1 \n"
47 " .set mips0 \n" 48 " .set mips0 \n"
48 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 49 : "=&r" (retval), "=" GCC_OFF12_ASM() (*m),
49 : "R" (*m), "Jr" (val) 50 "=&r" (dummy)
51 : GCC_OFF12_ASM() (*m), "Jr" (val)
50 : "memory"); 52 : "memory");
51 } while (unlikely(!dummy)); 53 } while (unlikely(!dummy));
52 } else { 54 } else {
@@ -80,8 +82,8 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
80 " scd %2, %1 \n" 82 " scd %2, %1 \n"
81 " beqzl %2, 1b \n" 83 " beqzl %2, 1b \n"
82 " .set mips0 \n" 84 " .set mips0 \n"
83 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 85 : "=&r" (retval), "=" GCC_OFF12_ASM() (*m), "=&r" (dummy)
84 : "R" (*m), "Jr" (val) 86 : GCC_OFF12_ASM() (*m), "Jr" (val)
85 : "memory"); 87 : "memory");
86 } else if (kernel_uses_llsc) { 88 } else if (kernel_uses_llsc) {
87 unsigned long dummy; 89 unsigned long dummy;
@@ -93,8 +95,9 @@ static inline __u64 __xchg_u64(volatile __u64 * m, __u64 val)
93 " move %2, %z4 \n" 95 " move %2, %z4 \n"
94 " scd %2, %1 \n" 96 " scd %2, %1 \n"
95 " .set mips0 \n" 97 " .set mips0 \n"
96 : "=&r" (retval), "=m" (*m), "=&r" (dummy) 98 : "=&r" (retval), "=" GCC_OFF12_ASM() (*m),
97 : "R" (*m), "Jr" (val) 99 "=&r" (dummy)
100 : GCC_OFF12_ASM() (*m), "Jr" (val)
98 : "memory"); 101 : "memory");
99 } while (unlikely(!dummy)); 102 } while (unlikely(!dummy));
100 } else { 103 } else {
@@ -155,8 +158,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
155 " beqzl $1, 1b \n" \ 158 " beqzl $1, 1b \n" \
156 "2: \n" \ 159 "2: \n" \
157 " .set pop \n" \ 160 " .set pop \n" \
158 : "=&r" (__ret), "=R" (*m) \ 161 : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \
159 : "R" (*m), "Jr" (old), "Jr" (new) \ 162 : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \
160 : "memory"); \ 163 : "memory"); \
161 } else if (kernel_uses_llsc) { \ 164 } else if (kernel_uses_llsc) { \
162 __asm__ __volatile__( \ 165 __asm__ __volatile__( \
@@ -172,8 +175,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
172 " beqz $1, 1b \n" \ 175 " beqz $1, 1b \n" \
173 " .set pop \n" \ 176 " .set pop \n" \
174 "2: \n" \ 177 "2: \n" \
175 : "=&r" (__ret), "=R" (*m) \ 178 : "=&r" (__ret), "=" GCC_OFF12_ASM() (*m) \
176 : "R" (*m), "Jr" (old), "Jr" (new) \ 179 : GCC_OFF12_ASM() (*m), "Jr" (old), "Jr" (new) \
177 : "memory"); \ 180 : "memory"); \
178 } else { \ 181 } else { \
179 unsigned long __flags; \ 182 unsigned long __flags; \
diff --git a/arch/mips/include/asm/compiler.h b/arch/mips/include/asm/compiler.h
index 71f5c5cfc58a..c73815e0123a 100644
--- a/arch/mips/include/asm/compiler.h
+++ b/arch/mips/include/asm/compiler.h
@@ -16,4 +16,12 @@
16#define GCC_REG_ACCUM "accum" 16#define GCC_REG_ACCUM "accum"
17#endif 17#endif
18 18
19#ifndef CONFIG_CPU_MICROMIPS
20#define GCC_OFF12_ASM() "R"
21#elif __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9)
22#define GCC_OFF12_ASM() "ZC"
23#else
24#error "microMIPS compilation unsupported with GCC older than 4.9"
25#endif
26
19#endif /* _ASM_COMPILER_H */ 27#endif /* _ASM_COMPILER_H */
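
The new GCC_OFF12_ASM() helper selects an inline-asm memory constraint that
matches the offset range the LL/SC instructions can encode: plain MIPS takes
any 16-bit-offset memory operand ("R"), while microMIPS LL/SC only have a
12-bit offset field, which GCC 4.9 and later model with "ZC". A minimal
sketch of how the macro is meant to be used (illustrative only, not part of
the patch):

	#include <asm/compiler.h>

	static inline void set_bit0(volatile unsigned long *m)
	{
		unsigned long temp;

		__asm__ __volatile__(
		"1:	ll	%0, %1		\n"
		"	ori	%0, %0, 1	\n"
		"	sc	%0, %1		\n"
		"	beqz	%0, 1b		\n"
		: "=&r" (temp), "+" GCC_OFF12_ASM() (*m)
		: /* no inputs */
		: "memory");
	}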
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index 3325f3eb248c..2897cfafcaf0 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -344,4 +344,8 @@
344# define cpu_has_msa 0 344# define cpu_has_msa 0
345#endif 345#endif
346 346
347#ifndef cpu_has_fre
348# define cpu_has_fre (cpu_data[0].options & MIPS_CPU_FRE)
349#endif
350
347#endif /* __ASM_CPU_FEATURES_H */ 351#endif /* __ASM_CPU_FEATURES_H */
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index dfdc77ed1839..33866fce4d63 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -142,6 +142,7 @@
142#define PRID_IMP_BMIPS3300_BUG 0x0000 142#define PRID_IMP_BMIPS3300_BUG 0x0000
143#define PRID_IMP_BMIPS43XX 0xa000 143#define PRID_IMP_BMIPS43XX 0xa000
144#define PRID_IMP_BMIPS5000 0x5a00 144#define PRID_IMP_BMIPS5000 0x5a00
145#define PRID_IMP_BMIPS5200 0x5b00
145 146
146#define PRID_REV_BMIPS4380_LO 0x0040 147#define PRID_REV_BMIPS4380_LO 0x0040
147#define PRID_REV_BMIPS4380_HI 0x006f 148#define PRID_REV_BMIPS4380_HI 0x006f
@@ -368,6 +369,7 @@ enum cpu_type_enum {
 368#define MIPS_CPU_HTW 0x100000000ull /* CPU supports Hardware Page Table Walker */ 369#define MIPS_CPU_HTW 0x100000000ull /* CPU supports Hardware Page Table Walker */
369#define MIPS_CPU_RIXIEX 0x200000000ull /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */ 370#define MIPS_CPU_RIXIEX 0x200000000ull /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */
370#define MIPS_CPU_MAAR 0x400000000ull /* MAAR(I) registers are present */ 371#define MIPS_CPU_MAAR 0x400000000ull /* MAAR(I) registers are present */
372#define MIPS_CPU_FRE 0x800000000ull /* FRE & UFE bits implemented */
371 373
372/* 374/*
373 * CPU ASE encodings 375 * CPU ASE encodings
diff --git a/arch/mips/include/asm/edac.h b/arch/mips/include/asm/edac.h
index 4da0c1fe30d9..ae6fedcb0060 100644
--- a/arch/mips/include/asm/edac.h
+++ b/arch/mips/include/asm/edac.h
@@ -1,6 +1,8 @@
1#ifndef ASM_EDAC_H 1#ifndef ASM_EDAC_H
2#define ASM_EDAC_H 2#define ASM_EDAC_H
3 3
4#include <asm/compiler.h>
5
4/* ECC atomic, DMA, SMP and interrupt safe scrub function */ 6/* ECC atomic, DMA, SMP and interrupt safe scrub function */
5 7
6static inline void atomic_scrub(void *va, u32 size) 8static inline void atomic_scrub(void *va, u32 size)
@@ -24,8 +26,8 @@ static inline void atomic_scrub(void *va, u32 size)
24 " sc %0, %1 \n" 26 " sc %0, %1 \n"
25 " beqz %0, 1b \n" 27 " beqz %0, 1b \n"
26 " .set mips0 \n" 28 " .set mips0 \n"
27 : "=&r" (temp), "=m" (*virt_addr) 29 : "=&r" (temp), "=" GCC_OFF12_ASM() (*virt_addr)
28 : "m" (*virt_addr)); 30 : GCC_OFF12_ASM() (*virt_addr));
29 31
30 virt_addr++; 32 virt_addr++;
31 } 33 }
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index 1d38fe0edd2d..eb4d95de619c 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -8,6 +8,8 @@
8#ifndef _ASM_ELF_H 8#ifndef _ASM_ELF_H
9#define _ASM_ELF_H 9#define _ASM_ELF_H
10 10
11#include <linux/fs.h>
12#include <uapi/linux/elf.h>
11 13
12/* ELF header e_flags defines. */ 14/* ELF header e_flags defines. */
13/* MIPS architecture level. */ 15/* MIPS architecture level. */
@@ -28,6 +30,7 @@
28#define PT_MIPS_REGINFO 0x70000000 30#define PT_MIPS_REGINFO 0x70000000
29#define PT_MIPS_RTPROC 0x70000001 31#define PT_MIPS_RTPROC 0x70000001
30#define PT_MIPS_OPTIONS 0x70000002 32#define PT_MIPS_OPTIONS 0x70000002
33#define PT_MIPS_ABIFLAGS 0x70000003
31 34
32/* Flags in the e_flags field of the header */ 35/* Flags in the e_flags field of the header */
33#define EF_MIPS_NOREORDER 0x00000001 36#define EF_MIPS_NOREORDER 0x00000001
@@ -174,6 +177,30 @@ typedef elf_greg_t elf_gregset_t[ELF_NGREG];
174typedef double elf_fpreg_t; 177typedef double elf_fpreg_t;
175typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; 178typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
176 179
180struct mips_elf_abiflags_v0 {
181 uint16_t version; /* Version of flags structure */
182 uint8_t isa_level; /* The level of the ISA: 1-5, 32, 64 */
183 uint8_t isa_rev; /* The revision of ISA: 0 for MIPS V and below,
184 1-n otherwise */
185 uint8_t gpr_size; /* The size of general purpose registers */
186 uint8_t cpr1_size; /* The size of co-processor 1 registers */
187 uint8_t cpr2_size; /* The size of co-processor 2 registers */
188 uint8_t fp_abi; /* The floating-point ABI */
189 uint32_t isa_ext; /* Mask of processor-specific extensions */
190 uint32_t ases; /* Mask of ASEs used */
191 uint32_t flags1; /* Mask of general flags */
192 uint32_t flags2;
193};
194
195#define MIPS_ABI_FP_ANY 0 /* FP ABI doesn't matter */
196#define MIPS_ABI_FP_DOUBLE 1 /* -mdouble-float */
197#define MIPS_ABI_FP_SINGLE 2 /* -msingle-float */
198#define MIPS_ABI_FP_SOFT 3 /* -msoft-float */
199#define MIPS_ABI_FP_OLD_64 4 /* -mips32r2 -mfp64 */
200#define MIPS_ABI_FP_XX 5 /* -mfpxx */
201#define MIPS_ABI_FP_64 6 /* -mips32r2 -mfp64 */
202#define MIPS_ABI_FP_64A 7 /* -mips32r2 -mfp64 -mno-odd-spreg */
203
177#ifdef CONFIG_32BIT 204#ifdef CONFIG_32BIT
178 205
179/* 206/*
@@ -262,16 +289,13 @@ extern struct mips_abi mips_abi_n32;
262 289
263#ifdef CONFIG_32BIT 290#ifdef CONFIG_32BIT
264 291
265#define SET_PERSONALITY(ex) \ 292#define SET_PERSONALITY2(ex, state) \
266do { \ 293do { \
267 if ((ex).e_flags & EF_MIPS_FP64) \
268 clear_thread_flag(TIF_32BIT_FPREGS); \
269 else \
270 set_thread_flag(TIF_32BIT_FPREGS); \
271 \
272 if (personality(current->personality) != PER_LINUX) \ 294 if (personality(current->personality) != PER_LINUX) \
273 set_personality(PER_LINUX); \ 295 set_personality(PER_LINUX); \
274 \ 296 \
297 mips_set_personality_fp(state); \
298 \
275 current->thread.abi = &mips_abi; \ 299 current->thread.abi = &mips_abi; \
276} while (0) 300} while (0)
277 301
@@ -291,44 +315,44 @@ do { \
291#endif 315#endif
292 316
293#ifdef CONFIG_MIPS32_O32 317#ifdef CONFIG_MIPS32_O32
294#define __SET_PERSONALITY32_O32(ex) \ 318#define __SET_PERSONALITY32_O32(ex, state) \
295 do { \ 319 do { \
296 set_thread_flag(TIF_32BIT_REGS); \ 320 set_thread_flag(TIF_32BIT_REGS); \
297 set_thread_flag(TIF_32BIT_ADDR); \ 321 set_thread_flag(TIF_32BIT_ADDR); \
298 \ 322 \
299 if (!((ex).e_flags & EF_MIPS_FP64)) \ 323 mips_set_personality_fp(state); \
300 set_thread_flag(TIF_32BIT_FPREGS); \
301 \ 324 \
302 current->thread.abi = &mips_abi_32; \ 325 current->thread.abi = &mips_abi_32; \
303 } while (0) 326 } while (0)
304#else 327#else
305#define __SET_PERSONALITY32_O32(ex) \ 328#define __SET_PERSONALITY32_O32(ex, state) \
306 do { } while (0) 329 do { } while (0)
307#endif 330#endif
308 331
309#ifdef CONFIG_MIPS32_COMPAT 332#ifdef CONFIG_MIPS32_COMPAT
310#define __SET_PERSONALITY32(ex) \ 333#define __SET_PERSONALITY32(ex, state) \
311do { \ 334do { \
312 if ((((ex).e_flags & EF_MIPS_ABI2) != 0) && \ 335 if ((((ex).e_flags & EF_MIPS_ABI2) != 0) && \
313 ((ex).e_flags & EF_MIPS_ABI) == 0) \ 336 ((ex).e_flags & EF_MIPS_ABI) == 0) \
314 __SET_PERSONALITY32_N32(); \ 337 __SET_PERSONALITY32_N32(); \
315 else \ 338 else \
316 __SET_PERSONALITY32_O32(ex); \ 339 __SET_PERSONALITY32_O32(ex, state); \
317} while (0) 340} while (0)
318#else 341#else
319#define __SET_PERSONALITY32(ex) do { } while (0) 342#define __SET_PERSONALITY32(ex, state) do { } while (0)
320#endif 343#endif
321 344
322#define SET_PERSONALITY(ex) \ 345#define SET_PERSONALITY2(ex, state) \
323do { \ 346do { \
324 unsigned int p; \ 347 unsigned int p; \
325 \ 348 \
326 clear_thread_flag(TIF_32BIT_REGS); \ 349 clear_thread_flag(TIF_32BIT_REGS); \
327 clear_thread_flag(TIF_32BIT_FPREGS); \ 350 clear_thread_flag(TIF_32BIT_FPREGS); \
351 clear_thread_flag(TIF_HYBRID_FPREGS); \
328 clear_thread_flag(TIF_32BIT_ADDR); \ 352 clear_thread_flag(TIF_32BIT_ADDR); \
329 \ 353 \
330 if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ 354 if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
331 __SET_PERSONALITY32(ex); \ 355 __SET_PERSONALITY32(ex, state); \
332 else \ 356 else \
333 current->thread.abi = &mips_abi; \ 357 current->thread.abi = &mips_abi; \
334 \ 358 \
@@ -390,4 +414,24 @@ struct mm_struct;
390extern unsigned long arch_randomize_brk(struct mm_struct *mm); 414extern unsigned long arch_randomize_brk(struct mm_struct *mm);
391#define arch_randomize_brk arch_randomize_brk 415#define arch_randomize_brk arch_randomize_brk
392 416
417struct arch_elf_state {
418 int fp_abi;
419 int interp_fp_abi;
420 int overall_abi;
421};
422
423#define INIT_ARCH_ELF_STATE { \
424 .fp_abi = -1, \
425 .interp_fp_abi = -1, \
426 .overall_abi = -1, \
427}
428
429extern int arch_elf_pt_proc(void *ehdr, void *phdr, struct file *elf,
430 bool is_interp, struct arch_elf_state *state);
431
432extern int arch_check_elf(void *ehdr, bool has_interpreter,
433 struct arch_elf_state *state);
434
435extern void mips_set_personality_fp(struct arch_elf_state *state);
436
393#endif /* _ASM_ELF_H */ 437#endif /* _ASM_ELF_H */
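
The elf.h changes thread a struct arch_elf_state through ELF loading so the
floating-point ABIs of an executable and its interpreter (read from their
PT_MIPS_ABIFLAGS program headers) can be reconciled before
mips_set_personality_fp() programs FR/FRE. A hypothetical walk-through of the
state (the values chosen here are illustrative):

	struct arch_elf_state st = INIT_ARCH_ELF_STATE;	/* all fields -1 */

	st.fp_abi = MIPS_ABI_FP_XX;		/* executable built with -mfpxx */
	st.interp_fp_abi = MIPS_ABI_FP_DOUBLE;	/* interpreter: -mdouble-float */
	/* arch_check_elf() is then expected to settle st.overall_abi, and
	   mips_set_personality_fp(&st) to set the matching TIF_*_FPREGS flags */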
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index dd562414cd5e..994d21939676 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -36,14 +36,16 @@ extern void _restore_fp(struct task_struct *);
36 36
37/* 37/*
38 * This enum specifies a mode in which we want the FPU to operate, for cores 38 * This enum specifies a mode in which we want the FPU to operate, for cores
39 * which implement the Status.FR bit. Note that FPU_32BIT & FPU_64BIT 39 * which implement the Status.FR bit. Note that the bottom bit of the value
40 * purposefully have the values 0 & 1 respectively, so that an integer value 40 * purposefully matches the desired value of the Status.FR bit.
41 * of Status.FR can be trivially casted to the corresponding enum fpu_mode.
42 */ 41 */
43enum fpu_mode { 42enum fpu_mode {
44 FPU_32BIT = 0, /* FR = 0 */ 43 FPU_32BIT = 0, /* FR = 0 */
45 FPU_64BIT, /* FR = 1 */ 44 FPU_64BIT, /* FR = 1, FRE = 0 */
46 FPU_AS_IS, 45 FPU_AS_IS,
46 FPU_HYBRID, /* FR = 1, FRE = 1 */
47
48#define FPU_FR_MASK 0x1
47}; 49};
48 50
49static inline int __enable_fpu(enum fpu_mode mode) 51static inline int __enable_fpu(enum fpu_mode mode)
@@ -57,6 +59,14 @@ static inline int __enable_fpu(enum fpu_mode mode)
57 enable_fpu_hazard(); 59 enable_fpu_hazard();
58 return 0; 60 return 0;
59 61
62 case FPU_HYBRID:
63 if (!cpu_has_fre)
64 return SIGFPE;
65
66 /* set FRE */
67 write_c0_config5(read_c0_config5() | MIPS_CONF5_FRE);
68 goto fr_common;
69
60 case FPU_64BIT: 70 case FPU_64BIT:
61#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT)) 71#if !(defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_64BIT))
62 /* we only have a 32-bit FPU */ 72 /* we only have a 32-bit FPU */
@@ -64,8 +74,11 @@ static inline int __enable_fpu(enum fpu_mode mode)
64#endif 74#endif
65 /* fall through */ 75 /* fall through */
66 case FPU_32BIT: 76 case FPU_32BIT:
77 /* clear FRE */
78 write_c0_config5(read_c0_config5() & ~MIPS_CONF5_FRE);
79fr_common:
67 /* set CU1 & change FR appropriately */ 80 /* set CU1 & change FR appropriately */
68 fr = (int)mode; 81 fr = (int)mode & FPU_FR_MASK;
69 change_c0_status(ST0_CU1 | ST0_FR, ST0_CU1 | (fr ? ST0_FR : 0)); 82 change_c0_status(ST0_CU1 | ST0_FR, ST0_CU1 | (fr ? ST0_FR : 0));
70 enable_fpu_hazard(); 83 enable_fpu_hazard();
71 84
@@ -102,13 +115,17 @@ static inline int __own_fpu(void)
102 enum fpu_mode mode; 115 enum fpu_mode mode;
103 int ret; 116 int ret;
104 117
105 mode = !test_thread_flag(TIF_32BIT_FPREGS); 118 if (test_thread_flag(TIF_HYBRID_FPREGS))
119 mode = FPU_HYBRID;
120 else
121 mode = !test_thread_flag(TIF_32BIT_FPREGS);
122
106 ret = __enable_fpu(mode); 123 ret = __enable_fpu(mode);
107 if (ret) 124 if (ret)
108 return ret; 125 return ret;
109 126
110 KSTK_STATUS(current) |= ST0_CU1; 127 KSTK_STATUS(current) |= ST0_CU1;
111 if (mode == FPU_64BIT) 128 if (mode == FPU_64BIT || mode == FPU_HYBRID)
112 KSTK_STATUS(current) |= ST0_FR; 129 KSTK_STATUS(current) |= ST0_FR;
113 else /* mode == FPU_32BIT */ 130 else /* mode == FPU_32BIT */
114 KSTK_STATUS(current) &= ~ST0_FR; 131 KSTK_STATUS(current) &= ~ST0_FR;
@@ -166,8 +183,24 @@ static inline int init_fpu(void)
166 183
167 if (cpu_has_fpu) { 184 if (cpu_has_fpu) {
168 ret = __own_fpu(); 185 ret = __own_fpu();
169 if (!ret) 186 if (!ret) {
187 unsigned int config5 = read_c0_config5();
188
189 /*
190 * Ensure FRE is clear whilst running _init_fpu, since
191 * single precision FP instructions are used. If FRE
192 * was set then we'll just end up initialising all 32
193 * 64b registers.
194 */
195 write_c0_config5(config5 & ~MIPS_CONF5_FRE);
196 enable_fpu_hazard();
197
170 _init_fpu(); 198 _init_fpu();
199
200 /* Restore FRE */
201 write_c0_config5(config5);
202 enable_fpu_hazard();
203 }
171 } else 204 } else
172 fpu_emulator_init_fpu(); 205 fpu_emulator_init_fpu();
173 206
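
With FPU_HYBRID added, the enum values are laid out so the low bit equals the
desired Status.FR value (FPU_32BIT = 0, FPU_64BIT = 1, FPU_AS_IS = 2,
FPU_HYBRID = 3), which is why __enable_fpu() can now derive FR by masking
instead of casting. A quick check of the encoding:

	int fr;

	fr = (int)FPU_32BIT & FPU_FR_MASK;	/* 0 & 1 == 0: FR = 0 */
	fr = (int)FPU_64BIT & FPU_FR_MASK;	/* 1 & 1 == 1: FR = 1, FRE = 0 */
	fr = (int)FPU_HYBRID & FPU_FR_MASK;	/* 3 & 1 == 1: FR = 1, FRE = 1 */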
diff --git a/arch/mips/include/asm/futex.h b/arch/mips/include/asm/futex.h
index 194cda0396a3..ef9987a61d88 100644
--- a/arch/mips/include/asm/futex.h
+++ b/arch/mips/include/asm/futex.h
@@ -14,6 +14,7 @@
14#include <linux/uaccess.h> 14#include <linux/uaccess.h>
15#include <asm/asm-eva.h> 15#include <asm/asm-eva.h>
16#include <asm/barrier.h> 16#include <asm/barrier.h>
17#include <asm/compiler.h>
17#include <asm/errno.h> 18#include <asm/errno.h>
18#include <asm/war.h> 19#include <asm/war.h>
19 20
@@ -32,6 +33,7 @@
32 " beqzl $1, 1b \n" \ 33 " beqzl $1, 1b \n" \
33 __WEAK_LLSC_MB \ 34 __WEAK_LLSC_MB \
34 "3: \n" \ 35 "3: \n" \
36 " .insn \n" \
35 " .set pop \n" \ 37 " .set pop \n" \
36 " .set mips0 \n" \ 38 " .set mips0 \n" \
37 " .section .fixup,\"ax\" \n" \ 39 " .section .fixup,\"ax\" \n" \
@@ -42,8 +44,10 @@
42 " "__UA_ADDR "\t1b, 4b \n" \ 44 " "__UA_ADDR "\t1b, 4b \n" \
43 " "__UA_ADDR "\t2b, 4b \n" \ 45 " "__UA_ADDR "\t2b, 4b \n" \
44 " .previous \n" \ 46 " .previous \n" \
45 : "=r" (ret), "=&r" (oldval), "=R" (*uaddr) \ 47 : "=r" (ret), "=&r" (oldval), \
46 : "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT) \ 48 "=" GCC_OFF12_ASM() (*uaddr) \
49 : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \
50 "i" (-EFAULT) \
47 : "memory"); \ 51 : "memory"); \
48 } else if (cpu_has_llsc) { \ 52 } else if (cpu_has_llsc) { \
49 __asm__ __volatile__( \ 53 __asm__ __volatile__( \
@@ -58,6 +62,7 @@
58 " beqz $1, 1b \n" \ 62 " beqz $1, 1b \n" \
59 __WEAK_LLSC_MB \ 63 __WEAK_LLSC_MB \
60 "3: \n" \ 64 "3: \n" \
65 " .insn \n" \
61 " .set pop \n" \ 66 " .set pop \n" \
62 " .set mips0 \n" \ 67 " .set mips0 \n" \
63 " .section .fixup,\"ax\" \n" \ 68 " .section .fixup,\"ax\" \n" \
@@ -68,8 +73,10 @@
68 " "__UA_ADDR "\t1b, 4b \n" \ 73 " "__UA_ADDR "\t1b, 4b \n" \
69 " "__UA_ADDR "\t2b, 4b \n" \ 74 " "__UA_ADDR "\t2b, 4b \n" \
70 " .previous \n" \ 75 " .previous \n" \
71 : "=r" (ret), "=&r" (oldval), "=R" (*uaddr) \ 76 : "=r" (ret), "=&r" (oldval), \
72 : "0" (0), "R" (*uaddr), "Jr" (oparg), "i" (-EFAULT) \ 77 "=" GCC_OFF12_ASM() (*uaddr) \
78 : "0" (0), GCC_OFF12_ASM() (*uaddr), "Jr" (oparg), \
79 "i" (-EFAULT) \
73 : "memory"); \ 80 : "memory"); \
74 } else \ 81 } else \
75 ret = -ENOSYS; \ 82 ret = -ENOSYS; \
@@ -157,6 +164,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
157 " beqzl $1, 1b \n" 164 " beqzl $1, 1b \n"
158 __WEAK_LLSC_MB 165 __WEAK_LLSC_MB
159 "3: \n" 166 "3: \n"
167 " .insn \n"
160 " .set pop \n" 168 " .set pop \n"
161 " .section .fixup,\"ax\" \n" 169 " .section .fixup,\"ax\" \n"
162 "4: li %0, %6 \n" 170 "4: li %0, %6 \n"
@@ -166,8 +174,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
166 " "__UA_ADDR "\t1b, 4b \n" 174 " "__UA_ADDR "\t1b, 4b \n"
167 " "__UA_ADDR "\t2b, 4b \n" 175 " "__UA_ADDR "\t2b, 4b \n"
168 " .previous \n" 176 " .previous \n"
169 : "+r" (ret), "=&r" (val), "=R" (*uaddr) 177 : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr)
170 : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) 178 : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
179 "i" (-EFAULT)
171 : "memory"); 180 : "memory");
172 } else if (cpu_has_llsc) { 181 } else if (cpu_has_llsc) {
173 __asm__ __volatile__( 182 __asm__ __volatile__(
@@ -184,6 +193,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
184 " beqz $1, 1b \n" 193 " beqz $1, 1b \n"
185 __WEAK_LLSC_MB 194 __WEAK_LLSC_MB
186 "3: \n" 195 "3: \n"
196 " .insn \n"
187 " .set pop \n" 197 " .set pop \n"
188 " .section .fixup,\"ax\" \n" 198 " .section .fixup,\"ax\" \n"
189 "4: li %0, %6 \n" 199 "4: li %0, %6 \n"
@@ -193,8 +203,9 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
193 " "__UA_ADDR "\t1b, 4b \n" 203 " "__UA_ADDR "\t1b, 4b \n"
194 " "__UA_ADDR "\t2b, 4b \n" 204 " "__UA_ADDR "\t2b, 4b \n"
195 " .previous \n" 205 " .previous \n"
196 : "+r" (ret), "=&r" (val), "=R" (*uaddr) 206 : "+r" (ret), "=&r" (val), "=" GCC_OFF12_ASM() (*uaddr)
197 : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) 207 : GCC_OFF12_ASM() (*uaddr), "Jr" (oldval), "Jr" (newval),
208 "i" (-EFAULT)
198 : "memory"); 209 : "memory");
199 } else 210 } else
200 return -ENOSYS; 211 return -ENOSYS;
diff --git a/arch/mips/include/asm/hpet.h b/arch/mips/include/asm/hpet.h
new file mode 100644
index 000000000000..18a8f778bfaa
--- /dev/null
+++ b/arch/mips/include/asm/hpet.h
@@ -0,0 +1,73 @@
1#ifndef _ASM_HPET_H
2#define _ASM_HPET_H
3
4#ifdef CONFIG_RS780_HPET
5
6#define HPET_MMAP_SIZE 1024
7
8#define HPET_ID 0x000
9#define HPET_PERIOD 0x004
10#define HPET_CFG 0x010
11#define HPET_STATUS 0x020
12#define HPET_COUNTER 0x0f0
13
14#define HPET_Tn_CFG(n) (0x100 + 0x20 * n)
15#define HPET_Tn_CMP(n) (0x108 + 0x20 * n)
16#define HPET_Tn_ROUTE(n) (0x110 + 0x20 * n)
17
18#define HPET_T0_IRS 0x001
19#define HPET_T1_IRS 0x002
20#define HPET_T3_IRS 0x004
21
22#define HPET_T0_CFG 0x100
23#define HPET_T0_CMP 0x108
24#define HPET_T0_ROUTE 0x110
25#define HPET_T1_CFG 0x120
26#define HPET_T1_CMP 0x128
27#define HPET_T1_ROUTE 0x130
28#define HPET_T2_CFG 0x140
29#define HPET_T2_CMP 0x148
30#define HPET_T2_ROUTE 0x150
31
32#define HPET_ID_REV 0x000000ff
33#define HPET_ID_NUMBER 0x00001f00
34#define HPET_ID_64BIT 0x00002000
35#define HPET_ID_LEGSUP 0x00008000
36#define HPET_ID_VENDOR 0xffff0000
37#define HPET_ID_NUMBER_SHIFT 8
38#define HPET_ID_VENDOR_SHIFT 16
39
40#define HPET_CFG_ENABLE 0x001
41#define HPET_CFG_LEGACY 0x002
42#define HPET_LEGACY_8254 2
43#define HPET_LEGACY_RTC 8
44
45#define HPET_TN_LEVEL 0x0002
46#define HPET_TN_ENABLE 0x0004
47#define HPET_TN_PERIODIC 0x0008
48#define HPET_TN_PERIODIC_CAP 0x0010
49#define HPET_TN_64BIT_CAP 0x0020
50#define HPET_TN_SETVAL 0x0040
51#define HPET_TN_32BIT 0x0100
52#define HPET_TN_ROUTE 0x3e00
53#define HPET_TN_FSB 0x4000
54#define HPET_TN_FSB_CAP 0x8000
55#define HPET_TN_ROUTE_SHIFT 9
56
57/* Max HPET Period is 10^8 femto sec as in HPET spec */
58#define HPET_MAX_PERIOD 100000000UL
59/*
60 * Min HPET period is 10^5 femto sec just for safety. If it is less than this,
 61 * then the 32-bit HPET counter wraps around in less than 0.5 sec.
62 */
63#define HPET_MIN_PERIOD 100000UL
64
65#define HPET_ADDR 0x20000
66#define HPET_MMIO_ADDR 0x90000e0000020000
67#define HPET_FREQ 14318780
68#define HPET_COMPARE_VAL ((HPET_FREQ + HZ / 2) / HZ)
69#define HPET_T0_IRQ 0
70
71extern void __init setup_hpet_timer(void);
72#endif /* CONFIG_RS780_HPET */
73#endif /* _ASM_HPET_H */
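
HPET_COMPARE_VAL rounds the per-tick counter increment to the nearest count
rather than truncating. Worked example, assuming HZ is 250 (an illustrative
value, not taken from the patch):

	/* (14318780 + 250/2) / 250 == 14318905 / 250 == 57275 counts per tick */
	unsigned int delta = (HPET_FREQ + HZ / 2) / HZ;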
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 933b50e125a0..9e777cd42b67 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -167,7 +167,7 @@ static inline void * isa_bus_to_virt(unsigned long address)
167 */ 167 */
168#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT) 168#define page_to_phys(page) ((dma_addr_t)page_to_pfn(page) << PAGE_SHIFT)
169 169
170extern void __iomem * __ioremap(phys_t offset, phys_t size, unsigned long flags); 170extern void __iomem * __ioremap(phys_addr_t offset, phys_addr_t size, unsigned long flags);
171extern void __iounmap(const volatile void __iomem *addr); 171extern void __iounmap(const volatile void __iomem *addr);
172 172
173#ifndef CONFIG_PCI 173#ifndef CONFIG_PCI
@@ -175,7 +175,7 @@ struct pci_dev;
175static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {} 175static inline void pci_iounmap(struct pci_dev *dev, void __iomem *addr) {}
176#endif 176#endif
177 177
178static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size, 178static inline void __iomem * __ioremap_mode(phys_addr_t offset, unsigned long size,
179 unsigned long flags) 179 unsigned long flags)
180{ 180{
181 void __iomem *addr = plat_ioremap(offset, size, flags); 181 void __iomem *addr = plat_ioremap(offset, size, flags);
@@ -183,7 +183,7 @@ static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
183 if (addr) 183 if (addr)
184 return addr; 184 return addr;
185 185
186#define __IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL)) 186#define __IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))
187 187
188 if (cpu_has_64bit_addresses) { 188 if (cpu_has_64bit_addresses) {
189 u64 base = UNCAC_BASE; 189 u64 base = UNCAC_BASE;
@@ -197,7 +197,7 @@ static inline void __iomem * __ioremap_mode(phys_t offset, unsigned long size,
197 return (void __iomem *) (unsigned long) (base + offset); 197 return (void __iomem *) (unsigned long) (base + offset);
198 } else if (__builtin_constant_p(offset) && 198 } else if (__builtin_constant_p(offset) &&
199 __builtin_constant_p(size) && __builtin_constant_p(flags)) { 199 __builtin_constant_p(size) && __builtin_constant_p(flags)) {
200 phys_t phys_addr, last_addr; 200 phys_addr_t phys_addr, last_addr;
201 201
202 phys_addr = fixup_bigphys_addr(offset, size); 202 phys_addr = fixup_bigphys_addr(offset, size);
203 203
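
__IS_LOW512() tests whether a physical address fits in the 512 MB window that
can be reached through the fixed uncached mapping: any address bit at or
above bit 29 fails the test. For example:

	__IS_LOW512(0x1fffffffULL);	/* true: last byte of the low 512 MB */
	__IS_LOW512(0x20000000ULL);	/* false: bit 29 set, above the window */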
diff --git a/arch/mips/include/asm/irq.h b/arch/mips/include/asm/irq.h
index 39f07aec640c..5a4e1bb8fb1b 100644
--- a/arch/mips/include/asm/irq.h
+++ b/arch/mips/include/asm/irq.h
@@ -48,4 +48,7 @@ extern int cp0_compare_irq;
48extern int cp0_compare_irq_shift; 48extern int cp0_compare_irq_shift;
49extern int cp0_perfcount_irq; 49extern int cp0_perfcount_irq;
50 50
51void arch_trigger_all_cpu_backtrace(bool);
52#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
53
51#endif /* _ASM_IRQ_H */ 54#endif /* _ASM_IRQ_H */
diff --git a/arch/mips/include/asm/irq_cpu.h b/arch/mips/include/asm/irq_cpu.h
index 3f11fdb3ed8c..39a160bb41dc 100644
--- a/arch/mips/include/asm/irq_cpu.h
+++ b/arch/mips/include/asm/irq_cpu.h
@@ -19,8 +19,8 @@ extern void rm9k_cpu_irq_init(void);
19 19
20#ifdef CONFIG_IRQ_DOMAIN 20#ifdef CONFIG_IRQ_DOMAIN
21struct device_node; 21struct device_node;
22extern int mips_cpu_intc_init(struct device_node *of_node, 22extern int mips_cpu_irq_of_init(struct device_node *of_node,
23 struct device_node *parent); 23 struct device_node *parent);
24#endif 24#endif
25 25
26#endif /* _ASM_IRQ_CPU_H */ 26#endif /* _ASM_IRQ_CPU_H */
diff --git a/arch/mips/include/asm/mach-ath25/ath25_platform.h b/arch/mips/include/asm/mach-ath25/ath25_platform.h
new file mode 100644
index 000000000000..4f4ee4f9e5ec
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath25/ath25_platform.h
@@ -0,0 +1,73 @@
1#ifndef __ASM_MACH_ATH25_PLATFORM_H
2#define __ASM_MACH_ATH25_PLATFORM_H
3
4#include <linux/etherdevice.h>
5
6/*
7 * This is board-specific data that is stored in a "fixed" location in flash.
8 * It is shared across operating systems, so it should not be changed lightly.
9 * The main reason we need it is in order to extract the ethernet MAC
10 * address(es).
11 */
12struct ath25_boarddata {
13 u32 magic; /* board data is valid */
14#define ATH25_BD_MAGIC 0x35333131 /* "5311", for all 531x/231x platforms */
15 u16 cksum; /* checksum (starting with BD_REV 2) */
16 u16 rev; /* revision of this struct */
17#define BD_REV 4
18 char board_name[64]; /* Name of board */
19 u16 major; /* Board major number */
20 u16 minor; /* Board minor number */
21 u32 flags; /* Board configuration */
22#define BD_ENET0 0x00000001 /* ENET0 is stuffed */
23#define BD_ENET1 0x00000002 /* ENET1 is stuffed */
24#define BD_UART1 0x00000004 /* UART1 is stuffed */
25#define BD_UART0 0x00000008 /* UART0 is stuffed (dma) */
26#define BD_RSTFACTORY 0x00000010 /* Reset factory defaults stuffed */
27#define BD_SYSLED 0x00000020 /* System LED stuffed */
28#define BD_EXTUARTCLK 0x00000040 /* External UART clock */
29#define BD_CPUFREQ 0x00000080 /* cpu freq is valid in nvram */
30#define BD_SYSFREQ 0x00000100 /* sys freq is set in nvram */
31#define BD_WLAN0 0x00000200 /* Enable WLAN0 */
32#define BD_MEMCAP 0x00000400 /* CAP SDRAM @ mem_cap for testing */
33#define BD_DISWATCHDOG 0x00000800 /* disable system watchdog */
34#define BD_WLAN1 0x00001000 /* Enable WLAN1 (ar5212) */
35#define BD_ISCASPER 0x00002000 /* FLAG for AR2312 */
36#define BD_WLAN0_2G_EN 0x00004000 /* FLAG for radio0_2G */
 37#define BD_WLAN0_5G_EN 0x00008000 /* FLAG for radio0_5G */
 38#define BD_WLAN1_2G_EN 0x00020000 /* FLAG for radio1_2G */
 39#define BD_WLAN1_5G_EN 0x00040000 /* FLAG for radio1_5G */
40 u16 reset_config_gpio; /* Reset factory GPIO pin */
41 u16 sys_led_gpio; /* System LED GPIO pin */
42
43 u32 cpu_freq; /* CPU core frequency in Hz */
44 u32 sys_freq; /* System frequency in Hz */
45 u32 cnt_freq; /* Calculated C0_COUNT frequency */
46
47 u8 wlan0_mac[ETH_ALEN];
48 u8 enet0_mac[ETH_ALEN];
49 u8 enet1_mac[ETH_ALEN];
50
51 u16 pci_id; /* Pseudo PCIID for common code */
52 u16 mem_cap; /* cap bank1 in MB */
53
54 /* version 3 */
55 u8 wlan1_mac[ETH_ALEN]; /* (ar5212) */
56};
57
58#define BOARD_CONFIG_BUFSZ 0x1000
59
60/*
61 * Platform device information for the Wireless MAC
62 */
63struct ar231x_board_config {
64 u16 devid;
65
66 /* board config data */
67 struct ath25_boarddata *config;
68
69 /* radio calibration data */
70 const char *radio;
71};
72
73#endif /* __ASM_MACH_ATH25_PLATFORM_H */
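
Since ath25_boarddata lives at a fixed flash location shared with other
operating systems, a consumer should sanity-check it before trusting it. A
hypothetical check (the helper name is made up for illustration):

	static bool ath25_boarddata_looks_valid(const struct ath25_boarddata *bd)
	{
		/* magic must match and the revision must be one we understand */
		return bd->magic == ATH25_BD_MAGIC && bd->rev <= BD_REV;
	}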
diff --git a/arch/mips/include/asm/mach-ath25/cpu-feature-overrides.h b/arch/mips/include/asm/mach-ath25/cpu-feature-overrides.h
new file mode 100644
index 000000000000..ade0356df257
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath25/cpu-feature-overrides.h
@@ -0,0 +1,64 @@
1/*
2 * Atheros AR231x/AR531x SoC specific CPU feature overrides
3 *
4 * Copyright (C) 2008 Gabor Juhos <juhosg@openwrt.org>
5 *
6 * This file was derived from: include/asm-mips/cpu-features.h
7 * Copyright (C) 2003, 2004 Ralf Baechle
8 * Copyright (C) 2004 Maciej W. Rozycki
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published
12 * by the Free Software Foundation.
13 *
14 */
15#ifndef __ASM_MACH_ATH25_CPU_FEATURE_OVERRIDES_H
16#define __ASM_MACH_ATH25_CPU_FEATURE_OVERRIDES_H
17
18/*
 19 * The Atheros AR531x/AR231x SoCs have a MIPS 4Kc/4KEc core.
20 */
21#define cpu_has_tlb 1
22#define cpu_has_4kex 1
23#define cpu_has_3k_cache 0
24#define cpu_has_4k_cache 1
25#define cpu_has_tx39_cache 0
26#define cpu_has_sb1_cache 0
27#define cpu_has_fpu 0
28#define cpu_has_32fpr 0
29#define cpu_has_counter 1
30#define cpu_has_ejtag 1
31
32#if !defined(CONFIG_SOC_AR5312)
33# define cpu_has_llsc 1
34#else
35/*
 36 * The MIPS 4Kc V0.9 core in the AR5312/AR2312 has problems with the
37 * ll/sc instructions.
38 */
39# define cpu_has_llsc 0
40#endif
41
42#define cpu_has_mips16 0
43#define cpu_has_mdmx 0
44#define cpu_has_mips3d 0
45#define cpu_has_smartmips 0
46
47#define cpu_has_mips32r1 1
48
49#if !defined(CONFIG_SOC_AR5312)
50# define cpu_has_mips32r2 1
51#endif
52
53#define cpu_has_mips64r1 0
54#define cpu_has_mips64r2 0
55
56#define cpu_has_dsp 0
57#define cpu_has_mipsmt 0
58
59#define cpu_has_64bits 0
60#define cpu_has_64bit_zero_reg 0
61#define cpu_has_64bit_gp_regs 0
62#define cpu_has_64bit_addresses 0
63
64#endif /* __ASM_MACH_ATH25_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-ath25/dma-coherence.h b/arch/mips/include/asm/mach-ath25/dma-coherence.h
new file mode 100644
index 000000000000..d8009c93a465
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath25/dma-coherence.h
@@ -0,0 +1,82 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
7 * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org>
8 *
9 */
10#ifndef __ASM_MACH_ATH25_DMA_COHERENCE_H
11#define __ASM_MACH_ATH25_DMA_COHERENCE_H
12
13#include <linux/device.h>
14
15/*
16 * We need some arbitrary non-zero value to be programmed to the BAR1 register
 17 * of the PCI host controller to enable DMA. The same value should be used as
 18 * the offset to calculate the physical address of the DMA buffer for PCI devices.
19 */
20#define AR2315_PCI_HOST_SDRAM_BASEADDR 0x20000000
21
22static inline dma_addr_t ath25_dev_offset(struct device *dev)
23{
24#ifdef CONFIG_PCI
25 extern struct bus_type pci_bus_type;
26
27 if (dev && dev->bus == &pci_bus_type)
28 return AR2315_PCI_HOST_SDRAM_BASEADDR;
29#endif
30 return 0;
31}
32
33static inline dma_addr_t
34plat_map_dma_mem(struct device *dev, void *addr, size_t size)
35{
36 return virt_to_phys(addr) + ath25_dev_offset(dev);
37}
38
39static inline dma_addr_t
40plat_map_dma_mem_page(struct device *dev, struct page *page)
41{
42 return page_to_phys(page) + ath25_dev_offset(dev);
43}
44
45static inline unsigned long
46plat_dma_addr_to_phys(struct device *dev, dma_addr_t dma_addr)
47{
48 return dma_addr - ath25_dev_offset(dev);
49}
50
51static inline void
52plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr, size_t size,
53 enum dma_data_direction direction)
54{
55}
56
57static inline int plat_dma_supported(struct device *dev, u64 mask)
58{
59 return 1;
60}
61
62static inline void plat_extra_sync_for_device(struct device *dev)
63{
64}
65
66static inline int plat_dma_mapping_error(struct device *dev,
67 dma_addr_t dma_addr)
68{
69 return 0;
70}
71
72static inline int plat_device_is_coherent(struct device *dev)
73{
74#ifdef CONFIG_DMA_COHERENT
75 return 1;
76#endif
77#ifdef CONFIG_DMA_NONCOHERENT
78 return 0;
79#endif
80}
81
82#endif /* __ASM_MACH_ATH25_DMA_COHERENCE_H */
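
The net effect of ath25_dev_offset() is that PCI devices see CPU physical
addresses shifted up by the BAR1 window base, while every other bus gets a
1:1 mapping. A sketch, assuming dev points at a PCI device and the
allocation succeeds:

	void *buf = kmalloc(SZ_1K, GFP_DMA);
	dma_addr_t bus = plat_map_dma_mem(dev, buf, SZ_1K);
	/* bus == virt_to_phys(buf) + AR2315_PCI_HOST_SDRAM_BASEADDR here;
	   for a non-PCI device it would equal virt_to_phys(buf) */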
diff --git a/arch/mips/include/asm/mach-ath25/gpio.h b/arch/mips/include/asm/mach-ath25/gpio.h
new file mode 100644
index 000000000000..713564b8e8ef
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath25/gpio.h
@@ -0,0 +1,16 @@
1#ifndef __ASM_MACH_ATH25_GPIO_H
2#define __ASM_MACH_ATH25_GPIO_H
3
4#include <asm-generic/gpio.h>
5
6#define gpio_get_value __gpio_get_value
7#define gpio_set_value __gpio_set_value
8#define gpio_cansleep __gpio_cansleep
9#define gpio_to_irq __gpio_to_irq
10
11static inline int irq_to_gpio(unsigned irq)
12{
13 return -EINVAL;
14}
15
16#endif /* __ASM_MACH_ATH25_GPIO_H */
diff --git a/arch/mips/include/asm/mach-ath25/war.h b/arch/mips/include/asm/mach-ath25/war.h
new file mode 100644
index 000000000000..e3a5250ebd67
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath25/war.h
@@ -0,0 +1,25 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2008 Felix Fietkau <nbd@openwrt.org>
7 */
8#ifndef __ASM_MACH_ATH25_WAR_H
9#define __ASM_MACH_ATH25_WAR_H
10
11#define R4600_V1_INDEX_ICACHEOP_WAR 0
12#define R4600_V1_HIT_CACHEOP_WAR 0
13#define R4600_V2_HIT_CACHEOP_WAR 0
14#define R5432_CP0_INTERRUPT_WAR 0
15#define BCM1250_M3_WAR 0
16#define SIBYTE_1956_WAR 0
17#define MIPS4K_ICACHE_REFILL_WAR 0
18#define MIPS_CACHE_SYNC_WAR 0
19#define TX49XX_ICACHE_INDEX_INV_WAR 0
20#define RM9000_CDEX_SMP_WAR 0
21#define ICACHE_REFILLS_WORKAROUND_WAR 0
22#define R10000_LLSC_WAR 0
23#define MIPS34K_MISSED_ITLB_WAR 0
24
25#endif /* __ASM_MACH_ATH25_WAR_H */
diff --git a/arch/mips/include/asm/mach-au1x00/ioremap.h b/arch/mips/include/asm/mach-au1x00/ioremap.h
index 75a94ad3ac91..99fea1fbb4f5 100644
--- a/arch/mips/include/asm/mach-au1x00/ioremap.h
+++ b/arch/mips/include/asm/mach-au1x00/ioremap.h
@@ -11,10 +11,10 @@
11 11
12#include <linux/types.h> 12#include <linux/types.h>
13 13
14#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_PCI) 14#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_PCI)
15extern phys_t __fixup_bigphys_addr(phys_t, phys_t); 15extern phys_addr_t __fixup_bigphys_addr(phys_addr_t, phys_addr_t);
16#else 16#else
17static inline phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size) 17static inline phys_addr_t __fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size)
18{ 18{
19 return phys_addr; 19 return phys_addr;
20} 20}
@@ -23,12 +23,12 @@ static inline phys_t __fixup_bigphys_addr(phys_t phys_addr, phys_t size)
23/* 23/*
24 * Allow physical addresses to be fixed up to help 36-bit peripherals. 24 * Allow physical addresses to be fixed up to help 36-bit peripherals.
25 */ 25 */
26static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size) 26static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size)
27{ 27{
28 return __fixup_bigphys_addr(phys_addr, size); 28 return __fixup_bigphys_addr(phys_addr, size);
29} 29}
30 30
31static inline void __iomem *plat_ioremap(phys_t offset, unsigned long size, 31static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size,
32 unsigned long flags) 32 unsigned long flags)
33{ 33{
34 return NULL; 34 return NULL;
diff --git a/arch/mips/include/asm/mach-bcm3384/dma-coherence.h b/arch/mips/include/asm/mach-bcm3384/dma-coherence.h
new file mode 100644
index 000000000000..a3be8e50e1f0
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm3384/dma-coherence.h
@@ -0,0 +1,48 @@
1/*
2 * Copyright (C) 2006 Ralf Baechle <ralf@linux-mips.org>
3 * Copyright (C) 2009 Broadcom Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#ifndef __ASM_MACH_BCM3384_DMA_COHERENCE_H
16#define __ASM_MACH_BCM3384_DMA_COHERENCE_H
17
18struct device;
19
20extern dma_addr_t plat_map_dma_mem(struct device *dev, void *addr, size_t size);
21extern dma_addr_t plat_map_dma_mem_page(struct device *dev, struct page *page);
22extern unsigned long plat_dma_addr_to_phys(struct device *dev,
23 dma_addr_t dma_addr);
24
25static inline void plat_unmap_dma_mem(struct device *dev, dma_addr_t dma_addr,
26 size_t size, enum dma_data_direction direction)
27{
28}
29
30static inline int plat_dma_supported(struct device *dev, u64 mask)
31{
32 /*
33 * we fall back to GFP_DMA when the mask isn't all 1s,
34 * so we can't guarantee allocations that must be
 35 * within a tighter range than GFP_DMA.
36 */
37 if (mask < DMA_BIT_MASK(24))
38 return 0;
39
40 return 1;
41}
42
43static inline int plat_device_is_coherent(struct device *dev)
44{
45 return 0;
46}
47
48#endif /* __ASM_MACH_BCM3384_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-bcm3384/war.h b/arch/mips/include/asm/mach-bcm3384/war.h
new file mode 100644
index 000000000000..59d7599059b0
--- /dev/null
+++ b/arch/mips/include/asm/mach-bcm3384/war.h
@@ -0,0 +1,24 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2002, 2004, 2007 by Ralf Baechle <ralf@linux-mips.org>
7 */
8#ifndef __ASM_MIPS_MACH_BCM3384_WAR_H
9#define __ASM_MIPS_MACH_BCM3384_WAR_H
10
11#define R4600_V1_INDEX_ICACHEOP_WAR 0
12#define R4600_V1_HIT_CACHEOP_WAR 0
13#define R4600_V2_HIT_CACHEOP_WAR 0
14#define R5432_CP0_INTERRUPT_WAR 0
15#define BCM1250_M3_WAR 0
16#define SIBYTE_1956_WAR 0
17#define MIPS4K_ICACHE_REFILL_WAR 0
18#define MIPS_CACHE_SYNC_WAR 0
19#define TX49XX_ICACHE_INDEX_INV_WAR 0
20#define ICACHE_REFILLS_WORKAROUND_WAR 0
21#define R10000_LLSC_WAR 0
22#define MIPS34K_MISSED_ITLB_WAR 0
23
24#endif /* __ASM_MIPS_MACH_BCM3384_WAR_H */
diff --git a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h
index 36a3fc1aa3ae..ee59ffe99922 100644
--- a/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h
+++ b/arch/mips/include/asm/mach-bcm47xx/bcm47xx_nvram.h
@@ -14,40 +14,8 @@
14#include <linux/types.h> 14#include <linux/types.h>
15#include <linux/kernel.h> 15#include <linux/kernel.h>
16 16
17struct nvram_header { 17int bcm47xx_nvram_init_from_mem(u32 base, u32 lim);
18 u32 magic; 18int bcm47xx_nvram_getenv(const char *name, char *val, size_t val_len);
19 u32 len;
20 u32 crc_ver_init; /* 0:7 crc, 8:15 ver, 16:31 sdram_init */
21 u32 config_refresh; /* 0:15 sdram_config, 16:31 sdram_refresh */
22 u32 config_ncdl; /* ncdl values for memc */
23};
24
25#define NVRAM_HEADER 0x48534C46 /* 'FLSH' */
26#define NVRAM_VERSION 1
27#define NVRAM_HEADER_SIZE 20
28#define NVRAM_SPACE 0x8000
29
30#define FLASH_MIN 0x00020000 /* Minimum flash size */
31
32#define NVRAM_MAX_VALUE_LEN 255
33#define NVRAM_MAX_PARAM_LEN 64
34
35extern int bcm47xx_nvram_getenv(char *name, char *val, size_t val_len);
36
37static inline void bcm47xx_nvram_parse_macaddr(char *buf, u8 macaddr[6])
38{
39 if (strchr(buf, ':'))
40 sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0],
41 &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4],
42 &macaddr[5]);
43 else if (strchr(buf, '-'))
44 sscanf(buf, "%hhx-%hhx-%hhx-%hhx-%hhx-%hhx", &macaddr[0],
45 &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4],
46 &macaddr[5]);
47 else
48 printk(KERN_WARNING "Can not parse mac address: %s\n", buf);
49}
50
51int bcm47xx_nvram_gpio_pin(const char *name); 19int bcm47xx_nvram_gpio_pin(const char *name);
52 20
53#endif /* __BCM47XX_NVRAM_H */ 21#endif /* __BCM47XX_NVRAM_H */
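
The header now exposes only the NVRAM lookup API; the parsing helpers moved
out of line. Typical use (the key name and buffer size are illustrative; the
call returns a negative errno on failure):

	char buf[32];

	if (bcm47xx_nvram_getenv("boardtype", buf, sizeof(buf)) >= 0)
		pr_info("board type: %s\n", buf);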
diff --git a/arch/mips/include/asm/mach-bcm63xx/ioremap.h b/arch/mips/include/asm/mach-bcm63xx/ioremap.h
index ff15e3b14e7a..aea6e64b828f 100644
--- a/arch/mips/include/asm/mach-bcm63xx/ioremap.h
+++ b/arch/mips/include/asm/mach-bcm63xx/ioremap.h
@@ -3,12 +3,12 @@
3 3
4#include <bcm63xx_cpu.h> 4#include <bcm63xx_cpu.h>
5 5
6static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size) 6static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size)
7{ 7{
8 return phys_addr; 8 return phys_addr;
9} 9}
10 10
11static inline int is_bcm63xx_internal_registers(phys_t offset) 11static inline int is_bcm63xx_internal_registers(phys_addr_t offset)
12{ 12{
13 switch (bcm63xx_get_cpu_id()) { 13 switch (bcm63xx_get_cpu_id()) {
14 case BCM3368_CPU_ID: 14 case BCM3368_CPU_ID:
@@ -32,7 +32,7 @@ static inline int is_bcm63xx_internal_registers(phys_t offset)
32 return 0; 32 return 0;
33} 33}
34 34
35static inline void __iomem *plat_ioremap(phys_t offset, unsigned long size, 35static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size,
36 unsigned long flags) 36 unsigned long flags)
37{ 37{
38 if (is_bcm63xx_internal_registers(offset)) 38 if (is_bcm63xx_internal_registers(offset))
diff --git a/arch/mips/include/asm/mach-generic/ioremap.h b/arch/mips/include/asm/mach-generic/ioremap.h
index b379938d47f0..513371f7c39c 100644
--- a/arch/mips/include/asm/mach-generic/ioremap.h
+++ b/arch/mips/include/asm/mach-generic/ioremap.h
@@ -15,12 +15,12 @@
15 * Allow physical addresses to be fixed up to help peripherals located 15 * Allow physical addresses to be fixed up to help peripherals located
16 * outside the low 32-bit range -- generic pass-through version. 16 * outside the low 32-bit range -- generic pass-through version.
17 */ 17 */
18static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size) 18static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size)
19{ 19{
20 return phys_addr; 20 return phys_addr;
21} 21}
22 22
23static inline void __iomem *plat_ioremap(phys_t offset, unsigned long size, 23static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size,
24 unsigned long flags) 24 unsigned long flags)
25{ 25{
26 return NULL; 26 return NULL;
diff --git a/arch/mips/include/asm/mach-generic/irq.h b/arch/mips/include/asm/mach-generic/irq.h
index 139cd200e79d..050e18bb1a04 100644
--- a/arch/mips/include/asm/mach-generic/irq.h
+++ b/arch/mips/include/asm/mach-generic/irq.h
@@ -36,4 +36,10 @@
36 36
37#endif /* CONFIG_IRQ_CPU */ 37#endif /* CONFIG_IRQ_CPU */
38 38
39#ifdef CONFIG_MIPS_GIC
40#ifndef MIPS_GIC_IRQ_BASE
41#define MIPS_GIC_IRQ_BASE (MIPS_CPU_IRQ_BASE + 8)
42#endif
43#endif /* CONFIG_MIPS_GIC */
44
39#endif /* __ASM_MACH_GENERIC_IRQ_H */ 45#endif /* __ASM_MACH_GENERIC_IRQ_H */
diff --git a/arch/mips/include/asm/mach-lantiq/lantiq.h b/arch/mips/include/asm/mach-lantiq/lantiq.h
index f196cceb7322..4e5ae6523cb4 100644
--- a/arch/mips/include/asm/mach-lantiq/lantiq.h
+++ b/arch/mips/include/asm/mach-lantiq/lantiq.h
@@ -48,6 +48,8 @@ extern struct clk *clk_get_ppe(void);
48extern unsigned char ltq_boot_select(void); 48extern unsigned char ltq_boot_select(void);
49/* find out what caused the last cpu reset */ 49/* find out what caused the last cpu reset */
50extern int ltq_reset_cause(void); 50extern int ltq_reset_cause(void);
51/* find out the soc type */
52extern int ltq_soc_type(void);
51 53
52#define IOPORT_RESOURCE_START 0x10000000 54#define IOPORT_RESOURCE_START 0x10000000
53#define IOPORT_RESOURCE_END 0xffffffff 55#define IOPORT_RESOURCE_END 0xffffffff
diff --git a/arch/mips/include/asm/mach-loongson/boot_param.h b/arch/mips/include/asm/mach-loongson/boot_param.h
index 3388fc53599e..fa802926523f 100644
--- a/arch/mips/include/asm/mach-loongson/boot_param.h
+++ b/arch/mips/include/asm/mach-loongson/boot_param.h
@@ -10,7 +10,8 @@
10#define VIDEO_ROM 7 10#define VIDEO_ROM 7
11#define ADAPTER_ROM 8 11#define ADAPTER_ROM 8
12#define ACPI_TABLE 9 12#define ACPI_TABLE 9
13#define MAX_MEMORY_TYPE 10 13#define SMBIOS_TABLE 10
14#define MAX_MEMORY_TYPE 11
14 15
15#define LOONGSON3_BOOT_MEM_MAP_MAX 128 16#define LOONGSON3_BOOT_MEM_MAP_MAX 128
16struct efi_memory_map_loongson { 17struct efi_memory_map_loongson {
@@ -42,15 +43,49 @@ struct efi_cpuinfo_loongson {
42 u32 processor_id; /* PRID, e.g. 6305, 6306 */ 43 u32 processor_id; /* PRID, e.g. 6305, 6306 */
43 u32 cputype; /* Loongson_3A/3B, etc. */ 44 u32 cputype; /* Loongson_3A/3B, etc. */
44 u32 total_node; /* num of total numa nodes */ 45 u32 total_node; /* num of total numa nodes */
45 u32 cpu_startup_core_id; /* Core id */ 46 u16 cpu_startup_core_id; /* Boot core id */
47 u16 reserved_cores_mask;
46 u32 cpu_clock_freq; /* cpu_clock */ 48 u32 cpu_clock_freq; /* cpu_clock */
47 u32 nr_cpus; 49 u32 nr_cpus;
48} __packed; 50} __packed;
49 51
52#define MAX_UARTS 64
53struct uart_device {
54 u32 iotype; /* see include/linux/serial_core.h */
55 u32 uartclk;
56 u32 int_offset;
57 u64 uart_base;
58} __packed;
59
60#define MAX_SENSORS 64
61#define SENSOR_TEMPER 0x00000001
62#define SENSOR_VOLTAGE 0x00000002
63#define SENSOR_FAN 0x00000004
64struct sensor_device {
65 char name[32]; /* a formal name */
66 char label[64]; /* a flexible description */
67 u32 type; /* SENSOR_* */
68 u32 id; /* instance id of a sensor-class */
69 u32 fan_policy; /* see loongson_hwmon.h */
70 u32 fan_percent;/* only for constant speed policy */
71 u64 base_addr; /* base address of device registers */
72} __packed;
73
50struct system_loongson { 74struct system_loongson {
51 u16 vers; /* version of system_loongson */ 75 u16 vers; /* version of system_loongson */
52 u32 ccnuma_smp; /* 0: no numa; 1: has numa */ 76 u32 ccnuma_smp; /* 0: no numa; 1: has numa */
53 u32 sing_double_channel; /* 1:single; 2:double */ 77 u32 sing_double_channel; /* 1:single; 2:double */
78 u32 nr_uarts;
79 struct uart_device uarts[MAX_UARTS];
80 u32 nr_sensors;
81 struct sensor_device sensors[MAX_SENSORS];
82 char has_ec;
83 char ec_name[32];
84 u64 ec_base_addr;
85 char has_tcm;
86 char tcm_name[32];
87 u64 tcm_base_addr;
88 u64 workarounds; /* see workarounds.h */
54} __packed; 89} __packed;
55 90
56struct irq_source_routing_table { 91struct irq_source_routing_table {
@@ -149,6 +184,8 @@ struct loongson_system_configuration {
149 u32 nr_nodes; 184 u32 nr_nodes;
150 int cores_per_node; 185 int cores_per_node;
151 int cores_per_package; 186 int cores_per_package;
187 u16 boot_cpu_id;
188 u16 reserved_cpus_mask;
152 enum loongson_cpu_type cputype; 189 enum loongson_cpu_type cputype;
153 u64 ht_control_base; 190 u64 ht_control_base;
154 u64 pci_mem_start_addr; 191 u64 pci_mem_start_addr;
@@ -159,9 +196,15 @@ struct loongson_system_configuration {
159 u64 suspend_addr; 196 u64 suspend_addr;
160 u64 vgabios_addr; 197 u64 vgabios_addr;
161 u32 dma_mask_bits; 198 u32 dma_mask_bits;
199 char ecname[32];
200 u32 nr_uarts;
201 struct uart_device uarts[MAX_UARTS];
202 u32 nr_sensors;
203 struct sensor_device sensors[MAX_SENSORS];
204 u64 workarounds;
162}; 205};
163 206
164extern struct efi_memory_map_loongson *loongson_memmap; 207extern struct efi_memory_map_loongson *loongson_memmap;
165extern struct loongson_system_configuration loongson_sysconf; 208extern struct loongson_system_configuration loongson_sysconf;
166extern int cpuhotplug_workaround; 209
167#endif 210#endif
diff --git a/arch/mips/include/asm/mach-loongson/dma-coherence.h b/arch/mips/include/asm/mach-loongson/dma-coherence.h
index 6a902751cc7f..a90534161bd2 100644
--- a/arch/mips/include/asm/mach-loongson/dma-coherence.h
+++ b/arch/mips/include/asm/mach-loongson/dma-coherence.h
@@ -23,7 +23,7 @@ static inline dma_addr_t plat_map_dma_mem(struct device *dev, void *addr,
23 size_t size) 23 size_t size)
24{ 24{
25#ifdef CONFIG_CPU_LOONGSON3 25#ifdef CONFIG_CPU_LOONGSON3
26 return virt_to_phys(addr); 26 return phys_to_dma(dev, virt_to_phys(addr));
27#else 27#else
28 return virt_to_phys(addr) | 0x80000000; 28 return virt_to_phys(addr) | 0x80000000;
29#endif 29#endif
@@ -33,7 +33,7 @@ static inline dma_addr_t plat_map_dma_mem_page(struct device *dev,
33 struct page *page) 33 struct page *page)
34{ 34{
35#ifdef CONFIG_CPU_LOONGSON3 35#ifdef CONFIG_CPU_LOONGSON3
36 return page_to_phys(page); 36 return phys_to_dma(dev, page_to_phys(page));
37#else 37#else
38 return page_to_phys(page) | 0x80000000; 38 return page_to_phys(page) | 0x80000000;
39#endif 39#endif
@@ -43,7 +43,7 @@ static inline unsigned long plat_dma_addr_to_phys(struct device *dev,
43 dma_addr_t dma_addr) 43 dma_addr_t dma_addr)
44{ 44{
45#if defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_64BIT) 45#if defined(CONFIG_CPU_LOONGSON3) && defined(CONFIG_64BIT)
46 return dma_addr; 46 return dma_to_phys(dev, dma_addr);
47#elif defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT) 47#elif defined(CONFIG_CPU_LOONGSON2F) && defined(CONFIG_64BIT)
48 return (dma_addr > 0x8fffffff) ? dma_addr : (dma_addr & 0x0fffffff); 48 return (dma_addr > 0x8fffffff) ? dma_addr : (dma_addr & 0x0fffffff);
49#else 49#else
diff --git a/arch/mips/include/asm/mach-loongson/irq.h b/arch/mips/include/asm/mach-loongson/irq.h
index 34560bda6626..a281cca5f2fb 100644
--- a/arch/mips/include/asm/mach-loongson/irq.h
+++ b/arch/mips/include/asm/mach-loongson/irq.h
@@ -32,8 +32,7 @@
32#define LOONGSON_INT_ROUTER_LPC LOONGSON_INT_ROUTER_ENTRY(0x0a) 32#define LOONGSON_INT_ROUTER_LPC LOONGSON_INT_ROUTER_ENTRY(0x0a)
33#define LOONGSON_INT_ROUTER_HT1(n) LOONGSON_INT_ROUTER_ENTRY(n + 0x18) 33#define LOONGSON_INT_ROUTER_HT1(n) LOONGSON_INT_ROUTER_ENTRY(n + 0x18)
34 34
35#define LOONGSON_INT_CORE0_INT0 0x11 /* route to int 0 of core 0 */ 35#define LOONGSON_INT_COREx_INTy(x, y) (1<<(x) | 1<<(y+4)) /* route to int y of core x */
36#define LOONGSON_INT_CORE0_INT1 0x21 /* route to int 1 of core 0 */
37 36
38#endif 37#endif
39 38
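
The new macro generalizes the two constants it removes: bits 0-3 select the
target core and bits 4-7 the target interrupt pin, so the old values fall out
as special cases:

	LOONGSON_INT_COREx_INTy(0, 0)	/* (1<<0 | 1<<4) == 0x11, old CORE0_INT0 */
	LOONGSON_INT_COREx_INTy(0, 1)	/* (1<<0 | 1<<5) == 0x21, old CORE0_INT1 */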
diff --git a/arch/mips/include/asm/mach-loongson/loongson.h b/arch/mips/include/asm/mach-loongson/loongson.h
index 92bf76c21441..5459ac09679f 100644
--- a/arch/mips/include/asm/mach-loongson/loongson.h
+++ b/arch/mips/include/asm/mach-loongson/loongson.h
@@ -35,7 +35,7 @@ extern void __init prom_init_cmdline(void);
35extern void __init prom_init_machtype(void); 35extern void __init prom_init_machtype(void);
36extern void __init prom_init_env(void); 36extern void __init prom_init_env(void);
37#ifdef CONFIG_LOONGSON_UART_BASE 37#ifdef CONFIG_LOONGSON_UART_BASE
38extern unsigned long _loongson_uart_base, loongson_uart_base; 38extern unsigned long _loongson_uart_base[], loongson_uart_base[];
39extern void prom_init_loongson_uart_base(void); 39extern void prom_init_loongson_uart_base(void);
40#endif 40#endif
41 41
diff --git a/arch/mips/include/asm/mach-loongson/loongson_hwmon.h b/arch/mips/include/asm/mach-loongson/loongson_hwmon.h
new file mode 100644
index 000000000000..4431fc54a36c
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson/loongson_hwmon.h
@@ -0,0 +1,55 @@
1#ifndef __LOONGSON_HWMON_H_
2#define __LOONGSON_HWMON_H_
3
4#include <linux/types.h>
5
6#define MIN_TEMP 0
7#define MAX_TEMP 255
8#define NOT_VALID_TEMP 999
9
10typedef int (*get_temp_fun)(int);
11extern int loongson3_cpu_temp(int);
12
13/* 0:Max speed, 1:Manual, 2:Auto */
14enum fan_control_mode {
15 FAN_FULL_MODE = 0,
16 FAN_MANUAL_MODE = 1,
17 FAN_AUTO_MODE = 2,
18 FAN_MODE_END
19};
20
21struct temp_range {
22 u8 low;
23 u8 high;
24 u8 level;
25};
26
 27#define CONSTANT_SPEED_POLICY 0 /* at constant speed */
28#define STEP_SPEED_POLICY 1 /* use up/down arrays to describe policy */
29#define KERNEL_HELPER_POLICY 2 /* kernel as a helper to fan control */
30
31#define MAX_STEP_NUM 16
32#define MAX_FAN_LEVEL 255
33
34/* loongson_fan_policy takes effect when the fan works in FAN_AUTO_MODE */
35struct loongson_fan_policy {
36 u8 type;
37
38	/* percent is only used when type is CONSTANT_SPEED_POLICY */
39 u8 percent;
40
41	/* period between two checks, in seconds */
42 u8 adjust_period;
43
44	/* fan adjustment usually depends on a temperature input */
45 get_temp_fun depend_temp;
46
47	/* up_step/down_step are used when type is STEP_SPEED_POLICY */
48 u8 up_step_num;
49 u8 down_step_num;
50 struct temp_range up_step[MAX_STEP_NUM];
51 struct temp_range down_step[MAX_STEP_NUM];
52 struct delayed_work work;
53};
54
55#endif /* __LOONGSON_HWMON_H_*/
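
For orientation, a board using the simplest policy type would fill this struct roughly as follows (the variable name and values are illustrative, not from the patch; loongson3_cpu_temp is the temperature callback declared above):

static struct loongson_fan_policy demo_fan_policy = {
	.type		= CONSTANT_SPEED_POLICY,
	.percent	= 50,			/* fixed 50% duty cycle */
	.adjust_period	= 30,			/* re-evaluate every 30 seconds */
	.depend_temp	= loongson3_cpu_temp,	/* CPU temperature input */
};
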
diff --git a/arch/mips/include/asm/mach-loongson/machine.h b/arch/mips/include/asm/mach-loongson/machine.h
index 228e37847a36..cb2b60249cd2 100644
--- a/arch/mips/include/asm/mach-loongson/machine.h
+++ b/arch/mips/include/asm/mach-loongson/machine.h
@@ -26,7 +26,7 @@
26 26
27#ifdef CONFIG_LOONGSON_MACH3X 27#ifdef CONFIG_LOONGSON_MACH3X
28 28
29#define LOONGSON_MACHTYPE MACH_LEMOTE_A1101 29#define LOONGSON_MACHTYPE MACH_LOONGSON_GENERIC
30 30
31#endif /* CONFIG_LOONGSON_MACH3X */ 31#endif /* CONFIG_LOONGSON_MACH3X */
32 32
diff --git a/arch/mips/include/asm/mach-loongson/topology.h b/arch/mips/include/asm/mach-loongson/topology.h
index 5598ba77d2ef..0d8f3b55bdbc 100644
--- a/arch/mips/include/asm/mach-loongson/topology.h
+++ b/arch/mips/include/asm/mach-loongson/topology.h
@@ -3,7 +3,7 @@
3 3
4#ifdef CONFIG_NUMA 4#ifdef CONFIG_NUMA
5 5
6#define cpu_to_node(cpu) ((cpu) >> 2) 6#define cpu_to_node(cpu) (cpu_logical_map(cpu) >> 2)
7#define parent_node(node) (node) 7#define parent_node(node) (node)
8#define cpumask_of_node(node) (&__node_data[(node)]->cpumask) 8#define cpumask_of_node(node) (&__node_data[(node)]->cpumask)
9 9
diff --git a/arch/mips/include/asm/mach-loongson/workarounds.h b/arch/mips/include/asm/mach-loongson/workarounds.h
new file mode 100644
index 000000000000..e180c1422eae
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson/workarounds.h
@@ -0,0 +1,7 @@
1#ifndef __ASM_MACH_LOONGSON_WORKAROUNDS_H_
2#define __ASM_MACH_LOONGSON_WORKAROUNDS_H_
3
4#define WORKAROUND_CPUFREQ 0x00000001
5#define WORKAROUND_CPUHOTPLUG 0x00000002
6
7#endif
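
These are bitmask flags, so a platform driver is expected to test them against a firmware-provided mask. A hypothetical consumer (the loongson_sysconf.workarounds field is assumed from the companion platform patches, not shown in this header):

static int demo_cpufreq_probe_check(void)
{
	/* bail out on boards whose firmware flags the cpufreq erratum */
	if (loongson_sysconf.workarounds & WORKAROUND_CPUFREQ)
		return -ENODEV;
	return 0;
}
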
diff --git a/arch/mips/include/asm/mach-loongson1/cpufreq.h b/arch/mips/include/asm/mach-loongson1/cpufreq.h
new file mode 100644
index 000000000000..e7765ce30bcf
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson1/cpufreq.h
@@ -0,0 +1,23 @@
1/*
2 * Copyright (c) 2014 Zhang, Keguang <keguang.zhang@gmail.com>
3 *
4 * Loongson 1 CPUFreq platform support.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12
13#ifndef __ASM_MACH_LOONGSON1_CPUFREQ_H
14#define __ASM_MACH_LOONGSON1_CPUFREQ_H
15
16struct plat_ls1x_cpufreq {
17 const char *clk_name; /* CPU clk */
18 const char *osc_clk_name; /* OSC clk */
19 unsigned int max_freq; /* in kHz */
20 unsigned int min_freq; /* in kHz */
21};
22
23#endif /* __ASM_MACH_LOONGSON1_CPUFREQ_H */
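
A board file passes the driver its clock names and scaling limits through this struct; a sketch with illustrative values (the clock names are assumptions, not mandated by the header):

static struct plat_ls1x_cpufreq demo_cpufreq_pdata = {
	.clk_name	= "cpu_clk",	/* the scalable CPU clock */
	.osc_clk_name	= "osc_clk",	/* fixed oscillator reference */
	.max_freq	= 266 * 1000,	/* kHz */
	.min_freq	= 33 * 1000,	/* kHz */
};
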
diff --git a/arch/mips/include/asm/mach-loongson1/loongson1.h b/arch/mips/include/asm/mach-loongson1/loongson1.h
index 5c437c2ba6b3..20e0c2b155dd 100644
--- a/arch/mips/include/asm/mach-loongson1/loongson1.h
+++ b/arch/mips/include/asm/mach-loongson1/loongson1.h
@@ -16,6 +16,7 @@
16#define DEFAULT_MEMSIZE 256 /* If no memsize provided */ 16#define DEFAULT_MEMSIZE 256 /* If no memsize provided */
17 17
18/* Loongson 1 Register Bases */ 18/* Loongson 1 Register Bases */
19#define LS1X_MUX_BASE 0x1fd00420
19#define LS1X_INTC_BASE 0x1fd01040 20#define LS1X_INTC_BASE 0x1fd01040
20#define LS1X_EHCI_BASE 0x1fe00000 21#define LS1X_EHCI_BASE 0x1fe00000
21#define LS1X_OHCI_BASE 0x1fe08000 22#define LS1X_OHCI_BASE 0x1fe08000
@@ -31,7 +32,10 @@
31#define LS1X_I2C0_BASE 0x1fe58000 32#define LS1X_I2C0_BASE 0x1fe58000
32#define LS1X_I2C1_BASE 0x1fe68000 33#define LS1X_I2C1_BASE 0x1fe68000
33#define LS1X_I2C2_BASE 0x1fe70000 34#define LS1X_I2C2_BASE 0x1fe70000
34#define LS1X_PWM_BASE 0x1fe5c000 35#define LS1X_PWM0_BASE 0x1fe5c000
36#define LS1X_PWM1_BASE 0x1fe5c010
37#define LS1X_PWM2_BASE 0x1fe5c020
38#define LS1X_PWM3_BASE 0x1fe5c030
35#define LS1X_WDT_BASE 0x1fe5c060 39#define LS1X_WDT_BASE 0x1fe5c060
36#define LS1X_RTC_BASE 0x1fe64000 40#define LS1X_RTC_BASE 0x1fe64000
37#define LS1X_AC97_BASE 0x1fe74000 41#define LS1X_AC97_BASE 0x1fe74000
@@ -39,6 +43,8 @@
39#define LS1X_CLK_BASE 0x1fe78030 43#define LS1X_CLK_BASE 0x1fe78030
40 44
41#include <regs-clk.h> 45#include <regs-clk.h>
46#include <regs-mux.h>
47#include <regs-pwm.h>
42#include <regs-wdt.h> 48#include <regs-wdt.h>
43 49
44#endif /* __ASM_MACH_LOONGSON1_LOONGSON1_H */ 50#endif /* __ASM_MACH_LOONGSON1_LOONGSON1_H */
diff --git a/arch/mips/include/asm/mach-loongson1/platform.h b/arch/mips/include/asm/mach-loongson1/platform.h
index 30c13e508fff..47de55e0c835 100644
--- a/arch/mips/include/asm/mach-loongson1/platform.h
+++ b/arch/mips/include/asm/mach-loongson1/platform.h
@@ -13,10 +13,12 @@
13 13
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15 15
16extern struct platform_device ls1x_uart_device; 16extern struct platform_device ls1x_uart_pdev;
17extern struct platform_device ls1x_eth0_device; 17extern struct platform_device ls1x_cpufreq_pdev;
18extern struct platform_device ls1x_ehci_device; 18extern struct platform_device ls1x_eth0_pdev;
19extern struct platform_device ls1x_rtc_device; 19extern struct platform_device ls1x_eth1_pdev;
20extern struct platform_device ls1x_ehci_pdev;
21extern struct platform_device ls1x_rtc_pdev;
20 22
21extern void __init ls1x_clk_init(void); 23extern void __init ls1x_clk_init(void);
22extern void __init ls1x_serial_setup(struct platform_device *pdev); 24extern void __init ls1x_serial_setup(struct platform_device *pdev);
diff --git a/arch/mips/include/asm/mach-loongson1/regs-clk.h b/arch/mips/include/asm/mach-loongson1/regs-clk.h
index fb6a3ff9318f..ee2445b10fc3 100644
--- a/arch/mips/include/asm/mach-loongson1/regs-clk.h
+++ b/arch/mips/include/asm/mach-loongson1/regs-clk.h
@@ -20,15 +20,32 @@
20 20
21/* Clock PLL Divisor Register Bits */ 21/* Clock PLL Divisor Register Bits */
22#define DIV_DC_EN (0x1 << 31) 22#define DIV_DC_EN (0x1 << 31)
23#define DIV_DC_RST (0x1 << 30)
23#define DIV_CPU_EN (0x1 << 25) 24#define DIV_CPU_EN (0x1 << 25)
25#define DIV_CPU_RST (0x1 << 24)
24#define DIV_DDR_EN (0x1 << 19) 26#define DIV_DDR_EN (0x1 << 19)
27#define DIV_DDR_RST (0x1 << 18)
28#define RST_DC_EN (0x1 << 5)
29#define RST_DC (0x1 << 4)
30#define RST_DDR_EN (0x1 << 3)
31#define RST_DDR (0x1 << 2)
32#define RST_CPU_EN (0x1 << 1)
33#define RST_CPU 0x1
25 34
26#define DIV_DC_SHIFT 26 35#define DIV_DC_SHIFT 26
27#define DIV_CPU_SHIFT 20 36#define DIV_CPU_SHIFT 20
28#define DIV_DDR_SHIFT 14 37#define DIV_DDR_SHIFT 14
29 38
30#define DIV_DC_WIDTH 5 39#define DIV_DC_WIDTH 4
31#define DIV_CPU_WIDTH 5 40#define DIV_CPU_WIDTH 4
32#define DIV_DDR_WIDTH 5 41#define DIV_DDR_WIDTH 4
42
43#define BYPASS_DC_SHIFT 12
44#define BYPASS_DDR_SHIFT 10
45#define BYPASS_CPU_SHIFT 8
46
47#define BYPASS_DC_WIDTH 1
48#define BYPASS_DDR_WIDTH 1
49#define BYPASS_CPU_WIDTH 1
33 50
34#endif /* __ASM_MACH_LOONGSON1_REGS_CLK_H */ 51#endif /* __ASM_MACH_LOONGSON1_REGS_CLK_H */
diff --git a/arch/mips/include/asm/mach-loongson1/regs-mux.h b/arch/mips/include/asm/mach-loongson1/regs-mux.h
new file mode 100644
index 000000000000..fb1e36efaa19
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson1/regs-mux.h
@@ -0,0 +1,67 @@
1/*
2 * Copyright (c) 2014 Zhang, Keguang <keguang.zhang@gmail.com>
3 *
4 * Loongson 1 MUX Register Definitions.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#ifndef __ASM_MACH_LOONGSON1_REGS_MUX_H
13#define __ASM_MACH_LOONGSON1_REGS_MUX_H
14
15#define LS1X_MUX_REG(x) \
16 ((void __iomem *)KSEG1ADDR(LS1X_MUX_BASE + (x)))
17
18#define LS1X_MUX_CTRL0 LS1X_MUX_REG(0x0)
19#define LS1X_MUX_CTRL1 LS1X_MUX_REG(0x4)
20
21/* MUX CTRL0 Register Bits */
22#define UART0_USE_PWM23 (0x1 << 28)
23#define UART0_USE_PWM01 (0x1 << 27)
24#define UART1_USE_LCD0_5_6_11 (0x1 << 26)
25#define I2C2_USE_CAN1 (0x1 << 25)
26#define I2C1_USE_CAN0 (0x1 << 24)
27#define NAND3_USE_UART5 (0x1 << 23)
28#define NAND3_USE_UART4 (0x1 << 22)
29#define NAND3_USE_UART1_DAT (0x1 << 21)
30#define NAND3_USE_UART1_CTS (0x1 << 20)
31#define NAND3_USE_PWM23 (0x1 << 19)
32#define NAND3_USE_PWM01 (0x1 << 18)
33#define NAND2_USE_UART5 (0x1 << 17)
34#define NAND2_USE_UART4 (0x1 << 16)
35#define NAND2_USE_UART1_DAT (0x1 << 15)
36#define NAND2_USE_UART1_CTS (0x1 << 14)
37#define NAND2_USE_PWM23 (0x1 << 13)
38#define NAND2_USE_PWM01 (0x1 << 12)
39#define NAND1_USE_UART5 (0x1 << 11)
40#define NAND1_USE_UART4 (0x1 << 10)
41#define NAND1_USE_UART1_DAT (0x1 << 9)
42#define NAND1_USE_UART1_CTS (0x1 << 8)
43#define NAND1_USE_PWM23 (0x1 << 7)
44#define NAND1_USE_PWM01 (0x1 << 6)
45#define GMAC1_USE_UART1 (0x1 << 4)
46#define GMAC1_USE_UART0 (0x1 << 3)
47#define LCD_USE_UART0_DAT (0x1 << 2)
48#define LCD_USE_UART15 (0x1 << 1)
49#define LCD_USE_UART0 0x1
50
51/* MUX CTRL1 Register Bits */
52#define USB_RESET (0x1 << 31)
53#define SPI1_CS_USE_PWM01 (0x1 << 24)
54#define SPI1_USE_CAN (0x1 << 23)
55#define DISABLE_DDR_CONFSPACE (0x1 << 20)
56#define DDR32TO16EN (0x1 << 16)
57#define GMAC1_SHUT (0x1 << 13)
58#define GMAC0_SHUT (0x1 << 12)
59#define USB_SHUT (0x1 << 11)
60#define UART1_3_USE_CAN1 (0x1 << 5)
61#define UART1_2_USE_CAN0 (0x1 << 4)
62#define GMAC1_USE_TXCLK (0x1 << 3)
63#define GMAC0_USE_TXCLK (0x1 << 2)
64#define GMAC1_USE_PWM23 (0x1 << 1)
65#define GMAC0_USE_PWM01 0x1
66
67#endif /* __ASM_MACH_LOONGSON1_REGS_MUX_H */
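
Because LS1X_MUX_CTRL0/1 expand to KSEG1 virtual addresses, the bits above are applied with an uncached read-modify-write; for example, routing UART0 onto the PWM01 pins (an illustrative setup, not taken from the patch):

static void __init demo_mux_uart0_on_pwm01(void)
{
	u32 val = __raw_readl(LS1X_MUX_CTRL0);

	__raw_writel(val | UART0_USE_PWM01, LS1X_MUX_CTRL0);
}
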
diff --git a/arch/mips/include/asm/mach-loongson1/regs-pwm.h b/arch/mips/include/asm/mach-loongson1/regs-pwm.h
new file mode 100644
index 000000000000..99f2bcc586f0
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson1/regs-pwm.h
@@ -0,0 +1,29 @@
1/*
2 * Copyright (c) 2014 Zhang, Keguang <keguang.zhang@gmail.com>
3 *
4 * Loongson 1 PWM Register Definitions.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#ifndef __ASM_MACH_LOONGSON1_REGS_PWM_H
13#define __ASM_MACH_LOONGSON1_REGS_PWM_H
14
15/* Loongson 1 PWM Timer Register Definitions */
16#define PWM_CNT 0x0
17#define PWM_HRC 0x4
18#define PWM_LRC 0x8
19#define PWM_CTRL 0xc
20
21/* PWM Control Register Bits */
22#define CNT_RST (0x1 << 7)
23#define INT_SR (0x1 << 6)
24#define INT_EN (0x1 << 5)
25#define PWM_SINGLE (0x1 << 4)
26#define PWM_OE (0x1 << 3)
27#define CNT_EN 0x1
28
29#endif /* __ASM_MACH_LOONGSON1_REGS_PWM_H */
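
The offsets are taken relative to one of the LS1X_PWMn_BASE addresses added earlier; a minimal single-pulse setup could look like this (a sketch, assuming the caller has ioremap()ed the PWM block into base):

static void demo_pwm_single_pulse(void __iomem *base, u32 hrc, u32 lrc)
{
	writel(0, base + PWM_CNT);		/* clear the counter */
	writel(hrc, base + PWM_HRC);		/* high-phase reference */
	writel(lrc, base + PWM_LRC);		/* period reference */
	writel(PWM_OE | PWM_SINGLE | CNT_EN, base + PWM_CTRL);
}
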
diff --git a/arch/mips/include/asm/mach-loongson1/regs-wdt.h b/arch/mips/include/asm/mach-loongson1/regs-wdt.h
index 6574568c2084..c39ee982ad3b 100644
--- a/arch/mips/include/asm/mach-loongson1/regs-wdt.h
+++ b/arch/mips/include/asm/mach-loongson1/regs-wdt.h
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com> 2 * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
3 * 3 *
4 * Loongson 1 watchdog register definitions. 4 * Loongson 1 Watchdog Register Definitions.
5 * 5 *
6 * This program is free software; you can redistribute it and/or modify it 6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the 7 * under the terms of the GNU General Public License as published by the
@@ -12,11 +12,8 @@
12#ifndef __ASM_MACH_LOONGSON1_REGS_WDT_H 12#ifndef __ASM_MACH_LOONGSON1_REGS_WDT_H
13#define __ASM_MACH_LOONGSON1_REGS_WDT_H 13#define __ASM_MACH_LOONGSON1_REGS_WDT_H
14 14
15#define LS1X_WDT_REG(x) \ 15#define WDT_EN 0x0
16 ((void __iomem *)KSEG1ADDR(LS1X_WDT_BASE + (x))) 16#define WDT_TIMER 0x4
17 17#define WDT_SET 0x8
18#define LS1X_WDT_EN LS1X_WDT_REG(0x0)
19#define LS1X_WDT_SET LS1X_WDT_REG(0x4)
20#define LS1X_WDT_TIMER LS1X_WDT_REG(0x8)
21 18
22#endif /* __ASM_MACH_LOONGSON1_REGS_WDT_H */ 19#endif /* __ASM_MACH_LOONGSON1_REGS_WDT_H */
diff --git a/arch/mips/include/asm/mach-malta/irq.h b/arch/mips/include/asm/mach-malta/irq.h
index f2c13d211abb..47cfe64efbb0 100644
--- a/arch/mips/include/asm/mach-malta/irq.h
+++ b/arch/mips/include/asm/mach-malta/irq.h
@@ -2,7 +2,6 @@
2#define __ASM_MACH_MIPS_IRQ_H 2#define __ASM_MACH_MIPS_IRQ_H
3 3
4 4
5#define GIC_NUM_INTRS (24 + NR_CPUS * 2)
6#define NR_IRQS 256 5#define NR_IRQS 256
7 6
8#include_next <irq.h> 7#include_next <irq.h>
diff --git a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
index fc946c835995..2e54b4bff5cf 100644
--- a/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
+++ b/arch/mips/include/asm/mach-pmcs-msp71xx/msp_regops.h
@@ -49,6 +49,7 @@
49 49
50#include <linux/types.h> 50#include <linux/types.h>
51 51
52#include <asm/compiler.h>
52#include <asm/war.h> 53#include <asm/war.h>
53 54
54#ifndef R10000_LLSC_WAR 55#ifndef R10000_LLSC_WAR
@@ -84,8 +85,8 @@ static inline void set_value_reg32(volatile u32 *const addr,
84 " "__beqz"%0, 1b \n" 85 " "__beqz"%0, 1b \n"
85 " nop \n" 86 " nop \n"
86 " .set pop \n" 87 " .set pop \n"
87 : "=&r" (temp), "=m" (*addr) 88 : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
88 : "ir" (~mask), "ir" (value), "m" (*addr)); 89 : "ir" (~mask), "ir" (value), GCC_OFF12_ASM() (*addr));
89} 90}
90 91
91/* 92/*
@@ -105,8 +106,8 @@ static inline void set_reg32(volatile u32 *const addr,
105 " "__beqz"%0, 1b \n" 106 " "__beqz"%0, 1b \n"
106 " nop \n" 107 " nop \n"
107 " .set pop \n" 108 " .set pop \n"
108 : "=&r" (temp), "=m" (*addr) 109 : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
109 : "ir" (mask), "m" (*addr)); 110 : "ir" (mask), GCC_OFF12_ASM() (*addr));
110} 111}
111 112
112/* 113/*
@@ -126,8 +127,8 @@ static inline void clear_reg32(volatile u32 *const addr,
126 " "__beqz"%0, 1b \n" 127 " "__beqz"%0, 1b \n"
127 " nop \n" 128 " nop \n"
128 " .set pop \n" 129 " .set pop \n"
129 : "=&r" (temp), "=m" (*addr) 130 : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
130 : "ir" (~mask), "m" (*addr)); 131 : "ir" (~mask), GCC_OFF12_ASM() (*addr));
131} 132}
132 133
133/* 134/*
@@ -147,8 +148,8 @@ static inline void toggle_reg32(volatile u32 *const addr,
147 " "__beqz"%0, 1b \n" 148 " "__beqz"%0, 1b \n"
148 " nop \n" 149 " nop \n"
149 " .set pop \n" 150 " .set pop \n"
150 : "=&r" (temp), "=m" (*addr) 151 : "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
151 : "ir" (mask), "m" (*addr)); 152 : "ir" (mask), GCC_OFF12_ASM() (*addr));
152} 153}
153 154
154/* 155/*
@@ -219,8 +220,8 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
219 " .set arch=r4000 \n" \ 220 " .set arch=r4000 \n" \
220 "1: ll %0, %1 #custom_read_reg32 \n" \ 221 "1: ll %0, %1 #custom_read_reg32 \n" \
221 " .set pop \n" \ 222 " .set pop \n" \
222 : "=r" (tmp), "=m" (*address) \ 223 : "=r" (tmp), "=" GCC_OFF12_ASM() (*address) \
223 : "m" (*address)) 224 : GCC_OFF12_ASM() (*address))
224 225
225#define custom_write_reg32(address, tmp) \ 226#define custom_write_reg32(address, tmp) \
226 __asm__ __volatile__( \ 227 __asm__ __volatile__( \
@@ -230,7 +231,7 @@ static inline u32 blocking_read_reg32(volatile u32 *const addr)
230 " "__beqz"%0, 1b \n" \ 231 " "__beqz"%0, 1b \n" \
231 " nop \n" \ 232 " nop \n" \
232 " .set pop \n" \ 233 " .set pop \n" \
233 : "=&r" (tmp), "=m" (*address) \ 234 : "=&r" (tmp), "=" GCC_OFF12_ASM() (*address) \
234 : "0" (tmp), "m" (*address)) 235 : "0" (tmp), GCC_OFF12_ASM() (*address))
235 236
236#endif /* __ASM_REGOPS_H__ */ 237#endif /* __ASM_REGOPS_H__ */
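
The motivation for swapping the bare "m" constraints: GCC may otherwise pick an addressing mode whose displacement does not fit the reduced ll/sc offset field on some ISAs (microMIPS in particular). GCC_OFF12_ASM() comes from the newly included <asm/compiler.h> and is understood to expand to a suitably restricted constraint ("R", or "ZC" for microMIPS, per that header as of this series); the resulting idiom, sketched without the .set bookkeeping:

static inline void demo_set_bits(volatile u32 *addr, u32 mask)
{
	u32 temp;

	__asm__ __volatile__(
	"1:	ll	%0, %1		\n"
	"	or	%0, %2		\n"
	"	sc	%0, %1		\n"
	"	beqz	%0, 1b		\n"
	: "=&r" (temp), "=" GCC_OFF12_ASM() (*addr)
	: "ir" (mask), GCC_OFF12_ASM() (*addr));
}
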
diff --git a/arch/mips/include/asm/mach-ralink/mt7620.h b/arch/mips/include/asm/mach-ralink/mt7620.h
index 6f9b24f51157..1976fb815fd1 100644
--- a/arch/mips/include/asm/mach-ralink/mt7620.h
+++ b/arch/mips/include/asm/mach-ralink/mt7620.h
@@ -13,6 +13,13 @@
13#ifndef _MT7620_REGS_H_ 13#ifndef _MT7620_REGS_H_
14#define _MT7620_REGS_H_ 14#define _MT7620_REGS_H_
15 15
16enum mt762x_soc_type {
17 MT762X_SOC_UNKNOWN = 0,
18 MT762X_SOC_MT7620A,
19 MT762X_SOC_MT7620N,
20 MT762X_SOC_MT7628AN,
21};
22
16#define MT7620_SYSC_BASE 0x10000000 23#define MT7620_SYSC_BASE 0x10000000
17 24
18#define SYSC_REG_CHIP_NAME0 0x00 25#define SYSC_REG_CHIP_NAME0 0x00
@@ -25,11 +32,9 @@
25#define SYSC_REG_CPLL_CONFIG0 0x54 32#define SYSC_REG_CPLL_CONFIG0 0x54
26#define SYSC_REG_CPLL_CONFIG1 0x58 33#define SYSC_REG_CPLL_CONFIG1 0x58
27 34
28#define MT7620N_CHIP_NAME0 0x33365452 35#define MT7620_CHIP_NAME0 0x3637544d
29#define MT7620N_CHIP_NAME1 0x20203235 36#define MT7620_CHIP_NAME1 0x20203032
30 37#define MT7628_CHIP_NAME1 0x20203832
31#define MT7620A_CHIP_NAME0 0x3637544d
32#define MT7620A_CHIP_NAME1 0x20203032
33 38
34#define SYSCFG0_XTAL_FREQ_SEL BIT(6) 39#define SYSCFG0_XTAL_FREQ_SEL BIT(6)
35 40
@@ -74,6 +79,9 @@
74#define SYSCFG0_DRAM_TYPE_DDR1 1 79#define SYSCFG0_DRAM_TYPE_DDR1 1
75#define SYSCFG0_DRAM_TYPE_DDR2 2 80#define SYSCFG0_DRAM_TYPE_DDR2 2
76 81
82#define SYSCFG0_DRAM_TYPE_DDR2_MT7628 0
83#define SYSCFG0_DRAM_TYPE_DDR1_MT7628 1
84
77#define MT7620_DRAM_BASE 0x0 85#define MT7620_DRAM_BASE 0x0
78#define MT7620_SDRAM_SIZE_MIN 2 86#define MT7620_SDRAM_SIZE_MIN 2
79#define MT7620_SDRAM_SIZE_MAX 64 87#define MT7620_SDRAM_SIZE_MAX 64
@@ -82,7 +90,6 @@
82#define MT7620_DDR2_SIZE_MIN 32 90#define MT7620_DDR2_SIZE_MIN 32
83#define MT7620_DDR2_SIZE_MAX 256 91#define MT7620_DDR2_SIZE_MAX 256
84 92
85#define MT7620_GPIO_MODE_I2C BIT(0)
86#define MT7620_GPIO_MODE_UART0_SHIFT 2 93#define MT7620_GPIO_MODE_UART0_SHIFT 2
87#define MT7620_GPIO_MODE_UART0_MASK 0x7 94#define MT7620_GPIO_MODE_UART0_MASK 0x7
88#define MT7620_GPIO_MODE_UART0(x) ((x) << MT7620_GPIO_MODE_UART0_SHIFT) 95#define MT7620_GPIO_MODE_UART0(x) ((x) << MT7620_GPIO_MODE_UART0_SHIFT)
@@ -94,15 +101,40 @@
94#define MT7620_GPIO_MODE_GPIO_UARTF 0x5 101#define MT7620_GPIO_MODE_GPIO_UARTF 0x5
95#define MT7620_GPIO_MODE_GPIO_I2S 0x6 102#define MT7620_GPIO_MODE_GPIO_I2S 0x6
96#define MT7620_GPIO_MODE_GPIO 0x7 103#define MT7620_GPIO_MODE_GPIO 0x7
97#define MT7620_GPIO_MODE_UART1 BIT(5) 104
98#define MT7620_GPIO_MODE_MDIO BIT(8) 105#define MT7620_GPIO_MODE_NAND 0
99#define MT7620_GPIO_MODE_RGMII1 BIT(9) 106#define MT7620_GPIO_MODE_SD 1
100#define MT7620_GPIO_MODE_RGMII2 BIT(10) 107#define MT7620_GPIO_MODE_ND_SD_GPIO 2
101#define MT7620_GPIO_MODE_SPI BIT(11) 108#define MT7620_GPIO_MODE_ND_SD_MASK 0x3
102#define MT7620_GPIO_MODE_SPI_REF_CLK BIT(12) 109#define MT7620_GPIO_MODE_ND_SD_SHIFT 18
103#define MT7620_GPIO_MODE_WLED BIT(13) 110
104#define MT7620_GPIO_MODE_JTAG BIT(15) 111#define MT7620_GPIO_MODE_PCIE_RST 0
105#define MT7620_GPIO_MODE_EPHY BIT(15) 112#define MT7620_GPIO_MODE_PCIE_REF 1
106#define MT7620_GPIO_MODE_WDT BIT(22) 113#define MT7620_GPIO_MODE_PCIE_GPIO 2
114#define MT7620_GPIO_MODE_PCIE_MASK 0x3
115#define MT7620_GPIO_MODE_PCIE_SHIFT 16
116
117#define MT7620_GPIO_MODE_WDT_RST 0
118#define MT7620_GPIO_MODE_WDT_REF 1
119#define MT7620_GPIO_MODE_WDT_GPIO 2
120#define MT7620_GPIO_MODE_WDT_MASK 0x3
121#define MT7620_GPIO_MODE_WDT_SHIFT 21
122
123#define MT7620_GPIO_MODE_I2C 0
124#define MT7620_GPIO_MODE_UART1 5
125#define MT7620_GPIO_MODE_MDIO 8
126#define MT7620_GPIO_MODE_RGMII1 9
127#define MT7620_GPIO_MODE_RGMII2 10
128#define MT7620_GPIO_MODE_SPI 11
129#define MT7620_GPIO_MODE_SPI_REF_CLK 12
130#define MT7620_GPIO_MODE_WLED 13
131#define MT7620_GPIO_MODE_JTAG 15
132#define MT7620_GPIO_MODE_EPHY 15
133#define MT7620_GPIO_MODE_PA 20
134
135static inline int mt7620_get_eco(void)
136{
137 return rt_sysc_r32(SYSC_REG_CHIP_REV) & CHIP_REV_ECO_MASK;
138}
107 139
108#endif 140#endif
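
With the per-variant NAME constants unified, probing keys on CHIP_NAME0/1 and resolves the remaining ambiguity elsewhere; a rough sketch of the detection this enables (simplified; the in-tree probe is assumed to also read a package strap to tell MT7620A from MT7620N):

static enum mt762x_soc_type demo_detect_soc(void)
{
	u32 n0 = rt_sysc_r32(SYSC_REG_CHIP_NAME0);
	u32 n1 = rt_sysc_r32(SYSC_REG_CHIP_NAME1);

	if (n0 != MT7620_CHIP_NAME0)
		return MT762X_SOC_UNKNOWN;
	if (n1 == MT7628_CHIP_NAME1)
		return MT762X_SOC_MT7628AN;
	if (n1 == MT7620_CHIP_NAME1)
		return MT762X_SOC_MT7620A;	/* or N, per package strap */
	return MT762X_SOC_UNKNOWN;
}
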
diff --git a/arch/mips/include/asm/mach-ralink/pinmux.h b/arch/mips/include/asm/mach-ralink/pinmux.h
new file mode 100644
index 000000000000..be106cb2e26d
--- /dev/null
+++ b/arch/mips/include/asm/mach-ralink/pinmux.h
@@ -0,0 +1,55 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 *
6 * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
7 */
8
9#ifndef _RT288X_PINMUX_H__
10#define _RT288X_PINMUX_H__
11
12#define FUNC(name, value, pin_first, pin_count) \
13 { name, value, pin_first, pin_count }
14
15#define GRP(_name, _func, _mask, _shift) \
16 { .name = _name, .mask = _mask, .shift = _shift, \
17 .func = _func, .gpio = _mask, \
18 .func_count = ARRAY_SIZE(_func) }
19
20#define GRP_G(_name, _func, _mask, _gpio, _shift) \
21 { .name = _name, .mask = _mask, .shift = _shift, \
22 .func = _func, .gpio = _gpio, \
23 .func_count = ARRAY_SIZE(_func) }
24
25struct rt2880_pmx_group;
26
27struct rt2880_pmx_func {
28 const char *name;
29 const char value;
30
31 int pin_first;
32 int pin_count;
33 int *pins;
34
35 int *groups;
36 int group_count;
37
38 int enabled;
39};
40
41struct rt2880_pmx_group {
42 const char *name;
43 int enabled;
44
45 const u32 shift;
46 const char mask;
47 const char gpio;
48
49 struct rt2880_pmx_func *func;
50 int func_count;
51};
52
53extern struct rt2880_pmx_group *rt2880_pinmux_data;
54
55#endif
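
Drivers point rt2880_pinmux_data at tables built from these macros; a hypothetical two-function group (all names and field positions invented for illustration):

static struct rt2880_pmx_func demo_uartf_func[] = {
	FUNC("uartf", 0, 7, 8),		/* mux value 0: pins 7..14 */
	FUNC("gpio uartf", 5, 7, 4),	/* mux value 5: first four pins */
};

static struct rt2880_pmx_group demo_pinmux_data[] = {
	/* 3-bit field (mask 0x7) at bit 2 of the GPIO-mode register */
	GRP("uartf", demo_uartf_func, 0x7, 2),
	{ 0 },
};
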
diff --git a/arch/mips/include/asm/mach-ralink/ralink_regs.h b/arch/mips/include/asm/mach-ralink/ralink_regs.h
index 5a508f9f9432..bd93014490df 100644
--- a/arch/mips/include/asm/mach-ralink/ralink_regs.h
+++ b/arch/mips/include/asm/mach-ralink/ralink_regs.h
@@ -26,6 +26,13 @@ static inline u32 rt_sysc_r32(unsigned reg)
26 return __raw_readl(rt_sysc_membase + reg); 26 return __raw_readl(rt_sysc_membase + reg);
27} 27}
28 28
29static inline void rt_sysc_m32(u32 clr, u32 set, unsigned reg)
30{
31 u32 val = rt_sysc_r32(reg) & ~clr;
32
33 __raw_writel(val | set, rt_sysc_membase + reg);
34}
35
29static inline void rt_memc_w32(u32 val, unsigned reg) 36static inline void rt_memc_w32(u32 val, unsigned reg)
30{ 37{
31 __raw_writel(val, rt_memc_membase + reg); 38 __raw_writel(val, rt_memc_membase + reg);
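
The new helper collapses the usual clear-then-set sequence on a system-controller register into one call; for instance (the 0x60 offset is a stand-in, not a define from this header):

static void demo_select_mode5(void)
{
	/* equivalent to: v = rt_sysc_r32(0x60) & ~(0x7 << 2);
	 *                rt_sysc_w32(v | (0x5 << 2), 0x60);   */
	rt_sysc_m32(0x7 << 2, 0x5 << 2, 0x60);
}
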
diff --git a/arch/mips/include/asm/mach-ralink/rt305x.h b/arch/mips/include/asm/mach-ralink/rt305x.h
index 069bf37a6010..96f731bac79a 100644
--- a/arch/mips/include/asm/mach-ralink/rt305x.h
+++ b/arch/mips/include/asm/mach-ralink/rt305x.h
@@ -125,24 +125,29 @@ static inline int soc_is_rt5350(void)
125#define RT305X_GPIO_GE0_TXD0 40 125#define RT305X_GPIO_GE0_TXD0 40
126#define RT305X_GPIO_GE0_RXCLK 51 126#define RT305X_GPIO_GE0_RXCLK 51
127 127
128#define RT305X_GPIO_MODE_I2C BIT(0)
129#define RT305X_GPIO_MODE_SPI BIT(1)
130#define RT305X_GPIO_MODE_UART0_SHIFT 2 128#define RT305X_GPIO_MODE_UART0_SHIFT 2
131#define RT305X_GPIO_MODE_UART0_MASK 0x7 129#define RT305X_GPIO_MODE_UART0_MASK 0x7
132#define RT305X_GPIO_MODE_UART0(x) ((x) << RT305X_GPIO_MODE_UART0_SHIFT) 130#define RT305X_GPIO_MODE_UART0(x) ((x) << RT305X_GPIO_MODE_UART0_SHIFT)
133#define RT305X_GPIO_MODE_UARTF 0x0 131#define RT305X_GPIO_MODE_UARTF 0
134#define RT305X_GPIO_MODE_PCM_UARTF 0x1 132#define RT305X_GPIO_MODE_PCM_UARTF 1
135#define RT305X_GPIO_MODE_PCM_I2S 0x2 133#define RT305X_GPIO_MODE_PCM_I2S 2
136#define RT305X_GPIO_MODE_I2S_UARTF 0x3 134#define RT305X_GPIO_MODE_I2S_UARTF 3
137#define RT305X_GPIO_MODE_PCM_GPIO 0x4 135#define RT305X_GPIO_MODE_PCM_GPIO 4
138#define RT305X_GPIO_MODE_GPIO_UARTF 0x5 136#define RT305X_GPIO_MODE_GPIO_UARTF 5
139#define RT305X_GPIO_MODE_GPIO_I2S 0x6 137#define RT305X_GPIO_MODE_GPIO_I2S 6
140#define RT305X_GPIO_MODE_GPIO 0x7 138#define RT305X_GPIO_MODE_GPIO 7
141#define RT305X_GPIO_MODE_UART1 BIT(5) 139
142#define RT305X_GPIO_MODE_JTAG BIT(6) 140#define RT305X_GPIO_MODE_I2C 0
143#define RT305X_GPIO_MODE_MDIO BIT(7) 141#define RT305X_GPIO_MODE_SPI 1
144#define RT305X_GPIO_MODE_SDRAM BIT(8) 142#define RT305X_GPIO_MODE_UART1 5
145#define RT305X_GPIO_MODE_RGMII BIT(9) 143#define RT305X_GPIO_MODE_JTAG 6
144#define RT305X_GPIO_MODE_MDIO 7
145#define RT305X_GPIO_MODE_SDRAM 8
146#define RT305X_GPIO_MODE_RGMII 9
147#define RT5350_GPIO_MODE_PHY_LED 14
148#define RT5350_GPIO_MODE_SPI_CS1 21
149#define RT3352_GPIO_MODE_LNA 18
150#define RT3352_GPIO_MODE_PA 20
146 151
147#define RT3352_SYSC_REG_SYSCFG0 0x010 152#define RT3352_SYSC_REG_SYSCFG0 0x010
148#define RT3352_SYSC_REG_SYSCFG1 0x014 153#define RT3352_SYSC_REG_SYSCFG1 0x014
diff --git a/arch/mips/include/asm/mach-ralink/rt3883.h b/arch/mips/include/asm/mach-ralink/rt3883.h
index 058382f37f92..0fbe6f9257cd 100644
--- a/arch/mips/include/asm/mach-ralink/rt3883.h
+++ b/arch/mips/include/asm/mach-ralink/rt3883.h
@@ -112,8 +112,6 @@
112#define RT3883_CLKCFG1_PCI_CLK_EN BIT(19) 112#define RT3883_CLKCFG1_PCI_CLK_EN BIT(19)
113#define RT3883_CLKCFG1_UPHY0_CLK_EN BIT(18) 113#define RT3883_CLKCFG1_UPHY0_CLK_EN BIT(18)
114 114
115#define RT3883_GPIO_MODE_I2C BIT(0)
116#define RT3883_GPIO_MODE_SPI BIT(1)
117#define RT3883_GPIO_MODE_UART0_SHIFT 2 115#define RT3883_GPIO_MODE_UART0_SHIFT 2
118#define RT3883_GPIO_MODE_UART0_MASK 0x7 116#define RT3883_GPIO_MODE_UART0_MASK 0x7
119#define RT3883_GPIO_MODE_UART0(x) ((x) << RT3883_GPIO_MODE_UART0_SHIFT) 117#define RT3883_GPIO_MODE_UART0(x) ((x) << RT3883_GPIO_MODE_UART0_SHIFT)
@@ -125,11 +123,15 @@
125#define RT3883_GPIO_MODE_GPIO_UARTF 0x5 123#define RT3883_GPIO_MODE_GPIO_UARTF 0x5
126#define RT3883_GPIO_MODE_GPIO_I2S 0x6 124#define RT3883_GPIO_MODE_GPIO_I2S 0x6
127#define RT3883_GPIO_MODE_GPIO 0x7 125#define RT3883_GPIO_MODE_GPIO 0x7
128#define RT3883_GPIO_MODE_UART1 BIT(5) 126
129#define RT3883_GPIO_MODE_JTAG BIT(6) 127#define RT3883_GPIO_MODE_I2C 0
130#define RT3883_GPIO_MODE_MDIO BIT(7) 128#define RT3883_GPIO_MODE_SPI 1
131#define RT3883_GPIO_MODE_GE1 BIT(9) 129#define RT3883_GPIO_MODE_UART1 5
132#define RT3883_GPIO_MODE_GE2 BIT(10) 130#define RT3883_GPIO_MODE_JTAG 6
131#define RT3883_GPIO_MODE_MDIO 7
132#define RT3883_GPIO_MODE_GE1 9
133#define RT3883_GPIO_MODE_GE2 10
134
133#define RT3883_GPIO_MODE_PCI_SHIFT 11 135#define RT3883_GPIO_MODE_PCI_SHIFT 11
134#define RT3883_GPIO_MODE_PCI_MASK 0x7 136#define RT3883_GPIO_MODE_PCI_MASK 0x7
135#define RT3883_GPIO_MODE_PCI (RT3883_GPIO_MODE_PCI_MASK << RT3883_GPIO_MODE_PCI_SHIFT) 137#define RT3883_GPIO_MODE_PCI (RT3883_GPIO_MODE_PCI_MASK << RT3883_GPIO_MODE_PCI_SHIFT)
diff --git a/arch/mips/include/asm/mach-sead3/irq.h b/arch/mips/include/asm/mach-sead3/irq.h
index d8106f75b9af..5d154cfbcf4c 100644
--- a/arch/mips/include/asm/mach-sead3/irq.h
+++ b/arch/mips/include/asm/mach-sead3/irq.h
@@ -1,7 +1,6 @@
1#ifndef __ASM_MACH_MIPS_IRQ_H 1#ifndef __ASM_MACH_MIPS_IRQ_H
2#define __ASM_MACH_MIPS_IRQ_H 2#define __ASM_MACH_MIPS_IRQ_H
3 3
4#define GIC_NUM_INTRS (24 + NR_CPUS * 2)
5#define NR_IRQS 256 4#define NR_IRQS 256
6 5
7 6
diff --git a/arch/mips/include/asm/mach-tx39xx/ioremap.h b/arch/mips/include/asm/mach-tx39xx/ioremap.h
index 93c6c04ffda3..0874cd2b06d7 100644
--- a/arch/mips/include/asm/mach-tx39xx/ioremap.h
+++ b/arch/mips/include/asm/mach-tx39xx/ioremap.h
@@ -15,12 +15,12 @@
15 * Allow physical addresses to be fixed up to help peripherals located 15 * Allow physical addresses to be fixed up to help peripherals located
16 * outside the low 32-bit range -- generic pass-through version. 16 * outside the low 32-bit range -- generic pass-through version.
17 */ 17 */
18static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size) 18static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size)
19{ 19{
20 return phys_addr; 20 return phys_addr;
21} 21}
22 22
23static inline void __iomem *plat_ioremap(phys_t offset, unsigned long size, 23static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size,
24 unsigned long flags) 24 unsigned long flags)
25{ 25{
26#define TXX9_DIRECTMAP_BASE 0xff000000ul 26#define TXX9_DIRECTMAP_BASE 0xff000000ul
diff --git a/arch/mips/include/asm/mach-tx49xx/ioremap.h b/arch/mips/include/asm/mach-tx49xx/ioremap.h
index 1e7beae72229..4b6a8441b25f 100644
--- a/arch/mips/include/asm/mach-tx49xx/ioremap.h
+++ b/arch/mips/include/asm/mach-tx49xx/ioremap.h
@@ -15,12 +15,12 @@
15 * Allow physical addresses to be fixed up to help peripherals located 15 * Allow physical addresses to be fixed up to help peripherals located
16 * outside the low 32-bit range -- generic pass-through version. 16 * outside the low 32-bit range -- generic pass-through version.
17 */ 17 */
18static inline phys_t fixup_bigphys_addr(phys_t phys_addr, phys_t size) 18static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size)
19{ 19{
20 return phys_addr; 20 return phys_addr;
21} 21}
22 22
23static inline void __iomem *plat_ioremap(phys_t offset, unsigned long size, 23static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size,
24 unsigned long flags) 24 unsigned long flags)
25{ 25{
26#ifdef CONFIG_64BIT 26#ifdef CONFIG_64BIT
diff --git a/arch/mips/include/asm/mips-boards/maltaint.h b/arch/mips/include/asm/mips-boards/maltaint.h
index e330732ddf98..987ff580466b 100644
--- a/arch/mips/include/asm/mips-boards/maltaint.h
+++ b/arch/mips/include/asm/mips-boards/maltaint.h
@@ -10,7 +10,7 @@
10#ifndef _MIPS_MALTAINT_H 10#ifndef _MIPS_MALTAINT_H
11#define _MIPS_MALTAINT_H 11#define _MIPS_MALTAINT_H
12 12
13#define MIPS_GIC_IRQ_BASE (MIPS_CPU_IRQ_BASE + 8) 13#include <linux/irqchip/mips-gic.h>
14 14
15/* 15/*
16 * Interrupts 0..15 are used for Malta ISA compatible interrupts 16 * Interrupts 0..15 are used for Malta ISA compatible interrupts
@@ -22,29 +22,28 @@
22#define MIPSCPU_INT_SW1 1 22#define MIPSCPU_INT_SW1 1
23#define MIPSCPU_INT_MB0 2 23#define MIPSCPU_INT_MB0 2
24#define MIPSCPU_INT_I8259A MIPSCPU_INT_MB0 24#define MIPSCPU_INT_I8259A MIPSCPU_INT_MB0
25#define MIPSCPU_INT_GIC MIPSCPU_INT_MB0 /* GIC chained interrupt */
25#define MIPSCPU_INT_MB1 3 26#define MIPSCPU_INT_MB1 3
26#define MIPSCPU_INT_SMI MIPSCPU_INT_MB1 27#define MIPSCPU_INT_SMI MIPSCPU_INT_MB1
27#define MIPSCPU_INT_IPI0 MIPSCPU_INT_MB1 /* GIC IPI */
28#define MIPSCPU_INT_MB2 4 28#define MIPSCPU_INT_MB2 4
29#define MIPSCPU_INT_IPI1 MIPSCPU_INT_MB2 /* GIC IPI */
30#define MIPSCPU_INT_MB3 5 29#define MIPSCPU_INT_MB3 5
31#define MIPSCPU_INT_COREHI MIPSCPU_INT_MB3 30#define MIPSCPU_INT_COREHI MIPSCPU_INT_MB3
32#define MIPSCPU_INT_MB4 6 31#define MIPSCPU_INT_MB4 6
33#define MIPSCPU_INT_CORELO MIPSCPU_INT_MB4 32#define MIPSCPU_INT_CORELO MIPSCPU_INT_MB4
34 33
35/* 34/*
36 * Interrupts 64..127 are used for Soc-it Classic interrupts 35 * Interrupts 96..127 are used for Soc-it Classic interrupts
37 */ 36 */
38#define MSC01C_INT_BASE 64 37#define MSC01C_INT_BASE 96
39 38
40/* SOC-it Classic interrupt offsets */ 39/* SOC-it Classic interrupt offsets */
41#define MSC01C_INT_TMR 0 40#define MSC01C_INT_TMR 0
42#define MSC01C_INT_PCI 1 41#define MSC01C_INT_PCI 1
43 42
44/* 43/*
45 * Interrupts 64..127 are used for Soc-it EIC interrupts 44 * Interrupts 96..127 are used for Soc-it EIC interrupts
46 */ 45 */
47#define MSC01E_INT_BASE 64 46#define MSC01E_INT_BASE 96
48 47
49/* SOC-it EIC interrupt offsets */ 48/* SOC-it EIC interrupt offsets */
50#define MSC01E_INT_SW0 1 49#define MSC01E_INT_SW0 1
@@ -63,14 +62,7 @@
63#define MSC01E_INT_PERFCTR 10 62#define MSC01E_INT_PERFCTR 10
64#define MSC01E_INT_CPUCTR 11 63#define MSC01E_INT_CPUCTR 11
65 64
66/* External Interrupts used for IPI */ 65/* GIC external interrupts */
67#define GIC_IPI_EXT_INTR_RESCHED_VPE0 16 66#define GIC_INT_I8259A GIC_SHARED_TO_HWIRQ(3)
68#define GIC_IPI_EXT_INTR_CALLFNC_VPE0 17
69#define GIC_IPI_EXT_INTR_RESCHED_VPE1 18
70#define GIC_IPI_EXT_INTR_CALLFNC_VPE1 19
71#define GIC_IPI_EXT_INTR_RESCHED_VPE2 20
72#define GIC_IPI_EXT_INTR_CALLFNC_VPE2 21
73#define GIC_IPI_EXT_INTR_RESCHED_VPE3 22
74#define GIC_IPI_EXT_INTR_CALLFNC_VPE3 23
75 67
76#endif /* !(_MIPS_MALTAINT_H) */ 68#endif /* !(_MIPS_MALTAINT_H) */
diff --git a/arch/mips/include/asm/mips-boards/sead3int.h b/arch/mips/include/asm/mips-boards/sead3int.h
index 6b17aaf7d901..8932c7de0419 100644
--- a/arch/mips/include/asm/mips-boards/sead3int.h
+++ b/arch/mips/include/asm/mips-boards/sead3int.h
@@ -10,10 +10,23 @@
10#ifndef _MIPS_SEAD3INT_H 10#ifndef _MIPS_SEAD3INT_H
11#define _MIPS_SEAD3INT_H 11#define _MIPS_SEAD3INT_H
12 12
13#include <linux/irqchip/mips-gic.h>
14
13/* SEAD-3 GIC address space definitions. */ 15/* SEAD-3 GIC address space definitions. */
14#define GIC_BASE_ADDR 0x1b1c0000 16#define GIC_BASE_ADDR 0x1b1c0000
15#define GIC_ADDRSPACE_SZ (128 * 1024) 17#define GIC_ADDRSPACE_SZ (128 * 1024)
16 18
17#define MIPS_GIC_IRQ_BASE (MIPS_CPU_IRQ_BASE + 0) 19/* CPU interrupt offsets */
20#define CPU_INT_GIC 2
21#define CPU_INT_EHCI 2
22#define CPU_INT_UART0 4
23#define CPU_INT_UART1 4
24#define CPU_INT_NET 6
25
26/* GIC interrupt offsets */
27#define GIC_INT_NET GIC_SHARED_TO_HWIRQ(0)
28#define GIC_INT_UART1 GIC_SHARED_TO_HWIRQ(2)
29#define GIC_INT_UART0 GIC_SHARED_TO_HWIRQ(3)
30#define GIC_INT_EHCI GIC_SHARED_TO_HWIRQ(5)
18 31
19#endif /* !(_MIPS_SEAD3INT_H) */ 32#endif /* !(_MIPS_SEAD3INT_H) */
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index 6a9d2dd005ca..b95a827d763e 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -30,7 +30,7 @@ extern void __iomem *mips_cm_l2sync_base;
30 * different way by defining a function with the same prototype except for the 30 * different way by defining a function with the same prototype except for the
31 * name mips_cm_phys_base (without underscores). 31 * name mips_cm_phys_base (without underscores).
32 */ 32 */
33extern phys_t __mips_cm_phys_base(void); 33extern phys_addr_t __mips_cm_phys_base(void);
34 34
35/** 35/**
36 * mips_cm_probe - probe for a Coherence Manager 36 * mips_cm_probe - probe for a Coherence Manager
diff --git a/arch/mips/include/asm/mips-cpc.h b/arch/mips/include/asm/mips-cpc.h
index e139a534e0fd..1cebe8c79051 100644
--- a/arch/mips/include/asm/mips-cpc.h
+++ b/arch/mips/include/asm/mips-cpc.h
@@ -25,7 +25,7 @@ extern void __iomem *mips_cpc_base;
25 * memory mapped registers. This is platform dependant & must therefore be 25 * memory mapped registers. This is platform dependant & must therefore be
26 * implemented per-platform. 26 * implemented per-platform.
27 */ 27 */
28extern phys_t mips_cpc_default_phys_base(void); 28extern phys_addr_t mips_cpc_default_phys_base(void);
29 29
30/** 30/**
31 * mips_cpc_phys_base - retrieve the physical base address of the CPC 31 * mips_cpc_phys_base - retrieve the physical base address of the CPC
@@ -35,7 +35,7 @@ extern phys_t mips_cpc_default_phys_base(void);
35 * is present. It may be overriden by individual platforms which determine 35 * is present. It may be overriden by individual platforms which determine
36 * this address in a different way. 36 * this address in a different way.
37 */ 37 */
38extern phys_t __weak mips_cpc_phys_base(void); 38extern phys_addr_t __weak mips_cpc_phys_base(void);
39 39
40/** 40/**
41 * mips_cpc_probe - probe for a Cluster Power Controller 41 * mips_cpc_probe - probe for a Cluster Power Controller
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 22a135ac91de..5e4aef304b02 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -653,6 +653,9 @@
653#define MIPS_CONF5_NF (_ULCAST_(1) << 0) 653#define MIPS_CONF5_NF (_ULCAST_(1) << 0)
654#define MIPS_CONF5_UFR (_ULCAST_(1) << 2) 654#define MIPS_CONF5_UFR (_ULCAST_(1) << 2)
655#define MIPS_CONF5_MRP (_ULCAST_(1) << 3) 655#define MIPS_CONF5_MRP (_ULCAST_(1) << 3)
656#define MIPS_CONF5_MVH (_ULCAST_(1) << 5)
657#define MIPS_CONF5_FRE (_ULCAST_(1) << 8)
658#define MIPS_CONF5_UFE (_ULCAST_(1) << 9)
656#define MIPS_CONF5_MSAEN (_ULCAST_(1) << 27) 659#define MIPS_CONF5_MSAEN (_ULCAST_(1) << 27)
657#define MIPS_CONF5_EVA (_ULCAST_(1) << 28) 660#define MIPS_CONF5_EVA (_ULCAST_(1) << 28)
658#define MIPS_CONF5_CV (_ULCAST_(1) << 29) 661#define MIPS_CONF5_CV (_ULCAST_(1) << 29)
@@ -694,6 +697,7 @@
694#define MIPS_FPIR_W (_ULCAST_(1) << 20) 697#define MIPS_FPIR_W (_ULCAST_(1) << 20)
695#define MIPS_FPIR_L (_ULCAST_(1) << 21) 698#define MIPS_FPIR_L (_ULCAST_(1) << 21)
696#define MIPS_FPIR_F64 (_ULCAST_(1) << 22) 699#define MIPS_FPIR_F64 (_ULCAST_(1) << 22)
700#define MIPS_FPIR_FREP (_ULCAST_(1) << 29)
697 701
698/* 702/*
699 * Bits in the MIPS32 Memory Segmentation registers. 703 * Bits in the MIPS32 Memory Segmentation registers.
@@ -994,6 +998,39 @@ do { \
994 local_irq_restore(__flags); \ 998 local_irq_restore(__flags); \
995} while (0) 999} while (0)
996 1000
1001#define __readx_32bit_c0_register(source) \
1002({ \
1003 unsigned int __res; \
1004 \
1005 __asm__ __volatile__( \
1006 " .set push \n" \
1007 " .set noat \n" \
1008 " .set mips32r2 \n" \
1009 " .insn \n" \
1010 " # mfhc0 $1, %1 \n" \
1011 " .word (0x40410000 | ((%1 & 0x1f) << 11)) \n" \
1012 " move %0, $1 \n" \
1013 " .set pop \n" \
1014 : "=r" (__res) \
1015 : "i" (source)); \
1016 __res; \
1017})
1018
1019#define __writex_32bit_c0_register(register, value) \
1020do { \
1021 __asm__ __volatile__( \
1022 " .set push \n" \
1023 " .set noat \n" \
1024 " .set mips32r2 \n" \
1025 " move $1, %0 \n" \
1026 " # mthc0 $1, %1 \n" \
1027 " .insn \n" \
1028 " .word (0x40c10000 | ((%1 & 0x1f) << 11)) \n" \
1029 " .set pop \n" \
1030 : \
1031 : "r" (value), "i" (register)); \
1032} while (0)
1033
997#define read_c0_index() __read_32bit_c0_register($0, 0) 1034#define read_c0_index() __read_32bit_c0_register($0, 0)
998#define write_c0_index(val) __write_32bit_c0_register($0, 0, val) 1035#define write_c0_index(val) __write_32bit_c0_register($0, 0, val)
999 1036
@@ -1003,9 +1040,15 @@ do { \
1003#define read_c0_entrylo0() __read_ulong_c0_register($2, 0) 1040#define read_c0_entrylo0() __read_ulong_c0_register($2, 0)
1004#define write_c0_entrylo0(val) __write_ulong_c0_register($2, 0, val) 1041#define write_c0_entrylo0(val) __write_ulong_c0_register($2, 0, val)
1005 1042
1043#define readx_c0_entrylo0() __readx_32bit_c0_register(2)
1044#define writex_c0_entrylo0(val) __writex_32bit_c0_register(2, val)
1045
1006#define read_c0_entrylo1() __read_ulong_c0_register($3, 0) 1046#define read_c0_entrylo1() __read_ulong_c0_register($3, 0)
1007#define write_c0_entrylo1(val) __write_ulong_c0_register($3, 0, val) 1047#define write_c0_entrylo1(val) __write_ulong_c0_register($3, 0, val)
1008 1048
1049#define readx_c0_entrylo1() __readx_32bit_c0_register(3)
1050#define writex_c0_entrylo1(val) __writex_32bit_c0_register(3, val)
1051
1009#define read_c0_conf() __read_32bit_c0_register($3, 0) 1052#define read_c0_conf() __read_32bit_c0_register($3, 0)
1010#define write_c0_conf(val) __write_32bit_c0_register($3, 0, val) 1053#define write_c0_conf(val) __write_32bit_c0_register($3, 0, val)
1011 1054
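
The new accessors wrap mfhc0/mthc0 (emitted via .word, since contemporary binutils may not accept the mnemonics) to reach the upper 32 bits of a 64-bit CP0 register from a 32-bit kernel, e.g. for extended EntryLo physical addresses; a sketch of writing a >32-bit EntryLo0 value:

static void demo_write_entrylo0_64(unsigned long long entrylo)
{
	write_c0_entrylo0((u32)entrylo);	/* low word via mtc0 */
	writex_c0_entrylo0(entrylo >> 32);	/* high word via mthc0 */
}
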
diff --git a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
index 024a71b2bff9..75739c83f07e 100644
--- a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
+++ b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
@@ -76,6 +76,8 @@
76 76
77#include <linux/prefetch.h> 77#include <linux/prefetch.h>
78 78
79#include <asm/compiler.h>
80
79#include <asm/octeon/cvmx-fpa.h> 81#include <asm/octeon/cvmx-fpa.h>
80/** 82/**
81 * By default we disable the max depth support. Most programs 83 * By default we disable the max depth support. Most programs
@@ -273,7 +275,7 @@ static inline void __cvmx_cmd_queue_lock(cvmx_cmd_queue_id_t queue_id,
273 " lbu %[ticket], %[now_serving]\n" 275 " lbu %[ticket], %[now_serving]\n"
274 "4:\n" 276 "4:\n"
275 ".set pop\n" : 277 ".set pop\n" :
276 [ticket_ptr] "=m"(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]), 278 [ticket_ptr] "=" GCC_OFF12_ASM()(__cvmx_cmd_queue_state_ptr->ticket[__cvmx_cmd_queue_get_index(queue_id)]),
277 [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp), 279 [now_serving] "=m"(qptr->now_serving), [ticket] "=r"(tmp),
278 [my_ticket] "=r"(my_ticket) 280 [my_ticket] "=r"(my_ticket)
279 ); 281 );
diff --git a/arch/mips/include/asm/octeon/cvmx-pow.h b/arch/mips/include/asm/octeon/cvmx-pow.h
index 4b4d0ecfd9eb..2188e65afb86 100644
--- a/arch/mips/include/asm/octeon/cvmx-pow.h
+++ b/arch/mips/include/asm/octeon/cvmx-pow.h
@@ -1066,7 +1066,7 @@ static inline void __cvmx_pow_warn_if_pending_switch(const char *function)
1066 uint64_t switch_complete; 1066 uint64_t switch_complete;
1067 CVMX_MF_CHORD(switch_complete); 1067 CVMX_MF_CHORD(switch_complete);
1068 if (!switch_complete) 1068 if (!switch_complete)
1069 pr_warning("%s called with tag switch in progress\n", function); 1069 pr_warn("%s called with tag switch in progress\n", function);
1070} 1070}
1071 1071
1072/** 1072/**
@@ -1084,8 +1084,7 @@ static inline void cvmx_pow_tag_sw_wait(void)
1084 if (unlikely(switch_complete)) 1084 if (unlikely(switch_complete))
1085 break; 1085 break;
1086 if (unlikely(cvmx_get_cycle() > start_cycle + MAX_CYCLES)) { 1086 if (unlikely(cvmx_get_cycle() > start_cycle + MAX_CYCLES)) {
1087 pr_warning("Tag switch is taking a long time, " 1087 pr_warn("Tag switch is taking a long time, possible deadlock\n");
1088 "possible deadlock\n");
1089 start_cycle = -MAX_CYCLES - 1; 1088 start_cycle = -MAX_CYCLES - 1;
1090 } 1089 }
1091 } 1090 }
@@ -1296,19 +1295,16 @@ static inline void cvmx_pow_tag_sw_nocheck(uint32_t tag,
1296 __cvmx_pow_warn_if_pending_switch(__func__); 1295 __cvmx_pow_warn_if_pending_switch(__func__);
1297 current_tag = cvmx_pow_get_current_tag(); 1296 current_tag = cvmx_pow_get_current_tag();
1298 if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL) 1297 if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
1299 pr_warning("%s called with NULL_NULL tag\n", 1298 pr_warn("%s called with NULL_NULL tag\n", __func__);
1300 __func__);
1301 if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL) 1299 if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
1302 pr_warning("%s called with NULL tag\n", __func__); 1300 pr_warn("%s called with NULL tag\n", __func__);
1303 if ((current_tag.s.type == tag_type) 1301 if ((current_tag.s.type == tag_type)
1304 && (current_tag.s.tag == tag)) 1302 && (current_tag.s.tag == tag))
1305 pr_warning("%s called to perform a tag switch to the " 1303 pr_warn("%s called to perform a tag switch to the same tag\n",
1306 "same tag\n", 1304 __func__);
1307 __func__);
1308 if (tag_type == CVMX_POW_TAG_TYPE_NULL) 1305 if (tag_type == CVMX_POW_TAG_TYPE_NULL)
1309 pr_warning("%s called to perform a tag switch to " 1306 pr_warn("%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n",
1310 "NULL. Use cvmx_pow_tag_sw_null() instead\n", 1307 __func__);
1311 __func__);
1312 } 1308 }
1313 1309
1314 /* 1310 /*
@@ -1407,23 +1403,19 @@ static inline void cvmx_pow_tag_sw_full_nocheck(cvmx_wqe_t *wqp, uint32_t tag,
1407 __cvmx_pow_warn_if_pending_switch(__func__); 1403 __cvmx_pow_warn_if_pending_switch(__func__);
1408 current_tag = cvmx_pow_get_current_tag(); 1404 current_tag = cvmx_pow_get_current_tag();
1409 if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL) 1405 if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
1410 pr_warning("%s called with NULL_NULL tag\n", 1406 pr_warn("%s called with NULL_NULL tag\n", __func__);
1411 __func__);
1412 if ((current_tag.s.type == tag_type) 1407 if ((current_tag.s.type == tag_type)
1413 && (current_tag.s.tag == tag)) 1408 && (current_tag.s.tag == tag))
1414 pr_warning("%s called to perform a tag switch to " 1409 pr_warn("%s called to perform a tag switch to the same tag\n",
1415 "the same tag\n", 1410 __func__);
1416 __func__);
1417 if (tag_type == CVMX_POW_TAG_TYPE_NULL) 1411 if (tag_type == CVMX_POW_TAG_TYPE_NULL)
1418 pr_warning("%s called to perform a tag switch to " 1412 pr_warn("%s called to perform a tag switch to NULL. Use cvmx_pow_tag_sw_null() instead\n",
1419 "NULL. Use cvmx_pow_tag_sw_null() instead\n", 1413 __func__);
1420 __func__);
1421 if (wqp != cvmx_phys_to_ptr(0x80)) 1414 if (wqp != cvmx_phys_to_ptr(0x80))
1422 if (wqp != cvmx_pow_get_current_wqp()) 1415 if (wqp != cvmx_pow_get_current_wqp())
1423 pr_warning("%s passed WQE(%p) doesn't match " 1416 pr_warn("%s passed WQE(%p) doesn't match the address in the POW(%p)\n",
1424 "the address in the POW(%p)\n", 1417 __func__, wqp,
1425 __func__, wqp, 1418 cvmx_pow_get_current_wqp());
1426 cvmx_pow_get_current_wqp());
1427 } 1419 }
1428 1420
1429 /* 1421 /*
@@ -1507,12 +1499,10 @@ static inline void cvmx_pow_tag_sw_null_nocheck(void)
1507 __cvmx_pow_warn_if_pending_switch(__func__); 1499 __cvmx_pow_warn_if_pending_switch(__func__);
1508 current_tag = cvmx_pow_get_current_tag(); 1500 current_tag = cvmx_pow_get_current_tag();
1509 if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL) 1501 if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
1510 pr_warning("%s called with NULL_NULL tag\n", 1502 pr_warn("%s called with NULL_NULL tag\n", __func__);
1511 __func__);
1512 if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL) 1503 if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
1513 pr_warning("%s called when we already have a " 1504 pr_warn("%s called when we already have a NULL tag\n",
1514 "NULL tag\n", 1505 __func__);
1515 __func__);
1516 } 1506 }
1517 1507
1518 tag_req.u64 = 0; 1508 tag_req.u64 = 0;
@@ -1725,17 +1715,14 @@ static inline void cvmx_pow_tag_sw_desched_nocheck(
1725 __cvmx_pow_warn_if_pending_switch(__func__); 1715 __cvmx_pow_warn_if_pending_switch(__func__);
1726 current_tag = cvmx_pow_get_current_tag(); 1716 current_tag = cvmx_pow_get_current_tag();
1727 if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL) 1717 if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
1728 pr_warning("%s called with NULL_NULL tag\n", 1718 pr_warn("%s called with NULL_NULL tag\n", __func__);
1729 __func__);
1730 if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL) 1719 if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
1731 pr_warning("%s called with NULL tag. Deschedule not " 1720 pr_warn("%s called with NULL tag. Deschedule not allowed from NULL state\n",
1732 "allowed from NULL state\n", 1721 __func__);
1733 __func__);
1734 if ((current_tag.s.type != CVMX_POW_TAG_TYPE_ATOMIC) 1722 if ((current_tag.s.type != CVMX_POW_TAG_TYPE_ATOMIC)
1735 && (tag_type != CVMX_POW_TAG_TYPE_ATOMIC)) 1723 && (tag_type != CVMX_POW_TAG_TYPE_ATOMIC))
1736 pr_warning("%s called where neither the before or " 1724 pr_warn("%s called where neither the before or after tag is ATOMIC\n",
1737 "after tag is ATOMIC\n", 1725 __func__);
1738 __func__);
1739 } 1726 }
1740 1727
1741 tag_req.u64 = 0; 1728 tag_req.u64 = 0;
@@ -1832,12 +1819,10 @@ static inline void cvmx_pow_desched(uint64_t no_sched)
1832 __cvmx_pow_warn_if_pending_switch(__func__); 1819 __cvmx_pow_warn_if_pending_switch(__func__);
1833 current_tag = cvmx_pow_get_current_tag(); 1820 current_tag = cvmx_pow_get_current_tag();
1834 if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL) 1821 if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL_NULL)
1835 pr_warning("%s called with NULL_NULL tag\n", 1822 pr_warn("%s called with NULL_NULL tag\n", __func__);
1836 __func__);
1837 if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL) 1823 if (current_tag.s.type == CVMX_POW_TAG_TYPE_NULL)
1838 pr_warning("%s called with NULL tag. Deschedule not " 1824 pr_warn("%s called with NULL tag. Deschedule not expected from NULL state\n",
1839 "expected from NULL state\n", 1825 __func__);
1840 __func__);
1841 } 1826 }
1842 1827
1843 /* Need to make sure any writes to the work queue entry are complete */ 1828 /* Need to make sure any writes to the work queue entry are complete */
diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
index f991e7701d3d..33db1c806b01 100644
--- a/arch/mips/include/asm/octeon/cvmx.h
+++ b/arch/mips/include/asm/octeon/cvmx.h
@@ -451,67 +451,4 @@ static inline uint32_t cvmx_octeon_num_cores(void)
451 return cvmx_pop(ciu_fuse); 451 return cvmx_pop(ciu_fuse);
452} 452}
453 453
454/**
455 * Read a byte of fuse data
456 * @byte_addr: address to read
457 *
458 * Returns fuse value: 0 or 1
459 */
460static uint8_t cvmx_fuse_read_byte(int byte_addr)
461{
462 union cvmx_mio_fus_rcmd read_cmd;
463
464 read_cmd.u64 = 0;
465 read_cmd.s.addr = byte_addr;
466 read_cmd.s.pend = 1;
467 cvmx_write_csr(CVMX_MIO_FUS_RCMD, read_cmd.u64);
468 while ((read_cmd.u64 = cvmx_read_csr(CVMX_MIO_FUS_RCMD))
469 && read_cmd.s.pend)
470 ;
471 return read_cmd.s.dat;
472}
473
474/**
475 * Read a single fuse bit
476 *
477 * @fuse: Fuse number (0-1024)
478 *
479 * Returns fuse value: 0 or 1
480 */
481static inline int cvmx_fuse_read(int fuse)
482{
483 return (cvmx_fuse_read_byte(fuse >> 3) >> (fuse & 0x7)) & 1;
484}
485
486static inline int cvmx_octeon_model_CN36XX(void)
487{
488 return OCTEON_IS_MODEL(OCTEON_CN38XX)
489 && !cvmx_octeon_is_pass1()
490 && cvmx_fuse_read(264);
491}
492
493static inline int cvmx_octeon_zip_present(void)
494{
495 return octeon_has_feature(OCTEON_FEATURE_ZIP);
496}
497
498static inline int cvmx_octeon_dfa_present(void)
499{
500 if (!OCTEON_IS_MODEL(OCTEON_CN38XX)
501 && !OCTEON_IS_MODEL(OCTEON_CN31XX)
502 && !OCTEON_IS_MODEL(OCTEON_CN58XX))
503 return 0;
504 else if (OCTEON_IS_MODEL(OCTEON_CN3020))
505 return 0;
506 else if (cvmx_octeon_is_pass1())
507 return 1;
508 else
509 return !cvmx_fuse_read(120);
510}
511
512static inline int cvmx_octeon_crypto_present(void)
513{
514 return octeon_has_feature(OCTEON_FEATURE_CRYPTO);
515}
516
517#endif /* __CVMX_H__ */ 454#endif /* __CVMX_H__ */
diff --git a/arch/mips/include/asm/octeon/octeon-feature.h b/arch/mips/include/asm/octeon/octeon-feature.h
index 90e05a8d4b15..c4fe81f47f53 100644
--- a/arch/mips/include/asm/octeon/octeon-feature.h
+++ b/arch/mips/include/asm/octeon/octeon-feature.h
@@ -86,8 +86,6 @@ enum octeon_feature {
86 OCTEON_MAX_FEATURE 86 OCTEON_MAX_FEATURE
87}; 87};
88 88
89static inline int cvmx_fuse_read(int fuse);
90
91/** 89/**
92 * Determine if the current Octeon supports a specific feature. These 90 * Determine if the current Octeon supports a specific feature. These
93 * checks have been optimized to be fairly quick, but they should still 91 * checks have been optimized to be fairly quick, but they should still
@@ -105,33 +103,6 @@ static inline int octeon_has_feature(enum octeon_feature feature)
105 case OCTEON_FEATURE_SAAD: 103 case OCTEON_FEATURE_SAAD:
106 return !OCTEON_IS_MODEL(OCTEON_CN3XXX); 104 return !OCTEON_IS_MODEL(OCTEON_CN3XXX);
107 105
108 case OCTEON_FEATURE_ZIP:
109 if (OCTEON_IS_MODEL(OCTEON_CN30XX)
110 || OCTEON_IS_MODEL(OCTEON_CN50XX)
111 || OCTEON_IS_MODEL(OCTEON_CN52XX))
112 return 0;
113 else if (OCTEON_IS_MODEL(OCTEON_CN38XX_PASS1))
114 return 1;
115 else
116 return !cvmx_fuse_read(121);
117
118 case OCTEON_FEATURE_CRYPTO:
119 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
120 union cvmx_mio_fus_dat2 fus_2;
121 fus_2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
122 if (fus_2.s.nocrypto || fus_2.s.nomul) {
123 return 0;
124 } else if (!fus_2.s.dorm_crypto) {
125 return 1;
126 } else {
127 union cvmx_rnm_ctl_status st;
128 st.u64 = cvmx_read_csr(CVMX_RNM_CTL_STATUS);
129 return st.s.eer_val;
130 }
131 } else {
132 return !cvmx_fuse_read(90);
133 }
134
135 case OCTEON_FEATURE_DORM_CRYPTO: 106 case OCTEON_FEATURE_DORM_CRYPTO:
136 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { 107 if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
137 union cvmx_mio_fus_dat2 fus_2; 108 union cvmx_mio_fus_dat2 fus_2;
@@ -188,29 +159,6 @@ static inline int octeon_has_feature(enum octeon_feature feature)
188 && !OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) 159 && !OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
189 && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X); 160 && !OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X);
190 161
191 case OCTEON_FEATURE_DFA:
192 if (!OCTEON_IS_MODEL(OCTEON_CN38XX)
193 && !OCTEON_IS_MODEL(OCTEON_CN31XX)
194 && !OCTEON_IS_MODEL(OCTEON_CN58XX))
195 return 0;
196 else if (OCTEON_IS_MODEL(OCTEON_CN3020))
197 return 0;
198 else
199 return !cvmx_fuse_read(120);
200
201 case OCTEON_FEATURE_HFA:
202 if (!OCTEON_IS_MODEL(OCTEON_CN6XXX))
203 return 0;
204 else
205 return !cvmx_fuse_read(90);
206
207 case OCTEON_FEATURE_DFM:
208 if (!(OCTEON_IS_MODEL(OCTEON_CN63XX)
209 || OCTEON_IS_MODEL(OCTEON_CN66XX)))
210 return 0;
211 else
212 return !cvmx_fuse_read(90);
213
214 case OCTEON_FEATURE_MDIO_CLAUSE_45: 162 case OCTEON_FEATURE_MDIO_CLAUSE_45:
215 return !(OCTEON_IS_MODEL(OCTEON_CN3XXX) 163 return !(OCTEON_IS_MODEL(OCTEON_CN3XXX)
216 || OCTEON_IS_MODEL(OCTEON_CN58XX) 164 || OCTEON_IS_MODEL(OCTEON_CN58XX)
diff --git a/arch/mips/include/asm/octeon/octeon-model.h b/arch/mips/include/asm/octeon/octeon-model.h
index e2c122c6a657..e8a1c2fd52cd 100644
--- a/arch/mips/include/asm/octeon/octeon-model.h
+++ b/arch/mips/include/asm/octeon/octeon-model.h
@@ -326,8 +326,7 @@ static inline int __octeon_is_model_runtime__(uint32_t model)
326#define OCTEON_IS_COMMON_BINARY() 1 326#define OCTEON_IS_COMMON_BINARY() 1
327#undef OCTEON_MODEL 327#undef OCTEON_MODEL
328 328
329const char *octeon_model_get_string(uint32_t chip_id); 329const char *__init octeon_model_get_string(uint32_t chip_id);
330const char *octeon_model_get_string_buffer(uint32_t chip_id, char *buffer);
331 330
332/* 331/*
333 * Return the octeon family, i.e., ProcessorID of the PrID register. 332 * Return the octeon family, i.e., ProcessorID of the PrID register.
diff --git a/arch/mips/include/asm/paccess.h b/arch/mips/include/asm/paccess.h
index 2474fc5d1751..af81ab0da55f 100644
--- a/arch/mips/include/asm/paccess.h
+++ b/arch/mips/include/asm/paccess.h
@@ -56,6 +56,7 @@ struct __large_pstruct { unsigned long buf[100]; };
56 "1:\t" insn "\t%1,%2\n\t" \ 56 "1:\t" insn "\t%1,%2\n\t" \
57 "move\t%0,$0\n" \ 57 "move\t%0,$0\n" \
58 "2:\n\t" \ 58 "2:\n\t" \
59 ".insn\n\t" \
59 ".section\t.fixup,\"ax\"\n" \ 60 ".section\t.fixup,\"ax\"\n" \
60 "3:\tli\t%0,%3\n\t" \ 61 "3:\tli\t%0,%3\n\t" \
61 "move\t%1,$0\n\t" \ 62 "move\t%1,$0\n\t" \
@@ -94,6 +95,7 @@ extern void __get_dbe_unknown(void);
94 "1:\t" insn "\t%1,%2\n\t" \ 95 "1:\t" insn "\t%1,%2\n\t" \
95 "move\t%0,$0\n" \ 96 "move\t%0,$0\n" \
96 "2:\n\t" \ 97 "2:\n\t" \
98 ".insn\n\t" \
97 ".section\t.fixup,\"ax\"\n" \ 99 ".section\t.fixup,\"ax\"\n" \
98 "3:\tli\t%0,%3\n\t" \ 100 "3:\tli\t%0,%3\n\t" \
99 "j\t2b\n\t" \ 101 "j\t2b\n\t" \
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index 3be81803595d..154b70a10483 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -116,7 +116,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
116/* 116/*
117 * These are used to make use of C type-checking.. 117 * These are used to make use of C type-checking..
118 */ 118 */
119#ifdef CONFIG_64BIT_PHYS_ADDR 119#ifdef CONFIG_PHYS_ADDR_T_64BIT
120 #ifdef CONFIG_CPU_MIPS32 120 #ifdef CONFIG_CPU_MIPS32
121 typedef struct { unsigned long pte_low, pte_high; } pte_t; 121 typedef struct { unsigned long pte_low, pte_high; } pte_t;
122 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) 122 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 974b0e308963..69529624a005 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -84,7 +84,7 @@ static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
84 const struct resource *rsrc, resource_size_t *start, 84 const struct resource *rsrc, resource_size_t *start,
85 resource_size_t *end) 85 resource_size_t *end)
86{ 86{
87 phys_t size = resource_size(rsrc); 87 phys_addr_t size = resource_size(rsrc);
88 88
89 *start = fixup_bigphys_addr(rsrc->start, size); 89 *start = fixup_bigphys_addr(rsrc->start, size);
90 *end = rsrc->start + size; 90 *end = rsrc->start + size;
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index cd7d6064bcbe..68984b612f9d 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -69,7 +69,7 @@ extern int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
 # define VMALLOC_END	(FIXADDR_START-2*PAGE_SIZE)
 #endif

-#ifdef CONFIG_64BIT_PHYS_ADDR
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
 #define pte_ERROR(e) \
 	printk("%s:%d: bad pte %016Lx.\n", __FILE__, __LINE__, pte_val(e))
 #else
@@ -103,7 +103,7 @@ static inline void pmd_clear(pmd_t *pmdp)
 	pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
 }

-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 #define pte_page(x)	pfn_to_page(pte_pfn(x))
 #define pte_pfn(x)	((unsigned long)((x).pte_high >> 6))
 static inline pte_t
@@ -126,7 +126,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 #define pte_pfn(x)	((unsigned long)((x).pte >> _PFN_SHIFT))
 #define pfn_pte(pfn, prot)	__pte(((unsigned long long)(pfn) << _PFN_SHIFT) | pgprot_val(prot))
 #endif
-#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */
+#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

 #define __pgd_offset(address)	pgd_index(address)
 #define __pud_offset(address)	(((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
@@ -155,73 +155,75 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
 #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

 /* Swap entries must have VALID bit cleared. */
 #define __swp_type(x)		(((x).val >> 10) & 0x1f)
 #define __swp_offset(x)		((x).val >> 15)
-#define __swp_entry(type,offset)	\
-	((swp_entry_t) { ((type) << 10) | ((offset) << 15) })
+#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 10) | ((offset) << 15) })
+#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

 /*
- * Bits 0, 4, 8, and 9 are taken, split up 28 bits of offset into this range:
+ * Encode and decode a nonlinear file mapping entry
  */
-#define PTE_FILE_MAX_BITS	28
-
 #define pte_to_pgoff(_pte)	((((_pte).pte >> 1 ) & 0x07) | \
 				 (((_pte).pte >> 2 ) & 0x38) | \
 				 (((_pte).pte >> 10) <<  6 ))

 #define pgoff_to_pte(off)	((pte_t) { (((off) & 0x07) << 1 ) | \
 					   (((off) & 0x38) << 2 ) | \
 					   (((off) >>  6 ) << 10) | \
 					   _PAGE_FILE })

+/*
+ * Bits 0, 4, 8, and 9 are taken, split up 28 bits of offset into this range:
+ */
+#define PTE_FILE_MAX_BITS	28
 #else

-/* Swap entries must have VALID and GLOBAL bits cleared. */
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
-#define __swp_type(x)		(((x).val >> 2) & 0x1f)
-#define __swp_offset(x)		((x).val >> 7)
-#define __swp_entry(type,offset)	\
-	((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
-#else
-#define __swp_type(x)		(((x).val >> 8) & 0x1f)
-#define __swp_offset(x)		((x).val >> 13)
-#define __swp_entry(type,offset)	\
-	((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
-#endif /* defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) */
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+
+/* Swap entries must have VALID and GLOBAL bits cleared. */
+#define __swp_type(x)		(((x).val >> 2) & 0x1f)
+#define __swp_offset(x)		((x).val >> 7)
+#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
+#define __pte_to_swp_entry(pte)	((swp_entry_t) { (pte).pte_high })
+#define __swp_entry_to_pte(x)	((pte_t) { 0, (x).val })

-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
 /*
  * Bits 0 and 1 of pte_high are taken, use the rest for the page offset...
  */
-#define PTE_FILE_MAX_BITS	30
-
 #define pte_to_pgoff(_pte)	((_pte).pte_high >> 2)
 #define pgoff_to_pte(off)	((pte_t) { _PAGE_FILE, (off) << 2 })

+#define PTE_FILE_MAX_BITS	30
 #else
 /*
- * Bits 0, 4, 6, and 7 are taken, split up 28 bits of offset into this range:
+ * Constraints:
+ *      _PAGE_PRESENT at bit 0
+ *      _PAGE_MODIFIED at bit 4
+ *      _PAGE_GLOBAL at bit 6
+ *      _PAGE_VALID at bit 7
  */
-#define PTE_FILE_MAX_BITS	28
+#define __swp_type(x)		(((x).val >> 8) & 0x1f)
+#define __swp_offset(x)		((x).val >> 13)
+#define __swp_entry(type,offset)	((swp_entry_t) { ((type) << 8) | ((offset) << 13) })
+#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x)	((pte_t) { (x).val })

-#define pte_to_pgoff(_pte)	((((_pte).pte >> 1) & 0x7) | \
-				 (((_pte).pte >> 2) & 0x8) | \
-				 (((_pte).pte >> 8) << 4))
+/*
+ * Encode and decode a nonlinear file mapping entry
+ */
+#define pte_to_pgoff(_pte)	((((_pte).pte >> 1) & 0x7) | \
+				 (((_pte).pte >> 2) & 0x8) | \
+				 (((_pte).pte >> 8) << 4))

 #define pgoff_to_pte(off)	((pte_t) { (((off) & 0x7) << 1) | \
 					   (((off) & 0x8) << 2) | \
 					   (((off) >> 4) << 8) | \
 					   _PAGE_FILE })
-#endif

-#endif
+#define PTE_FILE_MAX_BITS	28
+#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */

-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
-#define __pte_to_swp_entry(pte)	((swp_entry_t) { (pte).pte_high })
-#define __swp_entry_to_pte(x)	((pte_t) { 0, (x).val })
-#else
-#define __pte_to_swp_entry(pte)	((swp_entry_t) { pte_val(pte) })
-#define __swp_entry_to_pte(x)	((pte_t) { (x).val })
-#endif
+#endif /* defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) */

 #endif /* _ASM_PGTABLE_32_H */
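For reference, the swap-entry encodings being reshuffled above are pure shift-and-mask arithmetic; a standalone round-trip check of the R3000 layout (type in bits 10..14, offset from bit 15 up, so the VALID bit at a lower position stays clear) looks like this. This is a hypothetical test mirroring __swp_entry()/__swp_type()/__swp_offset(), not kernel code:

	#include <assert.h>

	int main(void)
	{
		unsigned long type = 0x11, offset = 0x12345;
		/* __swp_entry() */
		unsigned long val = (type << 10) | (offset << 15);

		assert(((val >> 10) & 0x1f) == type);	/* __swp_type()   */
		assert((val >> 15) == offset);		/* __swp_offset() */
		return 0;
	}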
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index e747bfa0be7e..ca11f14f40a3 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -32,39 +32,41 @@
  * unpredictable things. The code (when it is written) to deal with
  * this problem will be in the update_mmu_cache() code for the r4k.
  */
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

 /*
  * The following bits are directly used by the TLB hardware
  */
-#define _PAGE_R4KBUG		(1 << 0)  /* workaround for r4k bug */
-#define _PAGE_GLOBAL		(1 << 0)
-#define _PAGE_VALID_SHIFT	1
+#define _PAGE_GLOBAL_SHIFT	0
+#define _PAGE_GLOBAL		(1 << _PAGE_GLOBAL_SHIFT)
+#define _PAGE_VALID_SHIFT	(_PAGE_GLOBAL_SHIFT + 1)
 #define _PAGE_VALID		(1 << _PAGE_VALID_SHIFT)
-#define _PAGE_SILENT_READ	(1 << 1)  /* synonym */
-#define _PAGE_DIRTY_SHIFT	2
-#define _PAGE_DIRTY		(1 << _PAGE_DIRTY_SHIFT)  /* The MIPS dirty bit */
-#define _PAGE_SILENT_WRITE	(1 << 2)
-#define _CACHE_SHIFT		3
-#define _CACHE_MASK		(7 << 3)
+#define _PAGE_DIRTY_SHIFT	(_PAGE_VALID_SHIFT + 1)
+#define _PAGE_DIRTY		(1 << _PAGE_DIRTY_SHIFT)
+#define _CACHE_SHIFT		(_PAGE_DIRTY_SHIFT + 1)
+#define _CACHE_MASK		(7 << _CACHE_SHIFT)

 /*
  * The following bits are implemented in software
  *
  * _PAGE_FILE semantics: set:pagecache unset:swap
  */
-#define _PAGE_PRESENT_SHIFT	6
+#define _PAGE_PRESENT_SHIFT	(_CACHE_SHIFT + 3)
 #define _PAGE_PRESENT		(1 << _PAGE_PRESENT_SHIFT)
-#define _PAGE_READ_SHIFT	7
+#define _PAGE_READ_SHIFT	(_PAGE_PRESENT_SHIFT + 1)
 #define _PAGE_READ		(1 << _PAGE_READ_SHIFT)
-#define _PAGE_WRITE_SHIFT	8
+#define _PAGE_WRITE_SHIFT	(_PAGE_READ_SHIFT + 1)
 #define _PAGE_WRITE		(1 << _PAGE_WRITE_SHIFT)
-#define _PAGE_ACCESSED_SHIFT	9
+#define _PAGE_ACCESSED_SHIFT	(_PAGE_WRITE_SHIFT + 1)
 #define _PAGE_ACCESSED		(1 << _PAGE_ACCESSED_SHIFT)
-#define _PAGE_MODIFIED_SHIFT	10
+#define _PAGE_MODIFIED_SHIFT	(_PAGE_ACCESSED_SHIFT + 1)
 #define _PAGE_MODIFIED		(1 << _PAGE_MODIFIED_SHIFT)

-#define _PAGE_FILE		(1 << 10)
+#define _PAGE_SILENT_READ	_PAGE_VALID
+#define _PAGE_SILENT_WRITE	_PAGE_DIRTY
+#define _PAGE_FILE		_PAGE_MODIFIED
+
+#define _PFN_SHIFT		(PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)

 #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

@@ -172,7 +174,7 @@

 #define _PFN_SHIFT		(PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)

-#endif /* defined(CONFIG_64BIT_PHYS_ADDR && defined(CONFIG_CPU_MIPS32) */
+#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT && defined(CONFIG_CPU_MIPS32) */

 #ifndef _PFN_SHIFT
 #define _PFN_SHIFT PAGE_SHIFT
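The effect of the rewrite is that each field's position is derived from its predecessor instead of being a duplicated literal; the resulting positions are unchanged. Spelled out (a sketch of the arithmetic, not kernel code):

	enum {
		GLOBAL_SHIFT   = 0,
		VALID_SHIFT    = GLOBAL_SHIFT + 1,	/* 1 */
		DIRTY_SHIFT    = VALID_SHIFT + 1,	/* 2 */
		CACHE_SHIFT    = DIRTY_SHIFT + 1,	/* 3, three cache bits */
		PRESENT_SHIFT  = CACHE_SHIFT + 3,	/* 6 */
		READ_SHIFT     = PRESENT_SHIFT + 1,	/* 7 */
		WRITE_SHIFT    = READ_SHIFT + 1,	/* 8 */
		ACCESSED_SHIFT = WRITE_SHIFT + 1,	/* 9 */
		MODIFIED_SHIFT = ACCESSED_SHIFT + 1,	/* 10 */
	};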
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index d6d1928539b1..62a6ba383d4f 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -125,7 +125,7 @@ do { \
 extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 	pte_t pteval);

-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)

 #define pte_none(pte)		(!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
 #define pte_present(pte)	((pte).pte_low & _PAGE_PRESENT)
@@ -227,7 +227,7 @@ extern pgd_t swapper_pg_dir[];
  * The following only work if pte_present() is true.
  * Undefined behaviour if not..
  */
-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 static inline int pte_write(pte_t pte)	{ return pte.pte_low & _PAGE_WRITE; }
 static inline int pte_dirty(pte_t pte)	{ return pte.pte_low & _PAGE_MODIFIED; }
 static inline int pte_young(pte_t pte)	{ return pte.pte_low & _PAGE_ACCESSED; }
@@ -297,13 +297,13 @@ static inline pte_t pte_wrprotect(pte_t pte)

 static inline pte_t pte_mkclean(pte_t pte)
 {
-	pte_val(pte) &= ~(_PAGE_MODIFIED|_PAGE_SILENT_WRITE);
+	pte_val(pte) &= ~(_PAGE_MODIFIED | _PAGE_SILENT_WRITE);
 	return pte;
 }

 static inline pte_t pte_mkold(pte_t pte)
 {
-	pte_val(pte) &= ~(_PAGE_ACCESSED|_PAGE_SILENT_READ);
+	pte_val(pte) &= ~(_PAGE_ACCESSED | _PAGE_SILENT_READ);
 	return pte;
 }

@@ -382,13 +382,13 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
  */
 #define mk_pte(page, pgprot)	pfn_pte(page_to_pfn(page), (pgprot))

-#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	pte.pte_low  &= _PAGE_CHG_MASK;
-	pte.pte_high &= ~0x3f;
+	pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
 	pte.pte_low  |= pgprot_val(newprot);
-	pte.pte_high |= pgprot_val(newprot) & 0x3f;
+	pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
 	return pte;
 }
 #else
@@ -419,7 +419,7 @@ static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,

 #define kern_addr_valid(addr)	(1)

-#ifdef CONFIG_64BIT_PHYS_ADDR
+#ifdef CONFIG_PHYS_ADDR_T_64BIT
 extern int remap_pfn_range(struct vm_area_struct *vma, unsigned long from, unsigned long pfn, unsigned long size, pgprot_t prot);

 static inline int io_remap_pfn_range(struct vm_area_struct *vma,
@@ -428,7 +428,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
 					unsigned long size,
 					pgprot_t prot)
 {
-	phys_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
+	phys_addr_t phys_addr_high = fixup_bigphys_addr(pfn << PAGE_SHIFT, size);
 	return remap_pfn_range(vma, vaddr, phys_addr_high >> PAGE_SHIFT, size, prot);
 }
 #define io_remap_pfn_range io_remap_pfn_range
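The pte_modify() change replaces the hard-coded ~0x3f with symbolic masks: pte_high holds the PFN plus the cache attribute bits, and everything else is protection that should come from newprot. The intent, reduced to a hypothetical helper (keep_mask standing in for _PFN_MASK | _CACHE_MASK):

	/* Hypothetical sketch: keep the PFN/cache part of the old value,
	 * take every remaining (protection) bit from the new one.
	 */
	static inline unsigned long modify_high(unsigned long pte_high,
						unsigned long newprot,
						unsigned long keep_mask)
	{
		return (pte_high & keep_mask) | (newprot & ~keep_mask);
	}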
diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h
index a9494c0141fb..eaa26270a5e5 100644
--- a/arch/mips/include/asm/prom.h
+++ b/arch/mips/include/asm/prom.h
@@ -22,6 +22,7 @@ extern void device_tree_init(void);
 struct boot_param_header;

 extern void __dt_setup_arch(void *bph);
+extern int __dt_register_buses(const char *bus0, const char *bus1);

 #define dt_setup_arch(sym)						\
 ({									\
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index cd6e0afc6833..e293a8d89a6d 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -47,79 +47,20 @@ extern void (*r4k_blast_icache)(void);

 #ifdef CONFIG_MIPS_MT

-/*
- * Optionally force single-threaded execution during I-cache flushes.
- */
-#define PROTECT_CACHE_FLUSHES 1
-
-#ifdef PROTECT_CACHE_FLUSHES
-
-extern int mt_protiflush;
-extern int mt_protdflush;
-extern void mt_cflush_lockdown(void);
-extern void mt_cflush_release(void);
-
-#define BEGIN_MT_IPROT \
-	unsigned long flags = 0;			\
-	unsigned long mtflags = 0;			\
-	if(mt_protiflush) {				\
-		local_irq_save(flags);			\
-		ehb();					\
-		mtflags = dvpe();			\
-		mt_cflush_lockdown();			\
-	}
-
-#define END_MT_IPROT \
-	if(mt_protiflush) {				\
-		mt_cflush_release();			\
-		evpe(mtflags);				\
-		local_irq_restore(flags);		\
-	}
-
-#define BEGIN_MT_DPROT \
-	unsigned long flags = 0;			\
-	unsigned long mtflags = 0;			\
-	if(mt_protdflush) {				\
-		local_irq_save(flags);			\
-		ehb();					\
-		mtflags = dvpe();			\
-		mt_cflush_lockdown();			\
-	}
-
-#define END_MT_DPROT \
-	if(mt_protdflush) {				\
-		mt_cflush_release();			\
-		evpe(mtflags);				\
-		local_irq_restore(flags);		\
-	}
-
-#else
-
-#define BEGIN_MT_IPROT
-#define BEGIN_MT_DPROT
-#define END_MT_IPROT
-#define END_MT_DPROT
-
-#endif /* PROTECT_CACHE_FLUSHES */
-
 #define __iflush_prologue						\
 	unsigned long redundance;					\
 	extern int mt_n_iflushes;					\
-	BEGIN_MT_IPROT							\
 	for (redundance = 0; redundance < mt_n_iflushes; redundance++) {

 #define __iflush_epilogue						\
-	END_MT_IPROT							\
 	}

 #define __dflush_prologue						\
	unsigned long redundance;					\
	extern int mt_n_dflushes;					\
-	BEGIN_MT_DPROT							\
 	for (redundance = 0; redundance < mt_n_dflushes; redundance++) {

 #define __dflush_epilogue \
-	END_MT_DPROT							\
 	}

 #define __inv_dflush_prologue __dflush_prologue
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 78d201fb6c87..c6d06d383ef9 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -12,6 +12,7 @@
 #include <linux/compiler.h>

 #include <asm/barrier.h>
+#include <asm/compiler.h>
 #include <asm/war.h>

 /*
@@ -88,7 +89,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
		"	 subu	%[ticket], %[ticket], 1	\n"
		"	.previous			\n"
		"	.set pop			\n"
-		: [ticket_ptr] "+m" (lock->lock),
+		: [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
		  [serving_now_ptr] "+m" (lock->h.serving_now),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket)
@@ -121,7 +122,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
		"	 subu	%[ticket], %[ticket], 1	\n"
		"	.previous			\n"
		"	.set pop			\n"
-		: [ticket_ptr] "+m" (lock->lock),
+		: [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
		  [serving_now_ptr] "+m" (lock->h.serving_now),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (my_ticket)
@@ -163,7 +164,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
		"	 li	%[ticket], 0		\n"
		"	.previous			\n"
		"	.set pop			\n"
-		: [ticket_ptr] "+m" (lock->lock),
+		: [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
@@ -187,7 +188,7 @@ static inline unsigned int arch_spin_trylock(arch_spinlock_t *lock)
		"	 li	%[ticket], 0		\n"
		"	.previous			\n"
		"	.set pop			\n"
-		: [ticket_ptr] "+m" (lock->lock),
+		: [ticket_ptr] "+" GCC_OFF12_ASM() (lock->lock),
		  [ticket] "=&r" (tmp),
		  [my_ticket] "=&r" (tmp2),
		  [now_serving] "=&r" (tmp3)
@@ -234,8 +235,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
		"	beqzl	%1, 1b			\n"
		"	 nop				\n"
		"	.set	reorder			\n"
-		: "=m" (rw->lock), "=&r" (tmp)
-		: "m" (rw->lock)
+		: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
+		: GCC_OFF12_ASM() (rw->lock)
		: "memory");
	} else {
		do {
@@ -244,8 +245,8 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
			"	bltz	%1, 1b		\n"
			"	 addu	%1, 1		\n"
			"2:	sc	%1, %0		\n"
-			: "=m" (rw->lock), "=&r" (tmp)
-			: "m" (rw->lock)
+			: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
+			: GCC_OFF12_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}
@@ -268,8 +269,8 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
		"	sub	%1, 1			\n"
		"	sc	%1, %0			\n"
		"	beqzl	%1, 1b			\n"
-		: "=m" (rw->lock), "=&r" (tmp)
-		: "m" (rw->lock)
+		: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
+		: GCC_OFF12_ASM() (rw->lock)
		: "memory");
	} else {
		do {
@@ -277,8 +278,8 @@ static inline void arch_read_unlock(arch_rwlock_t *rw)
			"1:	ll	%1, %2	# arch_read_unlock	\n"
			"	sub	%1, 1		\n"
			"	sc	%1, %0		\n"
-			: "=m" (rw->lock), "=&r" (tmp)
-			: "m" (rw->lock)
+			: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
+			: GCC_OFF12_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}
@@ -298,8 +299,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
		"	beqzl	%1, 1b			\n"
		"	 nop				\n"
		"	.set	reorder			\n"
-		: "=m" (rw->lock), "=&r" (tmp)
-		: "m" (rw->lock)
+		: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
+		: GCC_OFF12_ASM() (rw->lock)
		: "memory");
	} else {
		do {
@@ -308,8 +309,8 @@ static inline void arch_write_lock(arch_rwlock_t *rw)
			"	bnez	%1, 1b		\n"
			"	 lui	%1, 0x8000	\n"
			"2:	sc	%1, %0		\n"
-			: "=m" (rw->lock), "=&r" (tmp)
-			: "m" (rw->lock)
+			: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp)
+			: GCC_OFF12_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));
	}
@@ -348,8 +349,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
		__WEAK_LLSC_MB
		"	li	%2, 1			\n"
		"2:					\n"
-		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
-		: "m" (rw->lock)
+		: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+		: GCC_OFF12_ASM() (rw->lock)
		: "memory");
	} else {
		__asm__ __volatile__(
@@ -365,8 +366,8 @@ static inline int arch_read_trylock(arch_rwlock_t *rw)
		__WEAK_LLSC_MB
		"	li	%2, 1			\n"
		"2:					\n"
-		: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
-		: "m" (rw->lock)
+		: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+		: GCC_OFF12_ASM() (rw->lock)
		: "memory");
	}

@@ -392,8 +393,8 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
			"	li	%2, 1		\n"
			"	.set	reorder		\n"
			"2:				\n"
-			: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
-			: "m" (rw->lock)
+			: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp), "=&r" (ret)
+			: GCC_OFF12_ASM() (rw->lock)
			: "memory");
	} else {
		do {
@@ -405,8 +406,9 @@ static inline int arch_write_trylock(arch_rwlock_t *rw)
			"	sc	%1, %0		\n"
			"	 li	%2, 1		\n"
			"2:				\n"
-			: "=m" (rw->lock), "=&r" (tmp), "=&r" (ret)
-			: "m" (rw->lock)
+			: "=" GCC_OFF12_ASM() (rw->lock), "=&r" (tmp),
+			  "=&r" (ret)
+			: GCC_OFF12_ASM() (rw->lock)
			: "memory");
		} while (unlikely(!tmp));

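A plain "m" constraint lets GCC pick any addressing mode, including ones the ll/sc instructions cannot encode (on microMIPS they only accept a small signed offset). The newly included <asm/compiler.h> provides GCC_OFF12_ASM() to select a constraint that keeps the offset in range. A hedged sketch of the kind of definition involved (the authoritative version lives in asm/compiler.h):

	/* Sketch: "ZC" (GCC >= 4.9) constrains the operand to an address
	 * valid for ll/sc on the target ISA; older compilers fall back to
	 * "R", an offsettable single-register memory operand.
	 */
	#if (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 9))
	# define GCC_OFF12_ASM()	"ZC"
	#else
	# define GCC_OFF12_ASM()	"R"
	#endif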
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 7de865805deb..99eea59604e9 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -116,6 +116,7 @@ static inline struct thread_info *current_thread_info(void)
 #define TIF_LOAD_WATCH		25	/* If set, load watch registers */
 #define TIF_SYSCALL_TRACEPOINT	26	/* syscall tracepoint instrumentation */
 #define TIF_32BIT_FPREGS	27	/* 32-bit floating point registers */
+#define TIF_HYBRID_FPREGS	28	/* 64b FP registers, odd singles in bits 63:32 of even doubles */
 #define TIF_USEDMSA		29	/* MSA has been used this quantum */
 #define TIF_MSA_CTX_LIVE	30	/* MSA context must be preserved */
 #define TIF_SYSCALL_TRACE	31	/* syscall trace active */
@@ -135,6 +136,7 @@ static inline struct thread_info *current_thread_info(void)
 #define _TIF_FPUBOUND		(1<<TIF_FPUBOUND)
 #define _TIF_LOAD_WATCH		(1<<TIF_LOAD_WATCH)
 #define _TIF_32BIT_FPREGS	(1<<TIF_32BIT_FPREGS)
+#define _TIF_HYBRID_FPREGS	(1<<TIF_HYBRID_FPREGS)
 #define _TIF_USEDMSA		(1<<TIF_USEDMSA)
 #define _TIF_MSA_CTX_LIVE	(1<<TIF_MSA_CTX_LIVE)
 #define _TIF_SYSCALL_TRACEPOINT	(1<<TIF_SYSCALL_TRACEPOINT)
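TIF_HYBRID_FPREGS marks a task whose FP context uses the FR=1 register file while preserving FR=0 semantics (via FRE): odd-numbered singles live in bits 63:32 of the even doubles. Consumers test it like any other thread flag; an illustrative, purely hypothetical check:

	/* Hypothetical consumer: choose the FPU configuration for a task. */
	if (test_thread_flag(TIF_HYBRID_FPREGS))
		configure_fpu_fr1_fre1();	/* hypothetical helper */
	else if (test_thread_flag(TIF_32BIT_FPREGS))
		configure_fpu_fr0();		/* hypothetical helper */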
diff --git a/arch/mips/include/asm/time.h b/arch/mips/include/asm/time.h
index 8f3047d611ee..8ab2874225c4 100644
--- a/arch/mips/include/asm/time.h
+++ b/arch/mips/include/asm/time.h
@@ -46,19 +46,17 @@ extern unsigned int mips_hpt_frequency;
  * so it lives here.
  */
 extern int (*perf_irq)(void);
+extern int __weak get_c0_perfcount_int(void);

 /*
  * Initialize the calling CPU's compare interrupt as clockevent device
  */
 extern unsigned int __weak get_c0_compare_int(void);
 extern int r4k_clockevent_init(void);
-extern int gic_clockevent_init(void);

 static inline int mips_clockevent_init(void)
 {
-#if defined(CONFIG_CEVT_GIC)
-	return (gic_clockevent_init() | r4k_clockevent_init());
-#elif defined(CONFIG_CEVT_R4K)
+#ifdef CONFIG_CEVT_R4K
 	return r4k_clockevent_init();
 #else
 	return -ENXIO;
diff --git a/arch/mips/include/asm/types.h b/arch/mips/include/asm/types.h
index a845aafedee4..148d42a17f30 100644
--- a/arch/mips/include/asm/types.h
+++ b/arch/mips/include/asm/types.h
@@ -11,23 +11,7 @@
 #ifndef _ASM_TYPES_H
 #define _ASM_TYPES_H

-# include <asm-generic/int-ll64.h>
+#include <asm-generic/int-ll64.h>
 #include <uapi/asm/types.h>

-/*
- * These aren't exported outside the kernel to avoid name space clashes
- */
-#ifndef __ASSEMBLY__
-
-/*
- * Don't use phys_t.  You've been warned.
- */
-#ifdef CONFIG_64BIT_PHYS_ADDR
-typedef unsigned long long phys_t;
-#else
-typedef unsigned long phys_t;
-#endif
-
-#endif /* __ASSEMBLY__ */
-
 #endif /* _ASM_TYPES_H */
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 22a5624e2fd2..bf8b32450ef6 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -1325,33 +1325,6 @@ strncpy_from_user(char *__to, const char __user *__from, long __len)
 	return res;
 }

-/* Returns: 0 if bad, string length+1 (memory size) of string if ok */
-static inline long __strlen_user(const char __user *s)
-{
-	long res;
-
-	if (segment_eq(get_fs(), get_ds())) {
-		__asm__ __volatile__(
-			"move\t$4, %1\n\t"
-			__MODULE_JAL(__strlen_kernel_nocheck_asm)
-			"move\t%0, $2"
-			: "=r" (res)
-			: "r" (s)
-			: "$2", "$4", __UA_t0, "$31");
-	} else {
-		might_fault();
-		__asm__ __volatile__(
-			"move\t$4, %1\n\t"
-			__MODULE_JAL(__strlen_user_nocheck_asm)
-			"move\t%0, $2"
-			: "=r" (res)
-			: "r" (s)
-			: "$2", "$4", __UA_t0, "$31");
-	}
-
-	return res;
-}
-
 /*
  * strlen_user: - Get the size of a string in user space.
  * @str: The string to measure.
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index 708c5d414905..fc1cdd25fcda 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -136,9 +136,11 @@ Ip_u1s2(_lui);
 Ip_u2s3u1(_lw);
 Ip_u3u1u2(_lwx);
 Ip_u1u2u3(_mfc0);
+Ip_u1u2u3(_mfhc0);
 Ip_u1(_mfhi);
 Ip_u1(_mflo);
 Ip_u1u2u3(_mtc0);
+Ip_u1u2u3(_mthc0);
 Ip_u3u1u2(_mul);
 Ip_u3u1u2(_or);
 Ip_u2u1u3(_ori);
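Each Ip_* line declares a uasm emitter, so these two additions let run-time-generated code (for instance TLB handlers) read and write the upper 32 bits of extended coprocessor-0 registers. Going by the other u1u2u3 ops such as uasm_i_mfc0(), usage would look like this hedged sketch:

	/* Hedged sketch: emit "mfhc0" into a uasm buffer at p, following
	 * the calling convention of the existing uasm_i_mfc0(), where the
	 * C0_* macro supplies the (rd, sel) pair.
	 */
	uasm_i_mfhc0(&p, K1, C0_ENTRYLO0);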
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 4bfdb9d4c186..89c22433b1c6 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -108,9 +108,10 @@ enum rt_op {
  */
 enum cop_op {
 	mfc_op	      = 0x00, dmfc_op	    = 0x01,
-	cfc_op	      = 0x02, mfhc_op	    = 0x03,
-	mtc_op	      = 0x04, dmtc_op	    = 0x05,
-	ctc_op	      = 0x06, mthc_op	    = 0x07,
+	cfc_op	      = 0x02, mfhc0_op	    = 0x02,
+	mfhc_op	      = 0x03, mtc_op	    = 0x04,
+	dmtc_op	      = 0x05, ctc_op	    = 0x06,
+	mthc0_op      = 0x06, mthc_op	    = 0x07,
 	bc_op	      = 0x08, cop_op	    = 0x10,
 	copm_op	      = 0x18
 };
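The duplicated values are deliberate: this enum decodes the rs field (bits 25:21) of a COPz major opcode, and CP0 has no CFC0/CTC0, so the 0x02 and 0x06 slots are reused by MFHC0 and MTHC0 when the coprocessor is CP0. A decoder therefore has to key on both the rs value and which coprocessor the major opcode selects; a sketch of the field extraction:

	/* Sketch: distinguishing MFHC0 from CFC1 requires looking at the
	 * major opcode (COP0 vs COP1) as well as the shared rs value 0x02.
	 */
	unsigned int rs = (word >> 21) & 0x1f;	/* compare against cop_op values */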
diff --git a/arch/mips/jz4740/setup.c b/arch/mips/jz4740/setup.c
index 76eafcb79c89..ef796f97b996 100644
--- a/arch/mips/jz4740/setup.c
+++ b/arch/mips/jz4740/setup.c
@@ -32,7 +32,7 @@ static void __init jz4740_detect_mem(void)
 {
 	void __iomem *jz_emc_base;
 	u32 ctrl, bus, bank, rows, cols;
-	phys_t size;
+	phys_addr_t size;

 	jz_emc_base = ioremap(JZ4740_EMC_BASE_ADDR, 0x100);
 	ctrl = readl(jz_emc_base + JZ4740_EMC_SDRAM_CTRL);
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 008a2fed0584..92987d1bbe5f 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -4,9 +4,10 @@

 extra-y		:= head.o vmlinux.lds

-obj-y		+= cpu-probe.o branch.o entry.o genex.o idle.o irq.o process.o \
-		   prom.o ptrace.o reset.o setup.o signal.o syscall.o \
-		   time.o topology.o traps.o unaligned.o watch.o vdso.o
+obj-y		+= cpu-probe.o branch.o elf.o entry.o genex.o idle.o irq.o \
+		   process.o prom.o ptrace.o reset.o setup.o signal.o \
+		   syscall.o time.o topology.o traps.o unaligned.o watch.o \
+		   vdso.o

 ifdef CONFIG_FUNCTION_TRACER
 CFLAGS_REMOVE_ftrace.o = -pg
@@ -18,12 +19,10 @@ endif
 obj-$(CONFIG_CEVT_BCM1480)	+= cevt-bcm1480.o
 obj-$(CONFIG_CEVT_R4K)		+= cevt-r4k.o
 obj-$(CONFIG_CEVT_DS1287)	+= cevt-ds1287.o
-obj-$(CONFIG_CEVT_GIC)		+= cevt-gic.o
 obj-$(CONFIG_CEVT_GT641XX)	+= cevt-gt641xx.o
 obj-$(CONFIG_CEVT_SB1250)	+= cevt-sb1250.o
 obj-$(CONFIG_CEVT_TXX9)		+= cevt-txx9.o
 obj-$(CONFIG_CSRC_BCM1480)	+= csrc-bcm1480.o
-obj-$(CONFIG_CSRC_GIC)		+= csrc-gic.o
 obj-$(CONFIG_CSRC_IOASIC)	+= csrc-ioasic.o
 obj-$(CONFIG_CSRC_R4K)		+= csrc-r4k.o
 obj-$(CONFIG_CSRC_SB1250)	+= csrc-sb1250.o
@@ -68,7 +67,6 @@ obj-$(CONFIG_IRQ_CPU_RM7K)	+= irq-rm7000.o
 obj-$(CONFIG_MIPS_MSC)		+= irq-msc01.o
 obj-$(CONFIG_IRQ_TXX9)		+= irq_txx9.o
 obj-$(CONFIG_IRQ_GT641XX)	+= irq-gt641xx.o
-obj-$(CONFIG_IRQ_GIC)		+= irq-gic.o

 obj-$(CONFIG_KPROBES)		+= kprobes.o
 obj-$(CONFIG_32BIT)		+= scall32-o32.o
diff --git a/arch/mips/kernel/cevt-gic.c b/arch/mips/kernel/cevt-gic.c
deleted file mode 100644
index 6093716980b9..000000000000
--- a/arch/mips/kernel/cevt-gic.c
+++ /dev/null
@@ -1,105 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2013  Imagination Technologies Ltd.
- */
-#include <linux/clockchips.h>
-#include <linux/interrupt.h>
-#include <linux/percpu.h>
-#include <linux/smp.h>
-#include <linux/irq.h>
-
-#include <asm/time.h>
-#include <asm/gic.h>
-#include <asm/mips-boards/maltaint.h>
-
-DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device);
-int gic_timer_irq_installed;
-
-
-static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
-{
-	u64 cnt;
-	int res;
-
-	cnt = gic_read_count();
-	cnt += (u64)delta;
-	gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask));
-	res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
-	return res;
-}
-
-void gic_set_clock_mode(enum clock_event_mode mode,
-				struct clock_event_device *evt)
-{
-	/* Nothing to do ...  */
-}
-
-irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
-{
-	struct clock_event_device *cd;
-	int cpu = smp_processor_id();
-
-	gic_write_compare(gic_read_compare());
-	cd = &per_cpu(gic_clockevent_device, cpu);
-	cd->event_handler(cd);
-	return IRQ_HANDLED;
-}
-
-struct irqaction gic_compare_irqaction = {
-	.handler = gic_compare_interrupt,
-	.flags = IRQF_PERCPU | IRQF_TIMER,
-	.name = "timer",
-};
-
-
-void gic_event_handler(struct clock_event_device *dev)
-{
-}
-
-int gic_clockevent_init(void)
-{
-	unsigned int cpu = smp_processor_id();
-	struct clock_event_device *cd;
-	unsigned int irq;
-
-	if (!cpu_has_counter || !gic_frequency)
-		return -ENXIO;
-
-	irq = MIPS_GIC_IRQ_BASE;
-
-	cd = &per_cpu(gic_clockevent_device, cpu);
-
-	cd->name		= "MIPS GIC";
-	cd->features		= CLOCK_EVT_FEAT_ONESHOT |
-				  CLOCK_EVT_FEAT_C3STOP;
-
-	clockevent_set_clock(cd, gic_frequency);
-
-	/* Calculate the min / max delta */
-	cd->max_delta_ns	= clockevent_delta2ns(0x7fffffff, cd);
-	cd->min_delta_ns	= clockevent_delta2ns(0x300, cd);
-
-	cd->rating		= 300;
-	cd->irq			= irq;
-	cd->cpumask		= cpumask_of(cpu);
-	cd->set_next_event	= gic_next_event;
-	cd->set_mode		= gic_set_clock_mode;
-	cd->event_handler	= gic_event_handler;
-
-	clockevents_register_device(cd);
-
-	GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_MAP), 0x80000002);
-	GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), GIC_VPE_SMASK_CMP_MSK);
-
-	if (gic_timer_irq_installed)
-		return 0;
-
-	gic_timer_irq_installed = 1;
-
-	setup_irq(irq, &gic_compare_irqaction);
-	irq_set_handler(irq, handle_percpu_irq);
-	return 0;
-}
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index bc127e22fdab..6acaad0480af 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -11,10 +11,10 @@
 #include <linux/percpu.h>
 #include <linux/smp.h>
 #include <linux/irq.h>
+#include <linux/irqchip/mips-gic.h>

 #include <asm/time.h>
 #include <asm/cevt-r4k.h>
-#include <asm/gic.h>

 static int mips_next_event(unsigned long delta,
 			   struct clock_event_device *evt)
@@ -85,8 +85,8 @@ void mips_event_handler(struct clock_event_device *dev)
  */
 static int c0_compare_int_pending(void)
 {
-#ifdef CONFIG_IRQ_GIC
-	if (cpu_has_veic)
+#ifdef CONFIG_MIPS_GIC
+	if (gic_present)
 		return gic_get_timer_pending();
 #endif
 	return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index dc49cf30c2db..5342674842f5 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -69,6 +69,63 @@ static int __init htw_disable(char *s)

 __setup("nohtw", htw_disable);

+static int mips_ftlb_disabled;
+static int mips_has_ftlb_configured;
+
+static void set_ftlb_enable(struct cpuinfo_mips *c, int enable);
+
+static int __init ftlb_disable(char *s)
+{
+	unsigned int config4, mmuextdef;
+
+	/*
+	 * If the core hasn't done any FTLB configuration, there is nothing
+	 * for us to do here.
+	 */
+	if (!mips_has_ftlb_configured)
+		return 1;
+
+	/* Disable it in the boot cpu */
+	set_ftlb_enable(&cpu_data[0], 0);
+
+	back_to_back_c0_hazard();
+
+	config4 = read_c0_config4();
+
+	/* Check that FTLB has been disabled */
+	mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+	/* MMUSIZEEXT == VTLB ON, FTLB OFF */
+	if (mmuextdef == MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT) {
+		/* This should never happen */
+		pr_warn("FTLB could not be disabled!\n");
+		return 1;
+	}
+
+	mips_ftlb_disabled = 1;
+	mips_has_ftlb_configured = 0;
+
+	/*
+	 * noftlb is mainly used for debug purposes so print
+	 * an informative message instead of using pr_debug()
+	 */
+	pr_info("FTLB has been disabled\n");
+
+	/*
+	 * Some of these bits are duplicated in the decode_config4.
+	 * MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT is the only possible case
+	 * once FTLB has been disabled so undo what decode_config4 did.
+	 */
+	cpu_data[0].tlbsize -= cpu_data[0].tlbsizeftlbways *
+			       cpu_data[0].tlbsizeftlbsets;
+	cpu_data[0].tlbsizeftlbsets = 0;
+	cpu_data[0].tlbsizeftlbways = 0;
+
+	return 1;
+}
+
+__setup("noftlb", ftlb_disable);
+
+
 static inline void check_errata(void)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
@@ -140,7 +197,7 @@ static inline unsigned long cpu_get_fpu_id(void)
  */
 static inline int __cpu_has_fpu(void)
 {
-	return ((cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE);
+	return (cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE;
 }

 static inline unsigned long cpu_get_msa_id(void)
@@ -399,6 +456,8 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
 		ftlb_page = MIPS_CONF4_VFTLBPAGESIZE;
 		/* fall through */
 	case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT:
+		if (mips_ftlb_disabled)
+			break;
 		newcf4 = (config4 & ~ftlb_page) |
 			 (page_size_ftlb(mmuextdef) <<
 			  MIPS_CONF4_FTLBPAGESIZE_SHIFT);
@@ -418,6 +477,7 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
 		c->tlbsizeftlbways = ((config4 & MIPS_CONF4_FTLBWAYS) >>
 				      MIPS_CONF4_FTLBWAYS_SHIFT) + 2;
 		c->tlbsize += c->tlbsizeftlbways * c->tlbsizeftlbsets;
+		mips_has_ftlb_configured = 1;
 		break;
 	}
 }
@@ -432,7 +492,7 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
 	unsigned int config5;

 	config5 = read_c0_config5();
-	config5 &= ~MIPS_CONF5_UFR;
+	config5 &= ~(MIPS_CONF5_UFR | MIPS_CONF5_UFE);
 	write_c0_config5(config5);

 	if (config5 & MIPS_CONF5_EVA)
@@ -453,8 +513,8 @@ static void decode_configs(struct cpuinfo_mips *c)

 	c->scache.flags = MIPS_CACHE_NOT_PRESENT;

-	/* Enable FTLB if present */
-	set_ftlb_enable(c, 1);
+	/* Enable FTLB if present and not disabled */
+	set_ftlb_enable(c, !mips_ftlb_disabled);

 	ok = decode_config0(c);			/* Read Config registers.  */
 	BUG_ON(!ok);				/* Arch spec violation!  */
@@ -1058,6 +1118,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
 		break;
 	}
 	case PRID_IMP_BMIPS5000:
+	case PRID_IMP_BMIPS5200:
 		c->cputype = CPU_BMIPS5000;
 		__cpu_name[cpu] = "Broadcom BMIPS5000";
 		set_elf_platform(cpu, "bmips5000");
@@ -1288,6 +1349,8 @@ void cpu_probe(void)
 			      MIPS_CPU_ISA_M64R1 | MIPS_CPU_ISA_M64R2)) {
 			if (c->fpu_id & MIPS_FPIR_3D)
 				c->ases |= MIPS_ASE_MIPS3D;
+			if (c->fpu_id & MIPS_FPIR_FREP)
+				c->options |= MIPS_CPU_FRE;
 		}
 	}

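Usage note on the new parameter: booting with "noftlb" on the kernel command line disables the FTLB on the boot CPU; ftlb_disable() returns 1 so the option is consumed, and decode_config4() skips FTLB sizing once mips_ftlb_disabled is set. The handler is registered through the standard __setup() mechanism, so no Kconfig option is involved. For example (hedged; U-Boot syntax shown, any bootloader that sets the command line will do):

	setenv bootargs "console=ttyS0,115200 noftlb"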
diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c
index f291cf99b03a..6fe7790e5868 100644
--- a/arch/mips/kernel/crash_dump.c
+++ b/arch/mips/kernel/crash_dump.c
@@ -38,7 +38,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
 		kunmap_atomic(vaddr);
 	} else {
 		if (!kdump_buf_page) {
-			pr_warning("Kdump: Kdump buffer page not allocated\n");
+			pr_warn("Kdump: Kdump buffer page not allocated\n");

 			return -EFAULT;
 		}
@@ -57,7 +57,7 @@ static int __init kdump_buf_page_init(void)

 	kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL);
 	if (!kdump_buf_page) {
-		pr_warning("Kdump: Failed to allocate kdump buffer page\n");
+		pr_warn("Kdump: Failed to allocate kdump buffer page\n");
 		ret = -ENOMEM;
 	}

63 63
diff --git a/arch/mips/kernel/csrc-gic.c b/arch/mips/kernel/csrc-gic.c
deleted file mode 100644
index e02620901117..000000000000
--- a/arch/mips/kernel/csrc-gic.c
+++ /dev/null
@@ -1,40 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License.  See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
- */
-#include <linux/init.h>
-#include <linux/time.h>
-
-#include <asm/gic.h>
-
-static cycle_t gic_hpt_read(struct clocksource *cs)
-{
-	return gic_read_count();
-}
-
-static struct clocksource gic_clocksource = {
-	.name	= "GIC",
-	.read	= gic_hpt_read,
-	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
-};
-
-void __init gic_clocksource_init(unsigned int frequency)
-{
-	unsigned int config, bits;
-
-	/* Calculate the clocksource mask. */
-	GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), config);
-	bits = 32 + ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
-		     (GIC_SH_CONFIG_COUNTBITS_SHF - 2));
-
-	/* Set clocksource mask. */
-	gic_clocksource.mask = CLOCKSOURCE_MASK(bits);
-
-	/* Calculate a somewhat reasonable rating value. */
-	gic_clocksource.rating = 200 + frequency / 10000000;
-
-	clocksource_register_hz(&gic_clocksource, frequency);
-}
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
new file mode 100644
index 000000000000..c92b15df6893
--- /dev/null
+++ b/arch/mips/kernel/elf.c
@@ -0,0 +1,191 @@
+/*
+ * Copyright (C) 2014 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#include <linux/elf.h>
+#include <linux/sched.h>
+
+enum {
+	FP_ERROR = -1,
+	FP_DOUBLE_64A = -2,
+};
+
+int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
+		     bool is_interp, struct arch_elf_state *state)
+{
+	struct elfhdr *ehdr = _ehdr;
+	struct elf_phdr *phdr = _phdr;
+	struct mips_elf_abiflags_v0 abiflags;
+	int ret;
+
+	if (config_enabled(CONFIG_64BIT) &&
+	    (ehdr->e_ident[EI_CLASS] != ELFCLASS32))
+		return 0;
+	if (phdr->p_type != PT_MIPS_ABIFLAGS)
+		return 0;
+	if (phdr->p_filesz < sizeof(abiflags))
+		return -EINVAL;
+
+	ret = kernel_read(elf, phdr->p_offset, (char *)&abiflags,
+			  sizeof(abiflags));
+	if (ret < 0)
+		return ret;
+	if (ret != sizeof(abiflags))
+		return -EIO;
+
+	/* Record the required FP ABIs for use by mips_check_elf */
+	if (is_interp)
+		state->interp_fp_abi = abiflags.fp_abi;
+	else
+		state->fp_abi = abiflags.fp_abi;
+
+	return 0;
+}
+
+static inline unsigned get_fp_abi(struct elfhdr *ehdr, int in_abi)
+{
+	/* If the ABI requirement is provided, simply return that */
+	if (in_abi != -1)
+		return in_abi;
+
+	/* If the EF_MIPS_FP64 flag was set, return MIPS_ABI_FP_64 */
+	if (ehdr->e_flags & EF_MIPS_FP64)
+		return MIPS_ABI_FP_64;
+
+	/* Default to MIPS_ABI_FP_DOUBLE */
+	return MIPS_ABI_FP_DOUBLE;
+}
+
+int arch_check_elf(void *_ehdr, bool has_interpreter,
+		   struct arch_elf_state *state)
+{
+	struct elfhdr *ehdr = _ehdr;
+	unsigned fp_abi, interp_fp_abi, abi0, abi1;
+
+	/* Ignore non-O32 binaries */
+	if (config_enabled(CONFIG_64BIT) &&
+	    (ehdr->e_ident[EI_CLASS] != ELFCLASS32))
+		return 0;
+
+	fp_abi = get_fp_abi(ehdr, state->fp_abi);
+
+	if (has_interpreter) {
+		interp_fp_abi = get_fp_abi(ehdr, state->interp_fp_abi);
+
+		abi0 = min(fp_abi, interp_fp_abi);
+		abi1 = max(fp_abi, interp_fp_abi);
+	} else {
+		abi0 = abi1 = fp_abi;
+	}
+
+	state->overall_abi = FP_ERROR;
+
+	if (abi0 == abi1) {
+		state->overall_abi = abi0;
+	} else if (abi0 == MIPS_ABI_FP_ANY) {
+		state->overall_abi = abi1;
+	} else if (abi0 == MIPS_ABI_FP_DOUBLE) {
+		switch (abi1) {
+		case MIPS_ABI_FP_XX:
+			state->overall_abi = MIPS_ABI_FP_DOUBLE;
+			break;
+
+		case MIPS_ABI_FP_64A:
+			state->overall_abi = FP_DOUBLE_64A;
+			break;
+		}
+	} else if (abi0 == MIPS_ABI_FP_SINGLE ||
+		   abi0 == MIPS_ABI_FP_SOFT) {
+		/* Cannot link with other ABIs */
+	} else if (abi0 == MIPS_ABI_FP_OLD_64) {
+		switch (abi1) {
+		case MIPS_ABI_FP_XX:
+		case MIPS_ABI_FP_64:
+		case MIPS_ABI_FP_64A:
+			state->overall_abi = MIPS_ABI_FP_64;
+			break;
+		}
+	} else if (abi0 == MIPS_ABI_FP_XX ||
+		   abi0 == MIPS_ABI_FP_64 ||
+		   abi0 == MIPS_ABI_FP_64A) {
+		state->overall_abi = MIPS_ABI_FP_64;
+	}
+
+	switch (state->overall_abi) {
+	case MIPS_ABI_FP_64:
+	case MIPS_ABI_FP_64A:
+	case FP_DOUBLE_64A:
+		if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
+			return -ELIBBAD;
+		break;
+
+	case FP_ERROR:
+		return -ELIBBAD;
+	}
+
+	return 0;
+}
+
+void mips_set_personality_fp(struct arch_elf_state *state)
+{
+	if (config_enabled(CONFIG_FP32XX_HYBRID_FPRS)) {
+		/*
+		 * Use hybrid FPRs for all code which can correctly execute
+		 * with that mode.
+		 */
+		switch (state->overall_abi) {
+		case MIPS_ABI_FP_DOUBLE:
+		case MIPS_ABI_FP_SINGLE:
+		case MIPS_ABI_FP_SOFT:
+		case MIPS_ABI_FP_XX:
+		case MIPS_ABI_FP_ANY:
+			/* FR=1, FRE=1 */
+			clear_thread_flag(TIF_32BIT_FPREGS);
+			set_thread_flag(TIF_HYBRID_FPREGS);
+			return;
+		}
+	}
+
+	switch (state->overall_abi) {
+	case MIPS_ABI_FP_DOUBLE:
+	case MIPS_ABI_FP_SINGLE:
+	case MIPS_ABI_FP_SOFT:
+		/* FR=0 */
+		set_thread_flag(TIF_32BIT_FPREGS);
+		clear_thread_flag(TIF_HYBRID_FPREGS);
+		break;
+
+	case FP_DOUBLE_64A:
+		/* FR=1, FRE=1 */
+		clear_thread_flag(TIF_32BIT_FPREGS);
+		set_thread_flag(TIF_HYBRID_FPREGS);
+		break;
+
+	case MIPS_ABI_FP_64:
+	case MIPS_ABI_FP_64A:
+		/* FR=1, FRE=0 */
+		clear_thread_flag(TIF_32BIT_FPREGS);
+		clear_thread_flag(TIF_HYBRID_FPREGS);
+		break;
+
+	case MIPS_ABI_FP_XX:
+	case MIPS_ABI_FP_ANY:
+		if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
+			set_thread_flag(TIF_32BIT_FPREGS);
+		else
+			clear_thread_flag(TIF_32BIT_FPREGS);
+
+		clear_thread_flag(TIF_HYBRID_FPREGS);
+		break;
+
+	default:
+	case FP_ERROR:
+		BUG();
+	}
+}
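Worked example of the resolution above: suppose the executable's PT_MIPS_ABIFLAGS records MIPS_ABI_FP_DOUBLE and its interpreter records MIPS_ABI_FP_64A. Assuming the standard ordering of the MIPS_ABI_FP_* constants (DOUBLE sorts below 64A), abi0 becomes FP_DOUBLE and abi1 becomes FP_64A, so the FP_DOUBLE branch maps the pair to FP_DOUBLE_64A; mips_set_personality_fp() then selects FR=1 with FRE=1, letting the FP_DOUBLE code run unmodified on an FR=1 register file. With CONFIG_MIPS_O32_FP64_SUPPORT disabled, arch_check_elf() instead rejects the pair with -ELIBBAD.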
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index 50b364897dda..a74ec3ae557c 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 #include <linux/ioport.h>
 #include <linux/interrupt.h>
+#include <linux/irqdomain.h>
 #include <linux/kernel.h>
 #include <linux/spinlock.h>
 #include <linux/syscore_ops.h>
@@ -308,6 +309,19 @@ static struct resource pic2_io_resource = {
 	.flags = IORESOURCE_BUSY
 };

+static int i8259A_irq_domain_map(struct irq_domain *d, unsigned int virq,
+				 irq_hw_number_t hw)
+{
+	irq_set_chip_and_handler(virq, &i8259A_chip, handle_level_irq);
+	irq_set_probe(virq);
+	return 0;
+}
+
+static struct irq_domain_ops i8259A_ops = {
+	.map = i8259A_irq_domain_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
 /*
  * On systems with i8259-style interrupt controllers we assume for
  * driver compatibility reasons interrupts 0 - 15 to be the i8259
@@ -315,17 +329,17 @@ static struct resource pic2_io_resource = {
  */
 void __init init_i8259_irqs(void)
 {
-	int i;
+	struct irq_domain *domain;

 	insert_resource(&ioport_resource, &pic1_io_resource);
 	insert_resource(&ioport_resource, &pic2_io_resource);

 	init_8259A(0);

-	for (i = I8259A_IRQ_BASE; i < I8259A_IRQ_BASE + 16; i++) {
-		irq_set_chip_and_handler(i, &i8259A_chip, handle_level_irq);
-		irq_set_probe(i);
-	}
+	domain = irq_domain_add_legacy(NULL, 16, I8259A_IRQ_BASE, 0,
+				       &i8259A_ops, NULL);
+	if (!domain)
+		panic("Failed to add i8259 IRQ domain");

 	setup_irq(I8259A_IRQ_BASE + PIC_CASCADE_IR, &irq2);
 }
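irq_domain_add_legacy() pre-allocates the same fixed mapping the removed loop built by hand: 16 virqs starting at I8259A_IRQ_BASE, mapped 1:1 onto hwirqs 0..15, with i8259A_irq_domain_map() invoked for each one to set the chip and handler. The payoff is that the PIC now owns a domain that other code (device tree included, via irq_domain_xlate_onecell) can resolve hwirqs against. Illustrative use:

	/* Illustrative: resolve hwirq 3 (the i8259's IR3 line) to its Linux
	 * IRQ number; for a legacy domain this returns the pre-built mapping.
	 */
	unsigned int virq = irq_create_mapping(domain, 3);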
diff --git a/arch/mips/kernel/irq-gic.c b/arch/mips/kernel/irq-gic.c
deleted file mode 100644
index 9e9d8b9a5b97..000000000000
--- a/arch/mips/kernel/irq-gic.c
+++ /dev/null
@@ -1,402 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
7 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
8 */
9#include <linux/bitmap.h>
10#include <linux/init.h>
11#include <linux/smp.h>
12#include <linux/irq.h>
13#include <linux/clocksource.h>
14
15#include <asm/io.h>
16#include <asm/gic.h>
17#include <asm/setup.h>
18#include <asm/traps.h>
19#include <linux/hardirq.h>
20#include <asm-generic/bitops/find.h>
21
22unsigned int gic_frequency;
23unsigned int gic_present;
24unsigned long _gic_base;
25unsigned int gic_irq_base;
26unsigned int gic_irq_flags[GIC_NUM_INTRS];
27
28/* The index into this array is the vector # of the interrupt. */
29struct gic_shared_intr_map gic_shared_intr_map[GIC_NUM_INTRS];
30
31struct gic_pcpu_mask {
32 DECLARE_BITMAP(pcpu_mask, GIC_NUM_INTRS);
33};
34
35struct gic_pending_regs {
36 DECLARE_BITMAP(pending, GIC_NUM_INTRS);
37};
38
39struct gic_intrmask_regs {
40 DECLARE_BITMAP(intrmask, GIC_NUM_INTRS);
41};
42
43static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
44static struct gic_pending_regs pending_regs[NR_CPUS];
45static struct gic_intrmask_regs intrmask_regs[NR_CPUS];
46
47#if defined(CONFIG_CSRC_GIC) || defined(CONFIG_CEVT_GIC)
48cycle_t gic_read_count(void)
49{
50 unsigned int hi, hi2, lo;
51
52 do {
53 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi);
54 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), lo);
55 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_63_32), hi2);
56 } while (hi2 != hi);
57
58 return (((cycle_t) hi) << 32) + lo;
59}
60
61void gic_write_compare(cycle_t cnt)
62{
63 GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
64 (int)(cnt >> 32));
65 GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
66 (int)(cnt & 0xffffffff));
67}
68
69void gic_write_cpu_compare(cycle_t cnt, int cpu)
70{
71 unsigned long flags;
72
73 local_irq_save(flags);
74
75 GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
76 GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
77 (int)(cnt >> 32));
78 GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
79 (int)(cnt & 0xffffffff));
80
81 local_irq_restore(flags);
82}
83
84cycle_t gic_read_compare(void)
85{
86 unsigned int hi, lo;
87
88 GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI), hi);
89 GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO), lo);
90
91 return (((cycle_t) hi) << 32) + lo;
92}
93#endif
94
95unsigned int gic_get_timer_pending(void)
96{
97 unsigned int vpe_pending;
98
99 GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), 0);
100 GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_PEND), vpe_pending);
101 return (vpe_pending & GIC_VPE_PEND_TIMER_MSK);
102}
103
104void gic_bind_eic_interrupt(int irq, int set)
105{
106 /* Convert irq vector # to hw int # */
107 irq -= GIC_PIN_TO_VEC_OFFSET;
108
109 /* Set irq to use shadow set */
110 GICWRITE(GIC_REG_ADDR(VPE_LOCAL, GIC_VPE_EIC_SS(irq)), set);
111}
112
113void gic_send_ipi(unsigned int intr)
114{
115 GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), 0x80000000 | intr);
116}
117
118static void gic_eic_irq_dispatch(void)
119{
120 unsigned int cause = read_c0_cause();
121 int irq;
122
123 irq = (cause & ST0_IM) >> STATUSB_IP2;
124 if (irq == 0)
125 irq = -1;
126
127 if (irq >= 0)
128 do_IRQ(gic_irq_base + irq);
129 else
130 spurious_interrupt();
131}
132
133static void __init vpe_local_setup(unsigned int numvpes)
134{
135 unsigned long timer_intr = GIC_INT_TMR;
136 unsigned long perf_intr = GIC_INT_PERFCTR;
137 unsigned int vpe_ctl;
138 int i;
139
140 if (cpu_has_veic) {
141 /*
142 * GIC timer interrupt -> CPU HW Int X (vector X+2) ->
143 * map to pin X+2-1 (since GIC adds 1)
144 */
145 timer_intr += (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET);
146 /*
147 * GIC perfcnt interrupt -> CPU HW Int X (vector X+2) ->
148 * map to pin X+2-1 (since GIC adds 1)
149 */
150 perf_intr += (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET);
151 }
152
153 /*
154 * Setup the default performance counter timer interrupts
155 * for all VPEs
156 */
157 for (i = 0; i < numvpes; i++) {
158 GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
159
160 /* Are Interrupts locally routable? */
161 GICREAD(GIC_REG(VPE_OTHER, GIC_VPE_CTL), vpe_ctl);
162 if (vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK)
163 GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
164 GIC_MAP_TO_PIN_MSK | timer_intr);
165 if (cpu_has_veic) {
166 set_vi_handler(timer_intr + GIC_PIN_TO_VEC_OFFSET,
167 gic_eic_irq_dispatch);
168 gic_shared_intr_map[timer_intr + GIC_PIN_TO_VEC_OFFSET].local_intr_mask |= GIC_VPE_RMASK_TIMER_MSK;
169 }
170
171 if (vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK)
172 GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
173 GIC_MAP_TO_PIN_MSK | perf_intr);
174 if (cpu_has_veic) {
175 set_vi_handler(perf_intr + GIC_PIN_TO_VEC_OFFSET, gic_eic_irq_dispatch);
176 gic_shared_intr_map[perf_intr + GIC_PIN_TO_VEC_OFFSET].local_intr_mask |= GIC_VPE_RMASK_PERFCNT_MSK;
177 }
178 }
179}
180
181unsigned int gic_compare_int(void)
182{
183 unsigned int pending;
184
185 GICREAD(GIC_REG(VPE_LOCAL, GIC_VPE_PEND), pending);
186 if (pending & GIC_VPE_PEND_CMP_MSK)
187 return 1;
188 else
189 return 0;
190}
191
192void gic_get_int_mask(unsigned long *dst, const unsigned long *src)
193{
194 unsigned int i;
195 unsigned long *pending, *intrmask, *pcpu_mask;
196 unsigned long *pending_abs, *intrmask_abs;
197
198 /* Get per-cpu bitmaps */
199 pending = pending_regs[smp_processor_id()].pending;
200 intrmask = intrmask_regs[smp_processor_id()].intrmask;
201 pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;
202
203 pending_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
204 GIC_SH_PEND_31_0_OFS);
205 intrmask_abs = (unsigned long *) GIC_REG_ABS_ADDR(SHARED,
206 GIC_SH_MASK_31_0_OFS);
207
208 for (i = 0; i < BITS_TO_LONGS(GIC_NUM_INTRS); i++) {
209 GICREAD(*pending_abs, pending[i]);
210 GICREAD(*intrmask_abs, intrmask[i]);
211 pending_abs++;
212 intrmask_abs++;
213 }
214
215 bitmap_and(pending, pending, intrmask, GIC_NUM_INTRS);
216 bitmap_and(pending, pending, pcpu_mask, GIC_NUM_INTRS);
217 bitmap_and(dst, src, pending, GIC_NUM_INTRS);
218}
219
220unsigned int gic_get_int(void)
221{
222 DECLARE_BITMAP(interrupts, GIC_NUM_INTRS);
223
224 bitmap_fill(interrupts, GIC_NUM_INTRS);
225 gic_get_int_mask(interrupts, interrupts);
226
227 return find_first_bit(interrupts, GIC_NUM_INTRS);
228}
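
/*
 * Editor's sketch: how a platform dispatcher might consume gic_get_int().
 * find_first_bit() returns GIC_NUM_INTRS when no masked-in interrupt is
 * pending, so that value doubles as the "nothing to do" indicator.
 */
static void example_gic_dispatch(void)
{
	unsigned int irq = gic_get_int();

	if (irq < GIC_NUM_INTRS)
		do_IRQ(gic_irq_base + irq);	/* translate to Linux IRQ */
	else
		spurious_interrupt();
}
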
229
230static void gic_mask_irq(struct irq_data *d)
231{
232 GIC_CLR_INTR_MASK(d->irq - gic_irq_base);
233}
234
235static void gic_unmask_irq(struct irq_data *d)
236{
237 GIC_SET_INTR_MASK(d->irq - gic_irq_base);
238}
239
240#ifdef CONFIG_SMP
241static DEFINE_SPINLOCK(gic_lock);
242
243static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
244 bool force)
245{
246 unsigned int irq = (d->irq - gic_irq_base);
247 cpumask_t tmp = CPU_MASK_NONE;
248 unsigned long flags;
249 int i;
250
251 cpumask_and(&tmp, cpumask, cpu_online_mask);
252 if (cpus_empty(tmp))
253 return -1;
254
255 /* Assumption : cpumask refers to a single CPU */
256 spin_lock_irqsave(&gic_lock, flags);
257
258 /* Re-route this IRQ */
259 GIC_SH_MAP_TO_VPE_SMASK(irq, first_cpu(tmp));
260
261 /* Update the pcpu_masks */
262 for (i = 0; i < NR_CPUS; i++)
263 clear_bit(irq, pcpu_masks[i].pcpu_mask);
264 set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
265
266 cpumask_copy(d->affinity, cpumask);
267 spin_unlock_irqrestore(&gic_lock, flags);
268
269 return IRQ_SET_MASK_OK_NOCOPY;
270}
271#endif
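
/*
 * Editor's note: gic_set_affinity() above assumes the cpumask resolves to a
 * single VPE. A sketch of the mask-narrowing step using the current cpumask
 * helpers instead of the older cpus_empty()/first_cpu() pair:
 */
static int pick_affinity_target(const struct cpumask *requested)
{
	cpumask_t tmp;

	cpumask_and(&tmp, requested, cpu_online_mask);
	if (cpumask_empty(&tmp))
		return -EINVAL;		/* no online CPU was requested */

	return cpumask_first(&tmp);	/* route to the first online CPU */
}
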
272
273static struct irq_chip gic_irq_controller = {
274 .name = "MIPS GIC",
275 .irq_ack = gic_irq_ack,
276 .irq_mask = gic_mask_irq,
277 .irq_mask_ack = gic_mask_irq,
278 .irq_unmask = gic_unmask_irq,
279 .irq_eoi = gic_finish_irq,
280#ifdef CONFIG_SMP
281 .irq_set_affinity = gic_set_affinity,
282#endif
283};
284
285static void __init gic_setup_intr(unsigned int intr, unsigned int cpu,
286 unsigned int pin, unsigned int polarity, unsigned int trigtype,
287 unsigned int flags)
288{
289 struct gic_shared_intr_map *map_ptr;
290
291 /* Setup Intr to Pin mapping */
292 if (pin & GIC_MAP_TO_NMI_MSK) {
293 int i;
294
295 GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)), pin);
296 /* FIXME: hack to route NMI to all CPUs */
297 for (i = 0; i < NR_CPUS; i += 32) {
298 GICWRITE(GIC_REG_ADDR(SHARED,
299 GIC_SH_MAP_TO_VPE_REG_OFF(intr, i)),
300 0xffffffff);
301 }
302 } else {
303 GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_PIN(intr)),
304 GIC_MAP_TO_PIN_MSK | pin);
305 /* Setup Intr to CPU mapping */
306 GIC_SH_MAP_TO_VPE_SMASK(intr, cpu);
307 if (cpu_has_veic) {
308 set_vi_handler(pin + GIC_PIN_TO_VEC_OFFSET,
309 gic_eic_irq_dispatch);
310 map_ptr = &gic_shared_intr_map[pin + GIC_PIN_TO_VEC_OFFSET];
311 if (map_ptr->num_shared_intr >= GIC_MAX_SHARED_INTR)
312 BUG();
313 map_ptr->intr_list[map_ptr->num_shared_intr++] = intr;
314 }
315 }
316
317 /* Setup Intr Polarity */
318 GIC_SET_POLARITY(intr, polarity);
319
320 /* Setup Intr Trigger Type */
321 GIC_SET_TRIGGER(intr, trigtype);
322
323 /* Init Intr Masks */
324 GIC_CLR_INTR_MASK(intr);
325
326 /* Initialise per-cpu Interrupt software masks */
327 set_bit(intr, pcpu_masks[cpu].pcpu_mask);
328
329 if ((flags & GIC_FLAG_TRANSPARENT) && (cpu_has_veic == 0))
330 GIC_SET_INTR_MASK(intr);
331 if (trigtype == GIC_TRIG_EDGE)
332 gic_irq_flags[intr] |= GIC_TRIG_EDGE;
333}
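
/*
 * Editor's sketch: a board-level interrupt map as gic_basic_init() below
 * consumes it. The entry layout follows struct gic_intr_map (cpunum, pin,
 * polarity, trigger type, flags); the values here are illustrative, not
 * taken from any particular board file.
 */
static struct gic_intr_map board_gic_intr_map[] __initdata = {
	{ 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
	{ 0, GIC_CPU_INT1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
	{ GIC_UNUSED },		/* entries marked GIC_UNUSED are skipped */
};
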
334
335static void __init gic_basic_init(int numintrs, int numvpes,
336 struct gic_intr_map *intrmap, int mapsize)
337{
338 unsigned int i, cpu;
339 unsigned int pin_offset = 0;
340
341 board_bind_eic_interrupt = &gic_bind_eic_interrupt;
342
343 /* Setup defaults */
344 for (i = 0; i < numintrs; i++) {
345 GIC_SET_POLARITY(i, GIC_POL_POS);
346 GIC_SET_TRIGGER(i, GIC_TRIG_LEVEL);
347 GIC_CLR_INTR_MASK(i);
348 if (i < GIC_NUM_INTRS) {
349 gic_irq_flags[i] = 0;
350 gic_shared_intr_map[i].num_shared_intr = 0;
351 gic_shared_intr_map[i].local_intr_mask = 0;
352 }
353 }
354
355 /*
356 * In EIC mode, the HW_INT# is offset by (2-1). Need to subtract
357 * one because the GIC will add one (since 0=no intr).
358 */
359 if (cpu_has_veic)
360 pin_offset = (GIC_CPU_TO_VEC_OFFSET - GIC_PIN_TO_VEC_OFFSET);
361
362 /* Setup specifics */
363 for (i = 0; i < mapsize; i++) {
364 cpu = intrmap[i].cpunum;
365 if (cpu == GIC_UNUSED)
366 continue;
367 gic_setup_intr(i,
368 intrmap[i].cpunum,
369 intrmap[i].pin + pin_offset,
370 intrmap[i].polarity,
371 intrmap[i].trigtype,
372 intrmap[i].flags);
373 }
374
375 vpe_local_setup(numvpes);
376}
377
378void __init gic_init(unsigned long gic_base_addr,
379 unsigned long gic_addrspace_size,
380 struct gic_intr_map *intr_map, unsigned int intr_map_size,
381 unsigned int irqbase)
382{
383 unsigned int gicconfig;
384 int numvpes, numintrs;
385
386 _gic_base = (unsigned long) ioremap_nocache(gic_base_addr,
387 gic_addrspace_size);
388 gic_irq_base = irqbase;
389
390 GICREAD(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
391 numintrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
392 GIC_SH_CONFIG_NUMINTRS_SHF;
393 numintrs = ((numintrs + 1) * 8);
394
395 numvpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
396 GIC_SH_CONFIG_NUMVPES_SHF;
397 numvpes = numvpes + 1;
398
399 gic_basic_init(numintrs, numvpes, intr_map, intr_map_size);
400
401 gic_platform_init(numintrs, &gic_irq_controller);
402}
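
/*
 * Editor's note: a worked example of the GIC_SH_CONFIG decoding above. The
 * NUMINTRS field encodes the shared interrupt count in units of 8, minus
 * one, and NUMVPES is biased by one, so field values of 7 and 1 describe a
 * GIC with 64 shared interrupts serving 2 VPEs.
 */
static unsigned int decode_numintrs(unsigned int gicconfig)
{
	unsigned int field = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
			     GIC_SH_CONFIG_NUMINTRS_SHF;

	return (field + 1) * 8;		/* e.g. field == 7 -> 64 interrupts */
}
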
diff --git a/arch/mips/kernel/irq_cpu.c b/arch/mips/kernel/irq_cpu.c
index e498f2b3646a..590c2c980fd3 100644
--- a/arch/mips/kernel/irq_cpu.c
+++ b/arch/mips/kernel/irq_cpu.c
@@ -36,6 +36,7 @@
36#include <asm/irq_cpu.h> 36#include <asm/irq_cpu.h>
37#include <asm/mipsregs.h> 37#include <asm/mipsregs.h>
38#include <asm/mipsmtregs.h> 38#include <asm/mipsmtregs.h>
39#include <asm/setup.h>
39 40
40static inline void unmask_mips_irq(struct irq_data *d) 41static inline void unmask_mips_irq(struct irq_data *d)
41{ 42{
@@ -94,28 +95,24 @@ static struct irq_chip mips_mt_cpu_irq_controller = {
94 .irq_eoi = unmask_mips_irq, 95 .irq_eoi = unmask_mips_irq,
95}; 96};
96 97
97void __init mips_cpu_irq_init(void) 98asmlinkage void __weak plat_irq_dispatch(void)
98{ 99{
99 int irq_base = MIPS_CPU_IRQ_BASE; 100 unsigned long pending = read_c0_cause() & read_c0_status() & ST0_IM;
100 int i; 101 int irq;
101 102
102 /* Mask interrupts. */ 103 if (!pending) {
103 clear_c0_status(ST0_IM); 104 spurious_interrupt();
104 clear_c0_cause(CAUSEF_IP); 105 return;
105 106 }
106 /* Software interrupts are used for MT/CMT IPI */
107 for (i = irq_base; i < irq_base + 2; i++)
108 irq_set_chip_and_handler(i, cpu_has_mipsmt ?
109 &mips_mt_cpu_irq_controller :
110 &mips_cpu_irq_controller,
111 handle_percpu_irq);
112 107
113 for (i = irq_base + 2; i < irq_base + 8; i++) 108 pending >>= CAUSEB_IP;
114 irq_set_chip_and_handler(i, &mips_cpu_irq_controller, 109 while (pending) {
115 handle_percpu_irq); 110 irq = fls(pending) - 1;
111 do_IRQ(MIPS_CPU_IRQ_BASE + irq);
112 pending &= ~BIT(irq);
113 }
116} 114}
117 115
118#ifdef CONFIG_IRQ_DOMAIN
119static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq, 116static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
120 irq_hw_number_t hw) 117 irq_hw_number_t hw)
121{ 118{
@@ -128,6 +125,9 @@ static int mips_cpu_intc_map(struct irq_domain *d, unsigned int irq,
128 chip = &mips_cpu_irq_controller; 125 chip = &mips_cpu_irq_controller;
129 } 126 }
130 127
128 if (cpu_has_vint)
129 set_vi_handler(hw, plat_irq_dispatch);
130
131 irq_set_chip_and_handler(irq, chip, handle_percpu_irq); 131 irq_set_chip_and_handler(irq, chip, handle_percpu_irq);
132 132
133 return 0; 133 return 0;
@@ -138,8 +138,7 @@ static const struct irq_domain_ops mips_cpu_intc_irq_domain_ops = {
138 .xlate = irq_domain_xlate_onecell, 138 .xlate = irq_domain_xlate_onecell,
139}; 139};
140 140
141int __init mips_cpu_intc_init(struct device_node *of_node, 141static void __init __mips_cpu_irq_init(struct device_node *of_node)
142 struct device_node *parent)
143{ 142{
144 struct irq_domain *domain; 143 struct irq_domain *domain;
145 144
@@ -151,7 +150,16 @@ int __init mips_cpu_intc_init(struct device_node *of_node,
151 &mips_cpu_intc_irq_domain_ops, NULL); 150 &mips_cpu_intc_irq_domain_ops, NULL);
152 if (!domain) 151 if (!domain)
153 panic("Failed to add irqdomain for MIPS CPU"); 152 panic("Failed to add irqdomain for MIPS CPU");
153}
154 154
155void __init mips_cpu_irq_init(void)
156{
157 __mips_cpu_irq_init(NULL);
158}
159
160int __init mips_cpu_irq_of_init(struct device_node *of_node,
161 struct device_node *parent)
162{
163 __mips_cpu_irq_init(of_node);
155 return 0; 164 return 0;
156} 165}
157#endif /* CONFIG_IRQ_DOMAIN */
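
/*
 * Editor's sketch of the fallback dispatcher the hunks above introduce,
 * restated without the diff columns: the IP bits are isolated from Cause,
 * and fls() repeatedly selects the highest set bit, so higher-numbered
 * (higher-priority) interrupt lines are serviced first.
 */
static void example_dispatch(unsigned long pending)
{
	int irq;

	pending >>= CAUSEB_IP;			/* align IP0 with bit 0	*/
	while (pending) {
		irq = fls(pending) - 1;		/* highest pending line	*/
		do_IRQ(MIPS_CPU_IRQ_BASE + irq);
		pending &= ~BIT(irq);		/* clear it and continue */
	}
}
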
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index f76f7a08412d..85bbe9b96759 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -16,7 +16,7 @@
16void __iomem *mips_cm_base; 16void __iomem *mips_cm_base;
17void __iomem *mips_cm_l2sync_base; 17void __iomem *mips_cm_l2sync_base;
18 18
19phys_t __mips_cm_phys_base(void) 19phys_addr_t __mips_cm_phys_base(void)
20{ 20{
21 u32 config3 = read_c0_config3(); 21 u32 config3 = read_c0_config3();
22 u32 cmgcr; 22 u32 cmgcr;
@@ -30,10 +30,10 @@ phys_t __mips_cm_phys_base(void)
30 return (cmgcr & MIPS_CMGCRF_BASE) << (36 - 32); 30 return (cmgcr & MIPS_CMGCRF_BASE) << (36 - 32);
31} 31}
32 32
33phys_t mips_cm_phys_base(void) 33phys_addr_t mips_cm_phys_base(void)
34 __attribute__((weak, alias("__mips_cm_phys_base"))); 34 __attribute__((weak, alias("__mips_cm_phys_base")));
35 35
36phys_t __mips_cm_l2sync_phys_base(void) 36phys_addr_t __mips_cm_l2sync_phys_base(void)
37{ 37{
38 u32 base_reg; 38 u32 base_reg;
39 39
@@ -49,13 +49,13 @@ phys_t __mips_cm_l2sync_phys_base(void)
49 return mips_cm_phys_base() + MIPS_CM_GCR_SIZE; 49 return mips_cm_phys_base() + MIPS_CM_GCR_SIZE;
50} 50}
51 51
52phys_t mips_cm_l2sync_phys_base(void) 52phys_addr_t mips_cm_l2sync_phys_base(void)
53 __attribute__((weak, alias("__mips_cm_l2sync_phys_base"))); 53 __attribute__((weak, alias("__mips_cm_l2sync_phys_base")));
54 54
55static void mips_cm_probe_l2sync(void) 55static void mips_cm_probe_l2sync(void)
56{ 56{
57 unsigned major_rev; 57 unsigned major_rev;
58 phys_t addr; 58 phys_addr_t addr;
59 59
60 /* L2-only sync was introduced with CM major revision 6 */ 60 /* L2-only sync was introduced with CM major revision 6 */
61 major_rev = (read_gcr_rev() & CM_GCR_REV_MAJOR_MSK) >> 61 major_rev = (read_gcr_rev() & CM_GCR_REV_MAJOR_MSK) >>
@@ -78,7 +78,7 @@ static void mips_cm_probe_l2sync(void)
78 78
79int mips_cm_probe(void) 79int mips_cm_probe(void)
80{ 80{
81 phys_t addr; 81 phys_addr_t addr;
82 u32 base_reg; 82 u32 base_reg;
83 83
84 addr = mips_cm_phys_base(); 84 addr = mips_cm_phys_base();
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
index ba473608a347..11964501c4b0 100644
--- a/arch/mips/kernel/mips-cpc.c
+++ b/arch/mips/kernel/mips-cpc.c
@@ -21,7 +21,7 @@ static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
21 21
22static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags); 22static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
23 23
24phys_t __weak mips_cpc_phys_base(void) 24phys_addr_t __weak mips_cpc_phys_base(void)
25{ 25{
26 u32 cpc_base; 26 u32 cpc_base;
27 27
@@ -44,7 +44,7 @@ phys_t __weak mips_cpc_phys_base(void)
44 44
45int mips_cpc_probe(void) 45int mips_cpc_probe(void)
46{ 46{
47 phys_t addr; 47 phys_addr_t addr;
48 unsigned cpu; 48 unsigned cpu;
49 49
50 for_each_possible_cpu(cpu) 50 for_each_possible_cpu(cpu)
diff --git a/arch/mips/kernel/mips_ksyms.c b/arch/mips/kernel/mips_ksyms.c
index 2607c3a4ff7e..17eaf0cf760c 100644
--- a/arch/mips/kernel/mips_ksyms.c
+++ b/arch/mips/kernel/mips_ksyms.c
@@ -24,9 +24,7 @@ extern long __strncpy_from_user_nocheck_asm(char *__to,
24 const char *__from, long __len); 24 const char *__from, long __len);
25extern long __strncpy_from_user_asm(char *__to, const char *__from, 25extern long __strncpy_from_user_asm(char *__to, const char *__from,
26 long __len); 26 long __len);
27extern long __strlen_kernel_nocheck_asm(const char *s);
28extern long __strlen_kernel_asm(const char *s); 27extern long __strlen_kernel_asm(const char *s);
29extern long __strlen_user_nocheck_asm(const char *s);
30extern long __strlen_user_asm(const char *s); 28extern long __strlen_user_asm(const char *s);
31extern long __strnlen_kernel_nocheck_asm(const char *s); 29extern long __strnlen_kernel_nocheck_asm(const char *s);
32extern long __strnlen_kernel_asm(const char *s); 30extern long __strnlen_kernel_asm(const char *s);
@@ -62,9 +60,7 @@ EXPORT_SYMBOL(__strncpy_from_kernel_nocheck_asm);
62EXPORT_SYMBOL(__strncpy_from_kernel_asm); 60EXPORT_SYMBOL(__strncpy_from_kernel_asm);
63EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm); 61EXPORT_SYMBOL(__strncpy_from_user_nocheck_asm);
64EXPORT_SYMBOL(__strncpy_from_user_asm); 62EXPORT_SYMBOL(__strncpy_from_user_asm);
65EXPORT_SYMBOL(__strlen_kernel_nocheck_asm);
66EXPORT_SYMBOL(__strlen_kernel_asm); 63EXPORT_SYMBOL(__strlen_kernel_asm);
67EXPORT_SYMBOL(__strlen_user_nocheck_asm);
68EXPORT_SYMBOL(__strlen_user_asm); 64EXPORT_SYMBOL(__strlen_user_asm);
69EXPORT_SYMBOL(__strnlen_kernel_nocheck_asm); 65EXPORT_SYMBOL(__strnlen_kernel_nocheck_asm);
70EXPORT_SYMBOL(__strnlen_kernel_asm); 66EXPORT_SYMBOL(__strnlen_kernel_asm);
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index a8f9cdc6f8b0..9466184d0039 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -561,8 +561,8 @@ static int mipspmu_get_irq(void)
561 IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD, 561 IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD,
562 "mips_perf_pmu", NULL); 562 "mips_perf_pmu", NULL);
563 if (err) { 563 if (err) {
564 pr_warning("Unable to request IRQ%d for MIPS " 564 pr_warn("Unable to request IRQ%d for MIPS performance counters!\n",
565 "performance counters!\n", mipspmu.irq); 565 mipspmu.irq);
566 } 566 }
567 } else if (cp0_perfcount_irq < 0) { 567 } else if (cp0_perfcount_irq < 0) {
568 /* 568 /*
@@ -572,8 +572,7 @@ static int mipspmu_get_irq(void)
572 perf_irq = mipsxx_pmu_handle_shared_irq; 572 perf_irq = mipsxx_pmu_handle_shared_irq;
573 err = 0; 573 err = 0;
574 } else { 574 } else {
575 pr_warning("The platform hasn't properly defined its " 575 pr_warn("The platform hasn't properly defined its interrupt controller\n");
576 "interrupt controller.\n");
577 err = -ENOENT; 576 err = -ENOENT;
578 } 577 }
579 578
@@ -1614,22 +1613,13 @@ init_hw_perf_events(void)
1614 counters = counters_total_to_per_cpu(counters); 1613 counters = counters_total_to_per_cpu(counters);
1615#endif 1614#endif
1616 1615
1617#ifdef MSC01E_INT_BASE 1616 if (get_c0_perfcount_int)
1618 if (cpu_has_veic) { 1617 irq = get_c0_perfcount_int();
1619 /* 1618 else if ((cp0_perfcount_irq >= 0) &&
1620 * Using platform specific interrupt controller defines. 1619 (cp0_compare_irq != cp0_perfcount_irq))
1621 */ 1620 irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
1622 irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR; 1621 else
1623 } else { 1622 irq = -1;
1624#endif
1625 if ((cp0_perfcount_irq >= 0) &&
1626 (cp0_compare_irq != cp0_perfcount_irq))
1627 irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
1628 else
1629 irq = -1;
1630#ifdef MSC01E_INT_BASE
1631 }
1632#endif
1633 1623
1634 mipspmu.map_raw_event = mipsxx_pmu_map_raw_event; 1624 mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;
1635 1625
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index 636b0745d7c7..eb76434828e8 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -42,6 +42,7 @@
42#include <asm/isadep.h> 42#include <asm/isadep.h>
43#include <asm/inst.h> 43#include <asm/inst.h>
44#include <asm/stacktrace.h> 44#include <asm/stacktrace.h>
45#include <asm/irq_regs.h>
45 46
46#ifdef CONFIG_HOTPLUG_CPU 47#ifdef CONFIG_HOTPLUG_CPU
47void arch_cpu_idle_dead(void) 48void arch_cpu_idle_dead(void)
@@ -187,21 +188,21 @@ static inline int is_ra_save_ins(union mips_instruction *ip)
187 */ 188 */
188 if (mm_insn_16bit(ip->halfword[0])) { 189 if (mm_insn_16bit(ip->halfword[0])) {
189 mmi.word = (ip->halfword[0] << 16); 190 mmi.word = (ip->halfword[0] << 16);
190 return ((mmi.mm16_r5_format.opcode == mm_swsp16_op && 191 return (mmi.mm16_r5_format.opcode == mm_swsp16_op &&
191 mmi.mm16_r5_format.rt == 31) || 192 mmi.mm16_r5_format.rt == 31) ||
192 (mmi.mm16_m_format.opcode == mm_pool16c_op && 193 (mmi.mm16_m_format.opcode == mm_pool16c_op &&
193 mmi.mm16_m_format.func == mm_swm16_op)); 194 mmi.mm16_m_format.func == mm_swm16_op);
194 } 195 }
195 else { 196 else {
196 mmi.halfword[0] = ip->halfword[1]; 197 mmi.halfword[0] = ip->halfword[1];
197 mmi.halfword[1] = ip->halfword[0]; 198 mmi.halfword[1] = ip->halfword[0];
198 return ((mmi.mm_m_format.opcode == mm_pool32b_op && 199 return (mmi.mm_m_format.opcode == mm_pool32b_op &&
199 mmi.mm_m_format.rd > 9 && 200 mmi.mm_m_format.rd > 9 &&
200 mmi.mm_m_format.base == 29 && 201 mmi.mm_m_format.base == 29 &&
201 mmi.mm_m_format.func == mm_swm32_func) || 202 mmi.mm_m_format.func == mm_swm32_func) ||
202 (mmi.i_format.opcode == mm_sw32_op && 203 (mmi.i_format.opcode == mm_sw32_op &&
203 mmi.i_format.rs == 29 && 204 mmi.i_format.rs == 29 &&
204 mmi.i_format.rt == 31)); 205 mmi.i_format.rt == 31);
205 } 206 }
206#else 207#else
207 /* sw / sd $ra, offset($sp) */ 208 /* sw / sd $ra, offset($sp) */
@@ -233,7 +234,7 @@ static inline int is_jump_ins(union mips_instruction *ip)
233 if (ip->r_format.opcode != mm_pool32a_op || 234 if (ip->r_format.opcode != mm_pool32a_op ||
234 ip->r_format.func != mm_pool32axf_op) 235 ip->r_format.func != mm_pool32axf_op)
235 return 0; 236 return 0;
236 return (((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op); 237 return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
237#else 238#else
238 if (ip->j_format.opcode == j_op) 239 if (ip->j_format.opcode == j_op)
239 return 1; 240 return 1;
@@ -260,13 +261,13 @@ static inline int is_sp_move_ins(union mips_instruction *ip)
260 union mips_instruction mmi; 261 union mips_instruction mmi;
261 262
262 mmi.word = (ip->halfword[0] << 16); 263 mmi.word = (ip->halfword[0] << 16);
263 return ((mmi.mm16_r3_format.opcode == mm_pool16d_op && 264 return (mmi.mm16_r3_format.opcode == mm_pool16d_op &&
264 mmi.mm16_r3_format.simmediate && mm_addiusp_func) || 265 mmi.mm16_r3_format.simmediate && mm_addiusp_func) ||
265 (mmi.mm16_r5_format.opcode == mm_pool16d_op && 266 (mmi.mm16_r5_format.opcode == mm_pool16d_op &&
266 mmi.mm16_r5_format.rt == 29)); 267 mmi.mm16_r5_format.rt == 29);
267 } 268 }
268 return (ip->mm_i_format.opcode == mm_addiu32_op && 269 return ip->mm_i_format.opcode == mm_addiu32_op &&
269 ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29); 270 ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29;
270#else 271#else
271 /* addiu/daddiu sp,sp,-imm */ 272 /* addiu/daddiu sp,sp,-imm */
272 if (ip->i_format.rs != 29 || ip->i_format.rt != 29) 273 if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
@@ -532,3 +533,20 @@ unsigned long arch_align_stack(unsigned long sp)
532 533
533 return sp & ALMASK; 534 return sp & ALMASK;
534} 535}
536
537static void arch_dump_stack(void *info)
538{
539 struct pt_regs *regs;
540
541 regs = get_irq_regs();
542
543 if (regs)
544 show_regs(regs);
545
546 dump_stack();
547}
548
549void arch_trigger_all_cpu_backtrace(bool include_self)
550{
551 smp_call_function(arch_dump_stack, NULL, 1);
552}
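
/*
 * Editor's note: smp_call_function() runs the callback on every CPU except
 * the caller, so the include_self argument above is effectively ignored. A
 * sketch that honours it, assuming the same arch_dump_stack() helper:
 */
static void example_trigger_all_cpu_backtrace(bool include_self)
{
	if (include_self)
		arch_dump_stack(NULL);	/* dump the calling CPU as well */
	smp_call_function(arch_dump_stack, NULL, 1);
}
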
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 5d39bb85bf35..452d4350ce42 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -16,6 +16,7 @@
16#include <linux/debugfs.h> 16#include <linux/debugfs.h>
17#include <linux/of.h> 17#include <linux/of.h>
18#include <linux/of_fdt.h> 18#include <linux/of_fdt.h>
19#include <linux/of_platform.h>
19 20
20#include <asm/page.h> 21#include <asm/page.h>
21#include <asm/prom.h> 22#include <asm/prom.h>
@@ -54,4 +55,21 @@ void __init __dt_setup_arch(void *bph)
54 55
55 mips_set_machine_name(of_flat_dt_get_machine_name()); 56 mips_set_machine_name(of_flat_dt_get_machine_name());
56} 57}
58
59int __init __dt_register_buses(const char *bus0, const char *bus1)
60{
61 static struct of_device_id of_ids[3];
62
63 if (!of_have_populated_dt())
64 panic("device tree not present");
65
66 strlcpy(of_ids[0].compatible, bus0, sizeof(of_ids[0].compatible));
67 strlcpy(of_ids[1].compatible, bus1, sizeof(of_ids[1].compatible));
68
69 if (of_platform_populate(NULL, of_ids, NULL, NULL))
70 panic("failed to populate DT");
71
72 return 0;
73}
74
57#endif 75#endif
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index f3b635f86c39..058929041368 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -82,14 +82,14 @@ static struct resource data_resource = { .name = "Kernel data", };
82 82
83static void *detect_magic __initdata = detect_memory_region; 83static void *detect_magic __initdata = detect_memory_region;
84 84
85void __init add_memory_region(phys_t start, phys_t size, long type) 85void __init add_memory_region(phys_addr_t start, phys_addr_t size, long type)
86{ 86{
87 int x = boot_mem_map.nr_map; 87 int x = boot_mem_map.nr_map;
88 int i; 88 int i;
89 89
90 /* Sanity check */ 90 /* Sanity check */
91 if (start + size < start) { 91 if (start + size < start) {
92 pr_warning("Trying to add an invalid memory region, skipped\n"); 92 pr_warn("Trying to add an invalid memory region, skipped\n");
93 return; 93 return;
94 } 94 }
95 95
@@ -127,10 +127,10 @@ void __init add_memory_region(phys_t start, phys_t size, long type)
127 boot_mem_map.nr_map++; 127 boot_mem_map.nr_map++;
128} 128}
129 129
130void __init detect_memory_region(phys_t start, phys_t sz_min, phys_t sz_max) 130void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
131{ 131{
132 void *dm = &detect_magic; 132 void *dm = &detect_magic;
133 phys_t size; 133 phys_addr_t size;
134 134
135 for (size = sz_min; size < sz_max; size <<= 1) { 135 for (size = sz_min; size < sz_max; size <<= 1) {
136 if (!memcmp(dm, dm + size, sizeof(detect_magic))) 136 if (!memcmp(dm, dm + size, sizeof(detect_magic)))
@@ -493,7 +493,7 @@ static int usermem __initdata;
493 493
494static int __init early_parse_mem(char *p) 494static int __init early_parse_mem(char *p)
495{ 495{
496 phys_t start, size; 496 phys_addr_t start, size;
497 497
498 /* 498 /*
499 * If a user specifies memory size, we 499 * If a user specifies memory size, we
@@ -545,9 +545,9 @@ static int __init early_parse_elfcorehdr(char *p)
545early_param("elfcorehdr", early_parse_elfcorehdr); 545early_param("elfcorehdr", early_parse_elfcorehdr);
546#endif 546#endif
547 547
548static void __init arch_mem_addpart(phys_t mem, phys_t end, int type) 548static void __init arch_mem_addpart(phys_addr_t mem, phys_addr_t end, int type)
549{ 549{
550 phys_t size; 550 phys_addr_t size;
551 int i; 551 int i;
552 552
553 size = end - mem; 553 size = end - mem;
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 16f1e4f2bf3c..545bf11bd2ed 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -530,7 +530,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
530 struct mips_abi *abi = current->thread.abi; 530 struct mips_abi *abi = current->thread.abi;
531#ifdef CONFIG_CPU_MICROMIPS 531#ifdef CONFIG_CPU_MICROMIPS
532 void *vdso; 532 void *vdso;
533 unsigned int tmp = (unsigned int)current->mm->context.vdso; 533 unsigned long tmp = (unsigned long)current->mm->context.vdso;
534 534
535 set_isa16_mode(tmp); 535 set_isa16_mode(tmp);
536 vdso = (void *)tmp; 536 vdso = (void *)tmp;
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 06bb5ed6d80a..b8bd9340c9c7 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -35,6 +35,7 @@
35#include <asm/bmips.h> 35#include <asm/bmips.h>
36#include <asm/traps.h> 36#include <asm/traps.h>
37#include <asm/barrier.h> 37#include <asm/barrier.h>
38#include <asm/cpu-features.h>
38 39
39static int __maybe_unused max_cpus = 1; 40static int __maybe_unused max_cpus = 1;
40 41
@@ -42,6 +43,12 @@ static int __maybe_unused max_cpus = 1;
42int bmips_smp_enabled = 1; 43int bmips_smp_enabled = 1;
43int bmips_cpu_offset; 44int bmips_cpu_offset;
44cpumask_t bmips_booted_mask; 45cpumask_t bmips_booted_mask;
46unsigned long bmips_tp1_irqs = IE_IRQ1;
47
48#define RESET_FROM_KSEG0 0x80080800
49#define RESET_FROM_KSEG1 0xa0080800
50
51static void bmips_set_reset_vec(int cpu, u32 val);
45 52
46#ifdef CONFIG_SMP 53#ifdef CONFIG_SMP
47 54
@@ -194,6 +201,9 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle)
194 pr_info("SMP: Booting CPU%d...\n", cpu); 201 pr_info("SMP: Booting CPU%d...\n", cpu);
195 202
196 if (cpumask_test_cpu(cpu, &bmips_booted_mask)) { 203 if (cpumask_test_cpu(cpu, &bmips_booted_mask)) {
204 /* kseg1 might not exist if this CPU enabled XKS01 */
205 bmips_set_reset_vec(cpu, RESET_FROM_KSEG0);
206
197 switch (current_cpu_type()) { 207 switch (current_cpu_type()) {
198 case CPU_BMIPS4350: 208 case CPU_BMIPS4350:
199 case CPU_BMIPS4380: 209 case CPU_BMIPS4380:
@@ -203,8 +213,9 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle)
203 bmips5000_send_ipi_single(cpu, 0); 213 bmips5000_send_ipi_single(cpu, 0);
204 break; 214 break;
205 } 215 }
206 } 216 } else {
207 else { 217 bmips_set_reset_vec(cpu, RESET_FROM_KSEG1);
218
208 switch (current_cpu_type()) { 219 switch (current_cpu_type()) {
209 case CPU_BMIPS4350: 220 case CPU_BMIPS4350:
210 case CPU_BMIPS4380: 221 case CPU_BMIPS4380:
@@ -213,17 +224,7 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle)
213 set_c0_brcm_cmt_ctrl(0x01); 224 set_c0_brcm_cmt_ctrl(0x01);
214 break; 225 break;
215 case CPU_BMIPS5000: 226 case CPU_BMIPS5000:
216 if (cpu & 0x01) 227 write_c0_brcm_action(ACTION_BOOT_THREAD(cpu));
217 write_c0_brcm_action(ACTION_BOOT_THREAD(cpu));
218 else {
219 /*
220 * core N thread 0 was already booted; just
221 * pulse the NMI line
222 */
223 bmips_write_zscm_reg(0x210, 0xc0000000);
224 udelay(10);
225 bmips_write_zscm_reg(0x210, 0x00);
226 }
227 break; 228 break;
228 } 229 }
229 cpumask_set_cpu(cpu, &bmips_booted_mask); 230 cpumask_set_cpu(cpu, &bmips_booted_mask);
@@ -235,31 +236,12 @@ static void bmips_boot_secondary(int cpu, struct task_struct *idle)
235 */ 236 */
236static void bmips_init_secondary(void) 237static void bmips_init_secondary(void)
237{ 238{
238 /* move NMI vector to kseg0, in case XKS01 is enabled */
239
240 void __iomem *cbr;
241 unsigned long old_vec;
242 unsigned long relo_vector;
243 int boot_cpu;
244
245 switch (current_cpu_type()) { 239 switch (current_cpu_type()) {
246 case CPU_BMIPS4350: 240 case CPU_BMIPS4350:
247 case CPU_BMIPS4380: 241 case CPU_BMIPS4380:
248 cbr = BMIPS_GET_CBR();
249
250 boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));
251 relo_vector = boot_cpu ? BMIPS_RELO_VECTOR_CONTROL_0 :
252 BMIPS_RELO_VECTOR_CONTROL_1;
253
254 old_vec = __raw_readl(cbr + relo_vector);
255 __raw_writel(old_vec & ~0x20000000, cbr + relo_vector);
256
257 clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0); 242 clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0);
258 break; 243 break;
259 case CPU_BMIPS5000: 244 case CPU_BMIPS5000:
260 write_c0_brcm_bootvec(read_c0_brcm_bootvec() &
261 (smp_processor_id() & 0x01 ? ~0x20000000 : ~0x2000));
262
263 write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0)); 245 write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
264 break; 246 break;
265 } 247 }
@@ -276,7 +258,7 @@ static void bmips_smp_finish(void)
276 write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ); 258 write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
277 259
278 irq_enable_hazard(); 260 irq_enable_hazard();
279 set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ1 | IE_IRQ5 | ST0_IE); 261 set_c0_status(IE_SW0 | IE_SW1 | bmips_tp1_irqs | IE_IRQ5 | ST0_IE);
280 irq_enable_hazard(); 262 irq_enable_hazard();
281} 263}
282 264
@@ -381,6 +363,7 @@ static int bmips_cpu_disable(void)
381 363
382 set_cpu_online(cpu, false); 364 set_cpu_online(cpu, false);
383 cpu_clear(cpu, cpu_callin_map); 365 cpu_clear(cpu, cpu_callin_map);
366 clear_c0_status(IE_IRQ5);
384 367
385 local_flush_tlb_all(); 368 local_flush_tlb_all();
386 local_flush_icache_range(0, ~0); 369 local_flush_icache_range(0, ~0);
@@ -405,7 +388,8 @@ void __ref play_dead(void)
405 * IRQ handlers; this clears ST0_IE and returns immediately. 388 * IRQ handlers; this clears ST0_IE and returns immediately.
406 */ 389 */
407 clear_c0_cause(CAUSEF_IV | C_SW0 | C_SW1); 390 clear_c0_cause(CAUSEF_IV | C_SW0 | C_SW1);
408 change_c0_status(IE_IRQ5 | IE_IRQ1 | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV, 391 change_c0_status(
392 IE_IRQ5 | bmips_tp1_irqs | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV,
409 IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV); 393 IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV);
410 irq_disable_hazard(); 394 irq_disable_hazard();
411 395
@@ -473,10 +457,61 @@ static inline void bmips_nmi_handler_setup(void)
473 &bmips_smp_int_vec_end); 457 &bmips_smp_int_vec_end);
474} 458}
475 459
460struct reset_vec_info {
461 int cpu;
462 u32 val;
463};
464
465static void bmips_set_reset_vec_remote(void *vinfo)
466{
467 struct reset_vec_info *info = vinfo;
468 int shift = info->cpu & 0x01 ? 16 : 0;
469 u32 mask = ~(0xffff << shift), val = info->val >> 16;
470
471 preempt_disable();
472 if (smp_processor_id() > 0) {
473 smp_call_function_single(0, &bmips_set_reset_vec_remote,
474 info, 1);
475 } else {
476 if (info->cpu & 0x02) {
477 /* BMIPS5200 "should" use mask/shift, but it's buggy */
478 bmips_write_zscm_reg(0xa0, (val << 16) | val);
479 bmips_read_zscm_reg(0xa0);
480 } else {
481 write_c0_brcm_bootvec((read_c0_brcm_bootvec() & mask) |
482 (val << shift));
483 }
484 }
485 preempt_enable();
486}
487
488static void bmips_set_reset_vec(int cpu, u32 val)
489{
490 struct reset_vec_info info;
491
492 if (current_cpu_type() == CPU_BMIPS5000) {
493 /* this needs to run from CPU0 (which is always online) */
494 info.cpu = cpu;
495 info.val = val;
496 bmips_set_reset_vec_remote(&info);
497 } else {
498 void __iomem *cbr = BMIPS_GET_CBR();
499
500 if (cpu == 0)
501 __raw_writel(val, cbr + BMIPS_RELO_VECTOR_CONTROL_0);
502 else {
503 if (current_cpu_type() != CPU_BMIPS4380)
504 return;
505 __raw_writel(val, cbr + BMIPS_RELO_VECTOR_CONTROL_1);
506 }
507 }
508 __sync();
509 back_to_back_c0_hazard();
510}
511
476void bmips_ebase_setup(void) 512void bmips_ebase_setup(void)
477{ 513{
478 unsigned long new_ebase = ebase; 514 unsigned long new_ebase = ebase;
479 void __iomem __maybe_unused *cbr;
480 515
481 BUG_ON(ebase != CKSEG0); 516 BUG_ON(ebase != CKSEG0);
482 517
@@ -496,15 +531,14 @@ void bmips_ebase_setup(void)
496 &bmips_smp_int_vec, 0x80); 531 &bmips_smp_int_vec, 0x80);
497 __sync(); 532 __sync();
498 return; 533 return;
534 case CPU_BMIPS3300:
499 case CPU_BMIPS4380: 535 case CPU_BMIPS4380:
500 /* 536 /*
501 * 0x8000_0000: reset/NMI (initially in kseg1) 537 * 0x8000_0000: reset/NMI (initially in kseg1)
502 * 0x8000_0400: normal vectors 538 * 0x8000_0400: normal vectors
503 */ 539 */
504 new_ebase = 0x80000400; 540 new_ebase = 0x80000400;
505 cbr = BMIPS_GET_CBR(); 541 bmips_set_reset_vec(0, RESET_FROM_KSEG0);
506 __raw_writel(0x80080800, cbr + BMIPS_RELO_VECTOR_CONTROL_0);
507 __raw_writel(0xa0080800, cbr + BMIPS_RELO_VECTOR_CONTROL_1);
508 break; 542 break;
509 case CPU_BMIPS5000: 543 case CPU_BMIPS5000:
510 /* 544 /*
@@ -512,10 +546,8 @@ void bmips_ebase_setup(void)
512 * 0x8000_1000: normal vectors 546 * 0x8000_1000: normal vectors
513 */ 547 */
514 new_ebase = 0x80001000; 548 new_ebase = 0x80001000;
515 write_c0_brcm_bootvec(0xa0088008); 549 bmips_set_reset_vec(0, RESET_FROM_KSEG0);
516 write_c0_ebase(new_ebase); 550 write_c0_ebase(new_ebase);
517 if (max_cpus > 2)
518 bmips_write_zscm_reg(0xa0, 0xa008a008);
519 break; 551 break;
520 default: 552 default:
521 return; 553 return;
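
/*
 * Editor's note: RESET_FROM_KSEG0 (0x80080800) and RESET_FROM_KSEG1
 * (0xa0080800) in the hunks above name the same physical address through
 * the cached and uncached unmapped segments; CPHYSADDR() strips the
 * segment bits from either alias.
 */
static bool reset_vectors_alias(void)
{
	/* both resolve to physical 0x00080800 */
	return CPHYSADDR(0x80080800) == CPHYSADDR(0xa0080800);
}
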
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
index fc8a51553426..1e0a93c5a3e7 100644
--- a/arch/mips/kernel/smp-cmp.c
+++ b/arch/mips/kernel/smp-cmp.c
@@ -24,6 +24,7 @@
24#include <linux/cpumask.h> 24#include <linux/cpumask.h>
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/compiler.h> 26#include <linux/compiler.h>
27#include <linux/irqchip/mips-gic.h>
27 28
28#include <linux/atomic.h> 29#include <linux/atomic.h>
29#include <asm/cacheflush.h> 30#include <asm/cacheflush.h>
@@ -37,7 +38,6 @@
37#include <asm/mipsmtregs.h> 38#include <asm/mipsmtregs.h>
38#include <asm/mips_mt.h> 39#include <asm/mips_mt.h>
39#include <asm/amon.h> 40#include <asm/amon.h>
40#include <asm/gic.h>
41 41
42static void cmp_init_secondary(void) 42static void cmp_init_secondary(void)
43{ 43{
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index e6e16a1d4add..bed7590e475f 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -9,13 +9,13 @@
9 */ 9 */
10 10
11#include <linux/io.h> 11#include <linux/io.h>
12#include <linux/irqchip/mips-gic.h>
12#include <linux/sched.h> 13#include <linux/sched.h>
13#include <linux/slab.h> 14#include <linux/slab.h>
14#include <linux/smp.h> 15#include <linux/smp.h>
15#include <linux/types.h> 16#include <linux/types.h>
16 17
17#include <asm/bcache.h> 18#include <asm/bcache.h>
18#include <asm/gic.h>
19#include <asm/mips-cm.h> 19#include <asm/mips-cm.h>
20#include <asm/mips-cpc.h> 20#include <asm/mips-cpc.h>
21#include <asm/mips_mt.h> 21#include <asm/mips_mt.h>
@@ -273,8 +273,8 @@ static void cps_init_secondary(void)
273 if (cpu_has_mipsmt) 273 if (cpu_has_mipsmt)
274 dmt(); 274 dmt();
275 275
276 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | 276 change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 |
277 STATUSF_IP6 | STATUSF_IP7); 277 STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7);
278} 278}
279 279
280static void cps_smp_finish(void) 280static void cps_smp_finish(void)
diff --git a/arch/mips/kernel/smp-gic.c b/arch/mips/kernel/smp-gic.c
index 3b21a96d1ccb..5f0ab5bcd01e 100644
--- a/arch/mips/kernel/smp-gic.c
+++ b/arch/mips/kernel/smp-gic.c
@@ -12,9 +12,9 @@
12 * option) any later version. 12 * option) any later version.
13 */ 13 */
14 14
15#include <linux/irqchip/mips-gic.h>
15#include <linux/printk.h> 16#include <linux/printk.h>
16 17
17#include <asm/gic.h>
18#include <asm/mips-cpc.h> 18#include <asm/mips-cpc.h>
19#include <asm/smp-ops.h> 19#include <asm/smp-ops.h>
20 20
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
index 21f23add04f4..ad86951b73bd 100644
--- a/arch/mips/kernel/smp-mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -21,6 +21,7 @@
21#include <linux/sched.h> 21#include <linux/sched.h>
22#include <linux/cpumask.h> 22#include <linux/cpumask.h>
23#include <linux/interrupt.h> 23#include <linux/interrupt.h>
24#include <linux/irqchip/mips-gic.h>
24#include <linux/compiler.h> 25#include <linux/compiler.h>
25#include <linux/smp.h> 26#include <linux/smp.h>
26 27
@@ -34,7 +35,6 @@
34#include <asm/mipsregs.h> 35#include <asm/mipsregs.h>
35#include <asm/mipsmtregs.h> 36#include <asm/mipsmtregs.h>
36#include <asm/mips_mt.h> 37#include <asm/mips_mt.h>
37#include <asm/gic.h>
38 38
39static void __init smvp_copy_vpe_config(void) 39static void __init smvp_copy_vpe_config(void)
40{ 40{
@@ -119,7 +119,7 @@ static void vsmp_send_ipi_single(int cpu, unsigned int action)
119 unsigned long flags; 119 unsigned long flags;
120 int vpflags; 120 int vpflags;
121 121
122#ifdef CONFIG_IRQ_GIC 122#ifdef CONFIG_MIPS_GIC
123 if (gic_present) { 123 if (gic_present) {
124 gic_send_ipi_single(cpu, action); 124 gic_send_ipi_single(cpu, action);
125 return; 125 return;
@@ -158,7 +158,7 @@ static void vsmp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
158 158
159static void vsmp_init_secondary(void) 159static void vsmp_init_secondary(void)
160{ 160{
161#ifdef CONFIG_IRQ_GIC 161#ifdef CONFIG_MIPS_GIC
162 /* This is Malta specific: IPI,performance and timer interrupts */ 162 /* This is Malta specific: IPI,performance and timer interrupts */
163 if (gic_present) 163 if (gic_present)
164 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 | 164 change_c0_status(ST0_IM, STATUSF_IP3 | STATUSF_IP4 |
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
index 4a4f9dda5658..604b558809c4 100644
--- a/arch/mips/kernel/syscall.c
+++ b/arch/mips/kernel/syscall.c
@@ -117,6 +117,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
117 "2: sc %[tmp], (%[addr]) \n" 117 "2: sc %[tmp], (%[addr]) \n"
118 " beqzl %[tmp], 1b \n" 118 " beqzl %[tmp], 1b \n"
119 "3: \n" 119 "3: \n"
120 " .insn \n"
120 " .section .fixup,\"ax\" \n" 121 " .section .fixup,\"ax\" \n"
121 "4: li %[err], %[efault] \n" 122 "4: li %[err], %[efault] \n"
122 " j 3b \n" 123 " j 3b \n"
@@ -142,6 +143,7 @@ static inline int mips_atomic_set(unsigned long addr, unsigned long new)
142 "2: sc %[tmp], (%[addr]) \n" 143 "2: sc %[tmp], (%[addr]) \n"
143 " bnez %[tmp], 4f \n" 144 " bnez %[tmp], 4f \n"
144 "3: \n" 145 "3: \n"
146 " .insn \n"
145 " .subsection 2 \n" 147 " .subsection 2 \n"
146 "4: b 1b \n" 148 "4: b 1b \n"
147 " .previous \n" 149 " .previous \n"
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 22b19c275044..ad3d2031c327 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -724,6 +724,50 @@ int process_fpemu_return(int sig, void __user *fault_addr)
724 } 724 }
725} 725}
726 726
727static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
728 unsigned long old_epc, unsigned long old_ra)
729{
730 union mips_instruction inst = { .word = opcode };
731 void __user *fault_addr = NULL;
732 int sig;
733
734 /* If it's obviously not an FP instruction, skip it */
735 switch (inst.i_format.opcode) {
736 case cop1_op:
737 case cop1x_op:
738 case lwc1_op:
739 case ldc1_op:
740 case swc1_op:
741 case sdc1_op:
742 break;
743
744 default:
745 return -1;
746 }
747
748 /*
749 * do_ri skipped over the instruction via compute_return_epc, undo
750 * that for the FPU emulator.
751 */
752 regs->cp0_epc = old_epc;
753 regs->regs[31] = old_ra;
754
755 /* Save the FP context to struct thread_struct */
756 lose_fpu(1);
757
758 /* Run the emulator */
759 sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
760 &fault_addr);
761
762 /* If something went wrong, signal */
763 process_fpemu_return(sig, fault_addr);
764
765 /* Restore the hardware register state */
766 own_fpu(1);
767
768 return 0;
769}
770
727/* 771/*
728 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX 772 * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
729 */ 773 */
@@ -1016,6 +1060,9 @@ asmlinkage void do_ri(struct pt_regs *regs)
1016 1060
1017 if (status < 0) 1061 if (status < 0)
1018 status = simulate_sync(regs, opcode); 1062 status = simulate_sync(regs, opcode);
1063
1064 if (status < 0)
1065 status = simulate_fp(regs, opcode, old_epc, old31);
1019 } 1066 }
1020 1067
1021 if (status < 0) 1068 if (status < 0)
@@ -1380,12 +1427,19 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
1380 show_regs(regs); 1427 show_regs(regs);
1381 1428
1382 if (multi_match) { 1429 if (multi_match) {
1383 printk("Index : %0x\n", read_c0_index()); 1430 pr_err("Index : %0x\n", read_c0_index());
1384 printk("Pagemask: %0x\n", read_c0_pagemask()); 1431 pr_err("Pagemask: %0x\n", read_c0_pagemask());
1385 printk("EntryHi : %0*lx\n", field, read_c0_entryhi()); 1432 pr_err("EntryHi : %0*lx\n", field, read_c0_entryhi());
1386 printk("EntryLo0: %0*lx\n", field, read_c0_entrylo0()); 1433 pr_err("EntryLo0: %0*lx\n", field, read_c0_entrylo0());
1387 printk("EntryLo1: %0*lx\n", field, read_c0_entrylo1()); 1434 pr_err("EntryLo1: %0*lx\n", field, read_c0_entrylo1());
1388 printk("\n"); 1435 pr_err("Wired : %0x\n", read_c0_wired());
1436 pr_err("Pagegrain: %0x\n", read_c0_pagegrain());
1437 if (cpu_has_htw) {
1438 pr_err("PWField : %0*lx\n", field, read_c0_pwfield());
1439 pr_err("PWSize : %0*lx\n", field, read_c0_pwsize());
1440 pr_err("PWCtl : %0x\n", read_c0_pwctl());
1441 }
1442 pr_err("\n");
1389 dump_tlb_all(); 1443 dump_tlb_all();
1390 } 1444 }
1391 1445
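
/*
 * Editor's sketch of the do_ri() fallback chain the hunk above extends,
 * using the helper names as they appear in the surrounding file: each
 * simulate_*() routine returns a negative value when the opcode is not
 * one it handles, so the emulators are simply tried in order.
 */
static void example_ri_fallback(struct pt_regs *regs, unsigned int opcode,
				unsigned long old_epc, unsigned long old31)
{
	int status = simulate_llsc(regs, opcode);

	if (status < 0)
		status = simulate_rdhwr_normal(regs, opcode);
	if (status < 0)
		status = simulate_sync(regs, opcode);
	if (status < 0)
		status = simulate_fp(regs, opcode, old_epc, old31);
	if (status < 0)			/* nothing claimed it: genuine RI */
		force_sig(SIGILL, current);
}
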
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 0f1af58b036a..ed2a278722a9 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -16,9 +16,11 @@
16#include <linux/elf.h> 16#include <linux/elf.h>
17#include <linux/vmalloc.h> 17#include <linux/vmalloc.h>
18#include <linux/unistd.h> 18#include <linux/unistd.h>
19#include <linux/random.h>
19 20
20#include <asm/vdso.h> 21#include <asm/vdso.h>
21#include <asm/uasm.h> 22#include <asm/uasm.h>
23#include <asm/processor.h>
22 24
23/* 25/*
24 * Including <asm/unistd.h> would give us the 64-bit syscall numbers ... 26
@@ -67,7 +69,18 @@ subsys_initcall(init_vdso);
67 69
68static unsigned long vdso_addr(unsigned long start) 70static unsigned long vdso_addr(unsigned long start)
69{ 71{
70 return STACK_TOP; 72 unsigned long offset = 0UL;
73
74 if (current->flags & PF_RANDOMIZE) {
75 offset = get_random_int();
76 offset <<= PAGE_SHIFT;
77 if (TASK_IS_32BIT_ADDR)
78 offset &= 0xfffffful;
79 else
80 offset &= 0xffffffful;
81 }
82
83 return STACK_TOP + offset;
71} 84}
72 85
73int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) 86int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
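
/*
 * Editor's note: a worked example of the vdso_addr() randomization above,
 * assuming PAGE_SHIFT == 12. The random value is shifted into page units
 * first and then masked, so the masks keep bits 12..23 (32-bit) or 12..27
 * (64-bit): a page-aligned offset of up to ~16 MB or ~256 MB above STACK_TOP.
 */
static unsigned long example_vdso_offset(void)
{
	unsigned long offset = 0x12345678UL;	/* pretend get_random_int() */

	offset <<= 12;			/* 0x12345678000 (page units)	*/
	offset &= 0xfffffful;		/* 32-bit mask -> 0x00678000	*/

	return offset;			/* page aligned, under 16 MB	*/
}
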
diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c
index 468ffa043607..7edcd4946fc1 100644
--- a/arch/mips/lantiq/falcon/sysctrl.c
+++ b/arch/mips/lantiq/falcon/sysctrl.c
@@ -49,6 +49,7 @@
49 49
50/* Activation Status Register */ 50/* Activation Status Register */
51#define ACTS_ASC0_ACT 0x00001000 51#define ACTS_ASC0_ACT 0x00001000
52#define ACTS_SSC0 0x00002000
52#define ACTS_ASC1_ACT 0x00000800 53#define ACTS_ASC1_ACT 0x00000800
53#define ACTS_I2C_ACT 0x00004000 54#define ACTS_I2C_ACT 0x00004000
54#define ACTS_P0 0x00010000 55#define ACTS_P0 0x00010000
@@ -147,12 +148,11 @@ static void falcon_gpe_enable(void)
147 if (status & (1 << (GPPC_OFFSET + 1))) 148 if (status & (1 << (GPPC_OFFSET + 1)))
148 return; 149 return;
149 150
150 if (status_r32(STATUS_CONFIG) == 0) 151 freq = (status_r32(STATUS_CONFIG) &
152 GPEFREQ_MASK) >>
153 GPEFREQ_OFFSET;
154 if (freq == 0)
151 freq = 1; /* use 625MHz on unfused chip */ 155 freq = 1; /* use 625MHz on unfused chip */
152 else
153 freq = (status_r32(STATUS_CONFIG) &
154 GPEFREQ_MASK) >>
155 GPEFREQ_OFFSET;
156 156
157 /* apply new frequency */ 157 /* apply new frequency */
158 sysctl_w32_mask(SYSCTL_SYS1, 7 << (GPPC_OFFSET + 1), 158 sysctl_w32_mask(SYSCTL_SYS1, 7 << (GPPC_OFFSET + 1),
@@ -260,5 +260,6 @@ void __init ltq_soc_init(void)
260 clkdev_add_sys("1e800600.pad", SYSCTL_SYS1, ACTS_PADCTRL4); 260 clkdev_add_sys("1e800600.pad", SYSCTL_SYS1, ACTS_PADCTRL4);
261 clkdev_add_sys("1e100b00.serial", SYSCTL_SYS1, ACTS_ASC1_ACT); 261 clkdev_add_sys("1e100b00.serial", SYSCTL_SYS1, ACTS_ASC1_ACT);
262 clkdev_add_sys("1e100c00.serial", SYSCTL_SYS1, ACTS_ASC0_ACT); 262 clkdev_add_sys("1e100c00.serial", SYSCTL_SYS1, ACTS_ASC0_ACT);
263 clkdev_add_sys("1e100d00.spi", SYSCTL_SYS1, ACTS_SSC0);
263 clkdev_add_sys("1e200000.i2c", SYSCTL_SYS1, ACTS_I2C_ACT); 264 clkdev_add_sys("1e200000.i2c", SYSCTL_SYS1, ACTS_I2C_ACT);
264} 265}
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 030568a70ac4..6ab10573490d 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -70,6 +70,7 @@ static struct resource ltq_eiu_irq[MAX_EIU];
70static void __iomem *ltq_icu_membase[MAX_IM]; 70static void __iomem *ltq_icu_membase[MAX_IM];
71static void __iomem *ltq_eiu_membase; 71static void __iomem *ltq_eiu_membase;
72static struct irq_domain *ltq_domain; 72static struct irq_domain *ltq_domain;
73static int ltq_perfcount_irq;
73 74
74int ltq_eiu_get_irq(int exin) 75int ltq_eiu_get_irq(int exin)
75{ 76{
@@ -378,30 +379,6 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
378 panic("Failed to remap icu memory"); 379 panic("Failed to remap icu memory");
379 } 380 }
380 381
381 /* the external interrupts are optional and xway only */
382 eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
383 if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
384 /* find out how many external irq sources we have */
385 exin_avail = of_irq_count(eiu_node);
386
387 if (exin_avail > MAX_EIU)
388 exin_avail = MAX_EIU;
389
390 ret = of_irq_to_resource_table(eiu_node,
391 ltq_eiu_irq, exin_avail);
392 if (ret != exin_avail)
393 panic("failed to load external irq resources");
394
395 if (request_mem_region(res.start, resource_size(&res),
396 res.name) < 0)
397 pr_err("Failed to request eiu memory");
398
399 ltq_eiu_membase = ioremap_nocache(res.start,
400 resource_size(&res));
401 if (!ltq_eiu_membase)
402 panic("Failed to remap eiu memory");
403 }
404
405 /* turn off all irqs by default */ 382 /* turn off all irqs by default */
406 for (i = 0; i < MAX_IM; i++) { 383 for (i = 0; i < MAX_IM; i++) {
407 /* make sure all irqs are turned off by default */ 384 /* make sure all irqs are turned off by default */
@@ -449,7 +426,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
449#endif 426#endif
450 427
451 /* tell oprofile which irq to use */ 428 /* tell oprofile which irq to use */
452 cp0_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ); 429 ltq_perfcount_irq = irq_create_mapping(ltq_domain, LTQ_PERF_IRQ);
453 430
454 /* 431 /*
455 * if the timer irq is not one of the mips irqs we need to 432 * if the timer irq is not one of the mips irqs we need to
@@ -458,9 +435,38 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
458 if (MIPS_CPU_TIMER_IRQ != 7) 435 if (MIPS_CPU_TIMER_IRQ != 7)
459 irq_create_mapping(ltq_domain, MIPS_CPU_TIMER_IRQ); 436 irq_create_mapping(ltq_domain, MIPS_CPU_TIMER_IRQ);
460 437
438 /* the external interrupts are optional and xway only */
439 eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
440 if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
441 /* find out how many external irq sources we have */
442 exin_avail = of_irq_count(eiu_node);
443
444 if (exin_avail > MAX_EIU)
445 exin_avail = MAX_EIU;
446
447 ret = of_irq_to_resource_table(eiu_node,
448 ltq_eiu_irq, exin_avail);
449 if (ret != exin_avail)
450 panic("failed to load external irq resources");
451
452 if (request_mem_region(res.start, resource_size(&res),
453 res.name) < 0)
454 pr_err("Failed to request eiu memory");
455
456 ltq_eiu_membase = ioremap_nocache(res.start,
457 resource_size(&res));
458 if (!ltq_eiu_membase)
459 panic("Failed to remap eiu memory");
460 }
461
461 return 0; 462 return 0;
462} 463}
463 464
465int get_c0_perfcount_int(void)
466{
467 return ltq_perfcount_irq;
468}
469
464unsigned int get_c0_compare_int(void) 470unsigned int get_c0_compare_int(void)
465{ 471{
466 return MIPS_CPU_TIMER_IRQ; 472 return MIPS_CPU_TIMER_IRQ;
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index 7447d322d14e..39ab3e786e59 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -36,6 +36,11 @@ const char *get_system_type(void)
36 return soc_info.sys_type; 36 return soc_info.sys_type;
37} 37}
38 38
39int ltq_soc_type(void)
40{
41 return soc_info.type;
42}
43
39void prom_free_prom_memory(void) 44void prom_free_prom_memory(void)
40{ 45{
41} 46}
@@ -72,6 +77,8 @@ void __init plat_mem_setup(void)
72 * parsed resulting in our memory appearing 77 * parsed resulting in our memory appearing
73 */ 78 */
74 __dt_setup_arch(__dtb_start); 79 __dt_setup_arch(__dtb_start);
80
81 strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
75} 82}
76 83
77void __init device_tree_init(void) 84void __init device_tree_init(void)
@@ -97,16 +104,7 @@ void __init prom_init(void)
97 104
98int __init plat_of_setup(void) 105int __init plat_of_setup(void)
99{ 106{
100 static struct of_device_id of_ids[3]; 107 return __dt_register_buses(soc_info.compatible, "simple-bus");
101
102 if (!of_have_populated_dt())
103 panic("device tree not present");
104
105 strlcpy(of_ids[0].compatible, soc_info.compatible,
106 sizeof(of_ids[0].compatible));
107 strncpy(of_ids[1].compatible, "simple-bus",
108 sizeof(of_ids[1].compatible));
109 return of_platform_populate(NULL, of_ids, NULL, NULL);
110} 108}
111 109
112arch_initcall(plat_of_setup); 110arch_initcall(plat_of_setup);
diff --git a/arch/mips/lantiq/xway/Makefile b/arch/mips/lantiq/xway/Makefile
index 087497d97357..a2edc538f477 100644
--- a/arch/mips/lantiq/xway/Makefile
+++ b/arch/mips/lantiq/xway/Makefile
@@ -1,3 +1,5 @@
1obj-y := prom.o sysctrl.o clk.o reset.o dma.o gptu.o dcdc.o 1obj-y := prom.o sysctrl.o clk.o reset.o dma.o gptu.o dcdc.o
2 2
3obj-y += vmmc.o
4
3obj-$(CONFIG_XRX200_PHY_FW) += xrx200_phy_fw.o 5obj-$(CONFIG_XRX200_PHY_FW) += xrx200_phy_fw.o
diff --git a/arch/mips/lantiq/xway/reset.c b/arch/mips/lantiq/xway/reset.c
index 1fa0f175357e..fe68f9ae47c1 100644
--- a/arch/mips/lantiq/xway/reset.c
+++ b/arch/mips/lantiq/xway/reset.c
@@ -14,6 +14,7 @@
14#include <linux/delay.h> 14#include <linux/delay.h>
15#include <linux/of_address.h> 15#include <linux/of_address.h>
16#include <linux/of_platform.h> 16#include <linux/of_platform.h>
17#include <linux/reset-controller.h>
17 18
18#include <asm/reboot.h> 19#include <asm/reboot.h>
19 20
@@ -113,10 +114,77 @@ void ltq_reset_once(unsigned int module, ulong u)
113 ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) & ~module, RCU_RST_REQ); 114 ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) & ~module, RCU_RST_REQ);
114} 115}
115 116
117static int ltq_assert_device(struct reset_controller_dev *rcdev,
118 unsigned long id)
119{
120 u32 val;
121
122 if (id < 8)
123 return -1;
124
125 val = ltq_rcu_r32(RCU_RST_REQ);
126 val |= BIT(id);
127 ltq_rcu_w32(val, RCU_RST_REQ);
128
129 return 0;
130}
131
132static int ltq_deassert_device(struct reset_controller_dev *rcdev,
133 unsigned long id)
134{
135 u32 val;
136
137 if (id < 8)
138 return -1;
139
140 val = ltq_rcu_r32(RCU_RST_REQ);
141 val &= ~BIT(id);
142 ltq_rcu_w32(val, RCU_RST_REQ);
143
144 return 0;
145}
146
147static int ltq_reset_device(struct reset_controller_dev *rcdev,
148 unsigned long id)
149{
150 ltq_assert_device(rcdev, id);
151 return ltq_deassert_device(rcdev, id);
152}
153
154static struct reset_control_ops reset_ops = {
155 .reset = ltq_reset_device,
156 .assert = ltq_assert_device,
157 .deassert = ltq_deassert_device,
158};
159
160static struct reset_controller_dev reset_dev = {
161 .ops = &reset_ops,
162 .owner = THIS_MODULE,
163 .nr_resets = 32,
164 .of_reset_n_cells = 1,
165};
166
167void ltq_rst_init(void)
168{
169 reset_dev.of_node = of_find_compatible_node(NULL, NULL,
170 "lantiq,xway-reset");
171 if (!reset_dev.of_node)
172 pr_err("Failed to find reset controller node");
173 else
174 reset_controller_register(&reset_dev);
175}
176
116static void ltq_machine_restart(char *command) 177static void ltq_machine_restart(char *command)
117{ 178{
179 u32 val = ltq_rcu_r32(RCU_RST_REQ);
180
181 if (of_device_is_compatible(ltq_rcu_np, "lantiq,rcu-xrx200"))
182 val |= RCU_RD_GPHY1_XRX200 | RCU_RD_GPHY0_XRX200;
183
184 val |= RCU_RD_SRST;
185
118 local_irq_disable(); 186 local_irq_disable();
119 ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) | RCU_RD_SRST, RCU_RST_REQ); 187 ltq_rcu_w32(val, RCU_RST_REQ);
120 unreachable(); 188 unreachable();
121} 189}
122 190
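
/*
 * Editor's sketch: how a driver might consume the reset controller that
 * ltq_rst_init() registers above, via the generic reset_control API of this
 * era. The device pointer and the DT "resets" property are assumptions.
 */
static int example_use_reset(struct device *dev)
{
	struct reset_control *rstc;

	rstc = devm_reset_control_get(dev, NULL);  /* from the "resets" prop */
	if (IS_ERR(rstc))
		return PTR_ERR(rstc);

	return reset_control_reset(rstc);	/* assert, then deassert */
}
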
diff --git a/arch/mips/lantiq/xway/vmmc.c b/arch/mips/lantiq/xway/vmmc.c
new file mode 100644
index 000000000000..696cd57f6f13
--- /dev/null
+++ b/arch/mips/lantiq/xway/vmmc.c
@@ -0,0 +1,69 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/module.h>
10#include <linux/of_platform.h>
11#include <linux/of_gpio.h>
12#include <linux/dma-mapping.h>
13
14#include <lantiq_soc.h>
15
16static unsigned int *cp1_base;
17
18unsigned int *ltq_get_cp1_base(void)
19{
20 if (!cp1_base)
21 panic("no cp1 base was set\n");
22
23 return cp1_base;
24}
25EXPORT_SYMBOL(ltq_get_cp1_base);
26
27static int vmmc_probe(struct platform_device *pdev)
28{
29#define CP1_SIZE (1 << 20)
30 int gpio_count;
31 dma_addr_t dma;
32
33 cp1_base =
34 (void *) CPHYSADDR(dma_alloc_coherent(NULL, CP1_SIZE,
35 &dma, GFP_ATOMIC));
36
37 gpio_count = of_gpio_count(pdev->dev.of_node);
38 while (gpio_count > 0) {
39 enum of_gpio_flags flags;
40 int gpio = of_get_gpio_flags(pdev->dev.of_node,
41 --gpio_count, &flags);
42 if (gpio_request(gpio, "vmmc-relay"))
43 continue;
44 dev_info(&pdev->dev, "requested GPIO %d\n", gpio);
45 gpio_direction_output(gpio,
46 (flags & OF_GPIO_ACTIVE_LOW) ? (0) : (1));
47 }
48
49 dev_info(&pdev->dev, "reserved %dMB at 0x%p", CP1_SIZE >> 20, cp1_base);
50
51 return 0;
52}
53
54static const struct of_device_id vmmc_match[] = {
55 { .compatible = "lantiq,vmmc-xway" },
56 {},
57};
58MODULE_DEVICE_TABLE(of, vmmc_match);
59
60static struct platform_driver vmmc_driver = {
61 .probe = vmmc_probe,
62 .driver = {
63 .name = "lantiq,vmmc",
64 .owner = THIS_MODULE,
65 .of_match_table = vmmc_match,
66 },
67};
68
69module_platform_driver(vmmc_driver);
diff --git a/arch/mips/lantiq/xway/xrx200_phy_fw.c b/arch/mips/lantiq/xway/xrx200_phy_fw.c
index d4d9d31f152e..7c1e54c6a36c 100644
--- a/arch/mips/lantiq/xway/xrx200_phy_fw.c
+++ b/arch/mips/lantiq/xway/xrx200_phy_fw.c
@@ -24,7 +24,28 @@ static dma_addr_t xway_gphy_load(struct platform_device *pdev)
24 void *fw_addr; 24 void *fw_addr;
25 size_t size; 25 size_t size;
26 26
27 if (of_property_read_string(pdev->dev.of_node, "firmware", &fw_name)) { 27 if (of_get_property(pdev->dev.of_node, "firmware1", NULL) ||
28 of_get_property(pdev->dev.of_node, "firmware2", NULL)) {
29 switch (ltq_soc_type()) {
30 case SOC_TYPE_VR9:
31 if (of_property_read_string(pdev->dev.of_node,
32 "firmware1", &fw_name)) {
33 dev_err(&pdev->dev,
34 "failed to load firmware filename\n");
35 return 0;
36 }
37 break;
38 case SOC_TYPE_VR9_2:
39 if (of_property_read_string(pdev->dev.of_node,
40 "firmware2", &fw_name)) {
41 dev_err(&pdev->dev,
42 "failed to load firmware filename\n");
43 return 0;
44 }
45 break;
46 }
47 } else if (of_property_read_string(pdev->dev.of_node,
48 "firmware", &fw_name)) {
28 dev_err(&pdev->dev, "failed to load firmware filename\n"); 49 dev_err(&pdev->dev, "failed to load firmware filename\n");
29 return 0; 50 return 0;
30 } 51 }
diff --git a/arch/mips/lib/iomap.c b/arch/mips/lib/iomap.c
index e3acb2dad33a..8e7e378ce51c 100644
--- a/arch/mips/lib/iomap.c
+++ b/arch/mips/lib/iomap.c
@@ -97,14 +97,14 @@ EXPORT_SYMBOL(iowrite32be);
97 97
98/* 98/*
99 * These are the "repeat MMIO read/write" functions. 99 * These are the "repeat MMIO read/write" functions.
100 * Note the "__raw" accesses, since we don't want to 100 * Note the "__mem" accesses, since we want to convert
101 * convert to CPU byte order. We write in "IO byte 101 * to CPU byte order if the host bus happens to not match the
102 * order" (we also don't have IO barriers). 102 * endianness of PCI/ISA (see mach-generic/mangle-port.h).
103 */ 103 */
104static inline void mmio_insb(void __iomem *addr, u8 *dst, int count) 104static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
105{ 105{
106 while (--count >= 0) { 106 while (--count >= 0) {
107 u8 data = __raw_readb(addr); 107 u8 data = __mem_readb(addr);
108 *dst = data; 108 *dst = data;
109 dst++; 109 dst++;
110 } 110 }
@@ -113,7 +113,7 @@ static inline void mmio_insb(void __iomem *addr, u8 *dst, int count)
113static inline void mmio_insw(void __iomem *addr, u16 *dst, int count) 113static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
114{ 114{
115 while (--count >= 0) { 115 while (--count >= 0) {
116 u16 data = __raw_readw(addr); 116 u16 data = __mem_readw(addr);
117 *dst = data; 117 *dst = data;
118 dst++; 118 dst++;
119 } 119 }
@@ -122,7 +122,7 @@ static inline void mmio_insw(void __iomem *addr, u16 *dst, int count)
122static inline void mmio_insl(void __iomem *addr, u32 *dst, int count) 122static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
123{ 123{
124 while (--count >= 0) { 124 while (--count >= 0) {
125 u32 data = __raw_readl(addr); 125 u32 data = __mem_readl(addr);
126 *dst = data; 126 *dst = data;
127 dst++; 127 dst++;
128 } 128 }
@@ -131,7 +131,7 @@ static inline void mmio_insl(void __iomem *addr, u32 *dst, int count)
131static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count) 131static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
132{ 132{
133 while (--count >= 0) { 133 while (--count >= 0) {
134 __raw_writeb(*src, addr); 134 __mem_writeb(*src, addr);
135 src++; 135 src++;
136 } 136 }
137} 137}
@@ -139,7 +139,7 @@ static inline void mmio_outsb(void __iomem *addr, const u8 *src, int count)
139static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count) 139static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
140{ 140{
141 while (--count >= 0) { 141 while (--count >= 0) {
142 __raw_writew(*src, addr); 142 __mem_writew(*src, addr);
143 src++; 143 src++;
144 } 144 }
145} 145}
@@ -147,7 +147,7 @@ static inline void mmio_outsw(void __iomem *addr, const u16 *src, int count)
147static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count) 147static inline void mmio_outsl(void __iomem *addr, const u32 *src, int count)
148{ 148{
149 while (--count >= 0) { 149 while (--count >= 0) {
150 __raw_writel(*src, addr); 150 __mem_writel(*src, addr);
151 src++; 151 src++;
152 } 152 }
153} 153}
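These mmio_ins*/mmio_outs* loops back the ioread*_rep()/iowrite*_rep() helpers defined later in this file. A usage sketch (ioaddr is assumed to be an already-mapped __iomem cookie):

u16 buf[64];

/* Drain 64 samples from a device FIFO; with the __mem accessors the
 * data is mangled per mach-generic/mangle-port.h rather than passed
 * through in raw bus order. */
ioread16_rep(ioaddr, buf, ARRAY_SIZE(buf));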
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 7b0e5462ca51..c8fe6b1968fb 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -114,8 +114,7 @@
114 R10KCBARRIER(0(ra)) 114 R10KCBARRIER(0(ra))
115#ifdef __MIPSEB__ 115#ifdef __MIPSEB__
116 EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */ 116 EX(LONG_S_L, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */
117#endif 117#else
118#ifdef __MIPSEL__
119 EX(LONG_S_R, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */ 118 EX(LONG_S_R, a1, (a0), .Lfirst_fixup\@) /* make word/dword aligned */
120#endif 119#endif
121 PTR_SUBU a0, t0 /* long align ptr */ 120 PTR_SUBU a0, t0 /* long align ptr */
@@ -164,8 +163,7 @@
164 R10KCBARRIER(0(ra)) 163 R10KCBARRIER(0(ra))
165#ifdef __MIPSEB__ 164#ifdef __MIPSEB__
166 EX(LONG_S_R, a1, -1(a0), .Llast_fixup\@) 165 EX(LONG_S_R, a1, -1(a0), .Llast_fixup\@)
167#endif 166#else
168#ifdef __MIPSEL__
169 EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@) 167 EX(LONG_S_L, a1, -1(a0), .Llast_fixup\@)
170#endif 168#endif
1711: jr ra 1691: jr ra
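The memset.S hunks are a pure cleanup: __MIPSEB__ and __MIPSEL__ are mutually exclusive, so the second #ifdef collapses into an #else. The same pattern in preprocessor form (illustration only; ALIGN_STORE is hypothetical):

#ifdef __MIPSEB__
# define ALIGN_STORE	LONG_S_L	/* big endian: store-left aligns */
#else
# define ALIGN_STORE	LONG_S_R	/* little endian: store-right aligns */
#endif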
diff --git a/arch/mips/lib/mips-atomic.c b/arch/mips/lib/mips-atomic.c
index 57bcdaf1f1c8..be777d9a3f85 100644
--- a/arch/mips/lib/mips-atomic.c
+++ b/arch/mips/lib/mips-atomic.c
@@ -42,15 +42,11 @@ notrace void arch_local_irq_disable(void)
42 __asm__ __volatile__( 42 __asm__ __volatile__(
43 " .set push \n" 43 " .set push \n"
44 " .set noat \n" 44 " .set noat \n"
45#if defined(CONFIG_CPU_MIPSR2)
46 /* see irqflags.h for inline function */
47#else
48 " mfc0 $1,$12 \n" 45 " mfc0 $1,$12 \n"
49 " ori $1,0x1f \n" 46 " ori $1,0x1f \n"
50 " xori $1,0x1f \n" 47 " xori $1,0x1f \n"
51 " .set noreorder \n" 48 " .set noreorder \n"
52 " mtc0 $1,$12 \n" 49 " mtc0 $1,$12 \n"
53#endif
54 " " __stringify(__irq_disable_hazard) " \n" 50 " " __stringify(__irq_disable_hazard) " \n"
55 " .set pop \n" 51 " .set pop \n"
56 : /* no outputs */ 52 : /* no outputs */
@@ -72,15 +68,11 @@ notrace unsigned long arch_local_irq_save(void)
72 " .set push \n" 68 " .set push \n"
73 " .set reorder \n" 69 " .set reorder \n"
74 " .set noat \n" 70 " .set noat \n"
75#if defined(CONFIG_CPU_MIPSR2)
76 /* see irqflags.h for inline function */
77#else
78 " mfc0 %[flags], $12 \n" 71 " mfc0 %[flags], $12 \n"
79 " ori $1, %[flags], 0x1f \n" 72 " ori $1, %[flags], 0x1f \n"
80 " xori $1, 0x1f \n" 73 " xori $1, 0x1f \n"
81 " .set noreorder \n" 74 " .set noreorder \n"
82 " mtc0 $1, $12 \n" 75 " mtc0 $1, $12 \n"
83#endif
84 " " __stringify(__irq_disable_hazard) " \n" 76 " " __stringify(__irq_disable_hazard) " \n"
85 " .set pop \n" 77 " .set pop \n"
86 : [flags] "=r" (flags) 78 : [flags] "=r" (flags)
@@ -103,18 +95,12 @@ notrace void arch_local_irq_restore(unsigned long flags)
103 " .set push \n" 95 " .set push \n"
104 " .set noreorder \n" 96 " .set noreorder \n"
105 " .set noat \n" 97 " .set noat \n"
106#if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
107 /* see irqflags.h for inline function */
108#elif defined(CONFIG_CPU_MIPSR2)
109 /* see irqflags.h for inline function */
110#else
111 " mfc0 $1, $12 \n" 98 " mfc0 $1, $12 \n"
112 " andi %[flags], 1 \n" 99 " andi %[flags], 1 \n"
113 " ori $1, 0x1f \n" 100 " ori $1, 0x1f \n"
114 " xori $1, 0x1f \n" 101 " xori $1, 0x1f \n"
115 " or %[flags], $1 \n" 102 " or %[flags], $1 \n"
116 " mtc0 %[flags], $12 \n" 103 " mtc0 %[flags], $12 \n"
117#endif
118 " " __stringify(__irq_disable_hazard) " \n" 104 " " __stringify(__irq_disable_hazard) " \n"
119 " .set pop \n" 105 " .set pop \n"
120 : [flags] "=r" (__tmp1) 106 : [flags] "=r" (__tmp1)
@@ -136,18 +122,12 @@ notrace void __arch_local_irq_restore(unsigned long flags)
136 " .set push \n" 122 " .set push \n"
137 " .set noreorder \n" 123 " .set noreorder \n"
138 " .set noat \n" 124 " .set noat \n"
139#if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
140 /* see irqflags.h for inline function */
141#elif defined(CONFIG_CPU_MIPSR2)
142 /* see irqflags.h for inline function */
143#else
144 " mfc0 $1, $12 \n" 125 " mfc0 $1, $12 \n"
145 " andi %[flags], 1 \n" 126 " andi %[flags], 1 \n"
146 " ori $1, 0x1f \n" 127 " ori $1, 0x1f \n"
147 " xori $1, 0x1f \n" 128 " xori $1, 0x1f \n"
148 " or %[flags], $1 \n" 129 " or %[flags], $1 \n"
149 " mtc0 %[flags], $12 \n" 130 " mtc0 %[flags], $12 \n"
150#endif
151 " " __stringify(__irq_disable_hazard) " \n" 131 " " __stringify(__irq_disable_hazard) " \n"
152 " .set pop \n" 132 " .set pop \n"
153 : [flags] "=r" (__tmp1) 133 : [flags] "=r" (__tmp1)
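The deleted CONFIG_CPU_MIPSR2 branches were dead code here: MIPSR2 kernels take the inline versions from asm/irqflags.h, so only the generic path remains. As C-level pseudocode, the surviving ori/xori sequence amounts to the following sketch (the real code must stay in hand-written asm so nothing intrudes between the Status read and write):

unsigned long flags = read_c0_status();

/* Setting then toggling the low five bits clears IE, EXL, ERL and
 * KSU: kernel mode, interrupts off, rest of Status untouched. */
write_c0_status((flags | 0x1f) ^ 0x1f);
irq_disable_hazard();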
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 1ef365ab3cd3..975a13855116 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -9,6 +9,7 @@
9#include <linux/mm.h> 9#include <linux/mm.h>
10 10
11#include <asm/mipsregs.h> 11#include <asm/mipsregs.h>
12#include <asm/mmu_context.h>
12#include <asm/page.h> 13#include <asm/page.h>
13#include <asm/pgtable.h> 14#include <asm/pgtable.h>
14#include <asm/tlbdebug.h> 15#include <asm/tlbdebug.h>
@@ -21,7 +22,7 @@ static void dump_tlb(int first, int last)
21 unsigned int asid; 22 unsigned int asid;
22 unsigned long entryhi, entrylo0; 23 unsigned long entryhi, entrylo0;
23 24
24 asid = read_c0_entryhi() & 0xfc0; 25 asid = read_c0_entryhi() & ASID_MASK;
25 26
26 for (i = first; i <= last; i++) { 27 for (i = first; i <= last; i++) {
27 write_c0_index(i<<8); 28 write_c0_index(i<<8);
@@ -34,8 +35,8 @@ static void dump_tlb(int first, int last)
34 entrylo0 = read_c0_entrylo0(); 35 entrylo0 = read_c0_entrylo0();
35 36
36 /* Unused entries have a virtual address of KSEG0. */ 37 /* Unused entries have a virtual address of KSEG0. */
37 if ((entryhi & 0xfffff000) != 0x80000000 38 if ((entryhi & PAGE_MASK) != KSEG0
38 && (entryhi & 0xfc0) == asid) { 39 && (entryhi & ASID_MASK) == asid) {
39 /* 40 /*
40 * Only print entries in use 41 * Only print entries in use
41 */ 42 */
@@ -43,8 +44,8 @@ static void dump_tlb(int first, int last)
43 44
44 printk("va=%08lx asid=%08lx" 45 printk("va=%08lx asid=%08lx"
45 " [pa=%06lx n=%d d=%d v=%d g=%d]", 46 " [pa=%06lx n=%d d=%d v=%d g=%d]",
46 (entryhi & 0xfffff000), 47 entryhi & PAGE_MASK,
47 entryhi & 0xfc0, 48 entryhi & ASID_MASK,
48 entrylo0 & PAGE_MASK, 49 entrylo0 & PAGE_MASK,
49 (entrylo0 & (1 << 11)) ? 1 : 0, 50 (entrylo0 & (1 << 11)) ? 1 : 0,
50 (entrylo0 & (1 << 10)) ? 1 : 0, 51 (entrylo0 & (1 << 10)) ? 1 : 0,
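With the magic constants gone, decoding an R3000 EntryHi reads directly off the named masks; a minimal sketch:

unsigned long entryhi = read_c0_entryhi();
unsigned long va   = entryhi & PAGE_MASK;	/* was 0xfffff000 */
unsigned long asid = entryhi & ASID_MASK;	/* was 0xfc0 */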
diff --git a/arch/mips/lib/strlen_user.S b/arch/mips/lib/strlen_user.S
index bef65c98df59..929bbacd697e 100644
--- a/arch/mips/lib/strlen_user.S
+++ b/arch/mips/lib/strlen_user.S
@@ -28,7 +28,6 @@ LEAF(__strlen_\func\()_asm)
28 and v0, a0 28 and v0, a0
29 bnez v0, .Lfault\@ 29 bnez v0, .Lfault\@
30 30
31FEXPORT(__strlen_\func\()_nocheck_asm)
32 move v0, a0 31 move v0, a0
33.ifeqs "\func", "kernel" 32.ifeqs "\func", "kernel"
341: EX(lbu, v1, (v0), .Lfault\@) 331: EX(lbu, v1, (v0), .Lfault\@)
@@ -48,9 +47,7 @@ FEXPORT(__strlen_\func\()_nocheck_asm)
48#ifndef CONFIG_EVA 47#ifndef CONFIG_EVA
49 /* Set aliases */ 48 /* Set aliases */
50 .global __strlen_user_asm 49 .global __strlen_user_asm
51 .global __strlen_user_nocheck_asm
52 .set __strlen_user_asm, __strlen_kernel_asm 50 .set __strlen_user_asm, __strlen_kernel_asm
53 .set __strlen_user_nocheck_asm, __strlen_kernel_nocheck_asm
54#endif 51#endif
55 52
56__BUILD_STRLEN_ASM kernel 53__BUILD_STRLEN_ASM kernel
diff --git a/arch/mips/loongson/Kconfig b/arch/mips/loongson/Kconfig
index 1b91fc6a921b..156de85b82cd 100644
--- a/arch/mips/loongson/Kconfig
+++ b/arch/mips/loongson/Kconfig
@@ -86,6 +86,7 @@ config LOONGSON_MACH3X
86 select LOONGSON_MC146818 86 select LOONGSON_MC146818
87 select ZONE_DMA32 87 select ZONE_DMA32
88 select LEFI_FIRMWARE_INTERFACE 88 select LEFI_FIRMWARE_INTERFACE
89 select PHYS48_TO_HT40
89 help 90 help
90 Generic Loongson 3 family machines utilize the 3A/3B revision 91 Generic Loongson 3 family machines utilize the 3A/3B revision
91 of Loongson processor and RS780/SBX00 chipset. 92 of Loongson processor and RS780/SBX00 chipset.
@@ -107,6 +108,18 @@ config CS5536_MFGPT
107 108
108 If unsure, say Yes. 109 If unsure, say Yes.
109 110
111config RS780_HPET
112 bool "RS780/SBX00 HPET Timer"
113 depends on LOONGSON_MACH3X
114 select MIPS_EXTERNAL_TIMER
115 help
116	  This option enables the HPET timer of the AMD RS780/SBX00 chipset.
117
118	  If you want to enable the Loongson3 CPUFreq driver, please enable
119	  this option first; otherwise you will get a wrong system time.
120
121 If unsure, say Yes.
122
110config LOONGSON_SUSPEND 123config LOONGSON_SUSPEND
111 bool 124 bool
112 default y 125 default y
@@ -131,6 +144,10 @@ config SWIOTLB
131 select NEED_SG_DMA_LENGTH 144 select NEED_SG_DMA_LENGTH
132 select NEED_DMA_MAP_STATE 145 select NEED_DMA_MAP_STATE
133 146
147config PHYS48_TO_HT40
148 bool
149 default y if CPU_LOONGSON3
150
134config LOONGSON_MC146818 151config LOONGSON_MC146818
135 bool 152 bool
136 default n 153 default n
diff --git a/arch/mips/loongson/common/cs5536/cs5536_pci.c b/arch/mips/loongson/common/cs5536/cs5536_pci.c
index 81bed9d18061..b739723205f8 100644
--- a/arch/mips/loongson/common/cs5536/cs5536_pci.c
+++ b/arch/mips/loongson/common/cs5536/cs5536_pci.c
@@ -21,6 +21,7 @@
21 */ 21 */
22 22
23#include <linux/types.h> 23#include <linux/types.h>
24#include <cs5536/cs5536_pci.h>
24#include <cs5536/cs5536_vsm.h> 25#include <cs5536/cs5536_vsm.h>
25 26
26enum { 27enum {
@@ -35,21 +36,21 @@ enum {
35}; 36};
36 37
37static const cs5536_pci_vsm_write vsm_conf_write[] = { 38static const cs5536_pci_vsm_write vsm_conf_write[] = {
38 [CS5536_ISA_FUNC] pci_isa_write_reg, 39 [CS5536_ISA_FUNC] = pci_isa_write_reg,
39 [reserved_func] NULL, 40 [reserved_func] = NULL,
40 [CS5536_IDE_FUNC] pci_ide_write_reg, 41 [CS5536_IDE_FUNC] = pci_ide_write_reg,
41 [CS5536_ACC_FUNC] pci_acc_write_reg, 42 [CS5536_ACC_FUNC] = pci_acc_write_reg,
42 [CS5536_OHCI_FUNC] pci_ohci_write_reg, 43 [CS5536_OHCI_FUNC] = pci_ohci_write_reg,
43 [CS5536_EHCI_FUNC] pci_ehci_write_reg, 44 [CS5536_EHCI_FUNC] = pci_ehci_write_reg,
44}; 45};
45 46
46static const cs5536_pci_vsm_read vsm_conf_read[] = { 47static const cs5536_pci_vsm_read vsm_conf_read[] = {
47 [CS5536_ISA_FUNC] pci_isa_read_reg, 48 [CS5536_ISA_FUNC] = pci_isa_read_reg,
48 [reserved_func] NULL, 49 [reserved_func] = NULL,
49 [CS5536_IDE_FUNC] pci_ide_read_reg, 50 [CS5536_IDE_FUNC] = pci_ide_read_reg,
50 [CS5536_ACC_FUNC] pci_acc_read_reg, 51 [CS5536_ACC_FUNC] = pci_acc_read_reg,
51 [CS5536_OHCI_FUNC] pci_ohci_read_reg, 52 [CS5536_OHCI_FUNC] = pci_ohci_read_reg,
52 [CS5536_EHCI_FUNC] pci_ehci_read_reg, 53 [CS5536_EHCI_FUNC] = pci_ehci_read_reg,
53}; 54};
54 55
55/* 56/*
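The initializer change above replaces the obsolete GNU "[index] value" form with standard C99 designated initializers. For reference, the C99 form in isolation:

static const char *const digits[] = {
	[0] = "zero",	/* C99 designated initializer */
	[2] = "two",	/* skipped slots are implicitly NULL */
};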
diff --git a/arch/mips/loongson/common/dma-swiotlb.c b/arch/mips/loongson/common/dma-swiotlb.c
index c2be01f91575..2c6b989c1bc4 100644
--- a/arch/mips/loongson/common/dma-swiotlb.c
+++ b/arch/mips/loongson/common/dma-swiotlb.c
@@ -105,11 +105,25 @@ static int loongson_dma_set_mask(struct device *dev, u64 mask)
105 105
106dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) 106dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
107{ 107{
108 long nid;
109#ifdef CONFIG_PHYS48_TO_HT40
110	/* Extract the 2-bit node id (bits 44~47, only 44~45 used now) from
111	 * Loongson-3's 48-bit physical address and embed it at bit 37 of the 40-bit HT address */
112 nid = (paddr >> 44) & 0x3;
113 paddr = ((nid << 44) ^ paddr) | (nid << 37);
114#endif
108 return paddr; 115 return paddr;
109} 116}
110 117
111phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) 118phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
112{ 119{
120 long nid;
121#ifdef CONFIG_PHYS48_TO_HT40
122	/* Recover the 2-bit node id from bit 37 of the 40-bit HT address
123	 * and move it back up to bit 44 of Loongson-3's 48-bit physical address */
124 nid = (daddr >> 37) & 0x3;
125 daddr = ((nid << 37) ^ daddr) | (nid << 44);
126#endif
113 return daddr; 127 return daddr;
114} 128}
115 129
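A worked example of the CONFIG_PHYS48_TO_HT40 mangling above, for a hypothetical node-1 address: the node id moves from bit 44 down to bit 37 on the way to the bus and back again on the return path.

phys_addr_t paddr = (1ULL << 44) | 0x1000;	/* node 1, offset 0x1000 */
u64 nid = (paddr >> 44) & 0x3;			/* nid == 1 */
dma_addr_t daddr = ((nid << 44) ^ paddr) | (nid << 37);
/* daddr == (1ULL << 37) | 0x1000; dma_to_phys() inverts this exactly. */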
diff --git a/arch/mips/loongson/common/early_printk.c b/arch/mips/loongson/common/early_printk.c
index ced461b39069..6ca632e529dc 100644
--- a/arch/mips/loongson/common/early_printk.c
+++ b/arch/mips/loongson/common/early_printk.c
@@ -30,7 +30,7 @@ void prom_putchar(char c)
30 int timeout; 30 int timeout;
31 unsigned char *uart_base; 31 unsigned char *uart_base;
32 32
33 uart_base = (unsigned char *)_loongson_uart_base; 33 uart_base = (unsigned char *)_loongson_uart_base[0];
34 timeout = 1024; 34 timeout = 1024;
35 35
36 while (((serial_in(uart_base, UART_LSR) & UART_LSR_THRE) == 0) && 36 while (((serial_in(uart_base, UART_LSR) & UART_LSR_THRE) == 0) &&
diff --git a/arch/mips/loongson/common/env.c b/arch/mips/loongson/common/env.c
index f15228550a22..045ea3d47c87 100644
--- a/arch/mips/loongson/common/env.c
+++ b/arch/mips/loongson/common/env.c
@@ -21,6 +21,7 @@
21#include <asm/bootinfo.h> 21#include <asm/bootinfo.h>
22#include <loongson.h> 22#include <loongson.h>
23#include <boot_param.h> 23#include <boot_param.h>
24#include <workarounds.h>
24 25
25u32 cpu_clock_freq; 26u32 cpu_clock_freq;
26EXPORT_SYMBOL(cpu_clock_freq); 27EXPORT_SYMBOL(cpu_clock_freq);
@@ -31,7 +32,6 @@ u64 loongson_chipcfg[MAX_PACKAGES] = {0xffffffffbfc00180};
31u64 loongson_freqctrl[MAX_PACKAGES]; 32u64 loongson_freqctrl[MAX_PACKAGES];
32 33
33unsigned long long smp_group[4]; 34unsigned long long smp_group[4];
34int cpuhotplug_workaround = 0;
35 35
36#define parse_even_earlier(res, option, p) \ 36#define parse_even_earlier(res, option, p) \
37do { \ 37do { \
@@ -67,6 +67,7 @@ void __init prom_init_env(void)
67#else 67#else
68 struct boot_params *boot_p; 68 struct boot_params *boot_p;
69 struct loongson_params *loongson_p; 69 struct loongson_params *loongson_p;
70 struct system_loongson *esys;
70 struct efi_cpuinfo_loongson *ecpu; 71 struct efi_cpuinfo_loongson *ecpu;
71 struct irq_source_routing_table *eirq_source; 72 struct irq_source_routing_table *eirq_source;
72 73
@@ -74,6 +75,8 @@ void __init prom_init_env(void)
74 boot_p = (struct boot_params *)fw_arg2; 75 boot_p = (struct boot_params *)fw_arg2;
75 loongson_p = &(boot_p->efi.smbios.lp); 76 loongson_p = &(boot_p->efi.smbios.lp);
76 77
78 esys = (struct system_loongson *)
79 ((u64)loongson_p + loongson_p->system_offset);
77 ecpu = (struct efi_cpuinfo_loongson *) 80 ecpu = (struct efi_cpuinfo_loongson *)
78 ((u64)loongson_p + loongson_p->cpu_offset); 81 ((u64)loongson_p + loongson_p->cpu_offset);
79 eirq_source = (struct irq_source_routing_table *) 82 eirq_source = (struct irq_source_routing_table *)
@@ -95,6 +98,7 @@ void __init prom_init_env(void)
95 loongson_chipcfg[2] = 0x900020001fe00180; 98 loongson_chipcfg[2] = 0x900020001fe00180;
96 loongson_chipcfg[3] = 0x900030001fe00180; 99 loongson_chipcfg[3] = 0x900030001fe00180;
97 loongson_sysconf.ht_control_base = 0x90000EFDFB000000; 100 loongson_sysconf.ht_control_base = 0x90000EFDFB000000;
101 loongson_sysconf.workarounds = WORKAROUND_CPUFREQ;
98 } else if (ecpu->cputype == Loongson_3B) { 102 } else if (ecpu->cputype == Loongson_3B) {
99 loongson_sysconf.cores_per_node = 4; /* One chip has 2 nodes */ 103 loongson_sysconf.cores_per_node = 4; /* One chip has 2 nodes */
100 loongson_sysconf.cores_per_package = 8; 104 loongson_sysconf.cores_per_package = 8;
@@ -111,7 +115,7 @@ void __init prom_init_env(void)
111 loongson_freqctrl[2] = 0x900040001fe001d0; 115 loongson_freqctrl[2] = 0x900040001fe001d0;
112 loongson_freqctrl[3] = 0x900060001fe001d0; 116 loongson_freqctrl[3] = 0x900060001fe001d0;
113 loongson_sysconf.ht_control_base = 0x90001EFDFB000000; 117 loongson_sysconf.ht_control_base = 0x90001EFDFB000000;
114 cpuhotplug_workaround = 1; 118 loongson_sysconf.workarounds = WORKAROUND_CPUHOTPLUG;
115 } else { 119 } else {
116 loongson_sysconf.cores_per_node = 1; 120 loongson_sysconf.cores_per_node = 1;
117 loongson_sysconf.cores_per_package = 1; 121 loongson_sysconf.cores_per_package = 1;
@@ -119,6 +123,8 @@ void __init prom_init_env(void)
119 } 123 }
120 124
121 loongson_sysconf.nr_cpus = ecpu->nr_cpus; 125 loongson_sysconf.nr_cpus = ecpu->nr_cpus;
126 loongson_sysconf.boot_cpu_id = ecpu->cpu_startup_core_id;
127 loongson_sysconf.reserved_cpus_mask = ecpu->reserved_cores_mask;
122 if (ecpu->nr_cpus > NR_CPUS || ecpu->nr_cpus == 0) 128 if (ecpu->nr_cpus > NR_CPUS || ecpu->nr_cpus == 0)
123 loongson_sysconf.nr_cpus = NR_CPUS; 129 loongson_sysconf.nr_cpus = NR_CPUS;
124 loongson_sysconf.nr_nodes = (loongson_sysconf.nr_cpus + 130 loongson_sysconf.nr_nodes = (loongson_sysconf.nr_cpus +
@@ -141,6 +147,24 @@ void __init prom_init_env(void)
141 pr_debug("Shutdown Addr: %llx, Restart Addr: %llx, VBIOS Addr: %llx\n", 147 pr_debug("Shutdown Addr: %llx, Restart Addr: %llx, VBIOS Addr: %llx\n",
142 loongson_sysconf.poweroff_addr, loongson_sysconf.restart_addr, 148 loongson_sysconf.poweroff_addr, loongson_sysconf.restart_addr,
143 loongson_sysconf.vgabios_addr); 149 loongson_sysconf.vgabios_addr);
150
151 memset(loongson_sysconf.ecname, 0, 32);
152 if (esys->has_ec)
153 memcpy(loongson_sysconf.ecname, esys->ec_name, 32);
154 loongson_sysconf.workarounds |= esys->workarounds;
155
156 loongson_sysconf.nr_uarts = esys->nr_uarts;
157 if (esys->nr_uarts < 1 || esys->nr_uarts > MAX_UARTS)
158 loongson_sysconf.nr_uarts = 1;
159 memcpy(loongson_sysconf.uarts, esys->uarts,
160 sizeof(struct uart_device) * loongson_sysconf.nr_uarts);
161
162 loongson_sysconf.nr_sensors = esys->nr_sensors;
163 if (loongson_sysconf.nr_sensors > MAX_SENSORS)
164 loongson_sysconf.nr_sensors = 0;
165 if (loongson_sysconf.nr_sensors)
166 memcpy(loongson_sysconf.sensors, esys->sensors,
167 sizeof(struct sensor_device) * loongson_sysconf.nr_sensors);
144#endif 168#endif
145 if (cpu_clock_freq == 0) { 169 if (cpu_clock_freq == 0) {
146 processor_id = (&current_cpu_data)->processor_id; 170 processor_id = (&current_cpu_data)->processor_id;
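Note how the new env.c code trusts firmware-supplied counts only after clamping them: nr_uarts falls back to 1 and nr_sensors to 0 when out of range, which bounds the following memcpy()s. The pattern in isolation (clamp_fw_count() is a hypothetical helper):

static u32 clamp_fw_count(u32 n, u32 max, u32 fallback)
{
	/* Reject anything outside [1, max] before using it as a copy length. */
	return (n < 1 || n > max) ? fallback : n;
}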
diff --git a/arch/mips/loongson/common/gpio.c b/arch/mips/loongson/common/gpio.c
index 21869908aaa4..29dbaa253061 100644
--- a/arch/mips/loongson/common/gpio.c
+++ b/arch/mips/loongson/common/gpio.c
@@ -37,7 +37,7 @@ int gpio_get_value(unsigned gpio)
37 val = LOONGSON_GPIODATA; 37 val = LOONGSON_GPIODATA;
38 spin_unlock(&gpio_lock); 38 spin_unlock(&gpio_lock);
39 39
40 return ((val & mask) != 0); 40 return (val & mask) != 0;
41} 41}
42EXPORT_SYMBOL(gpio_get_value); 42EXPORT_SYMBOL(gpio_get_value);
43 43
diff --git a/arch/mips/loongson/common/init.c b/arch/mips/loongson/common/init.c
index f6af3aba4c86..9b987fe98b5b 100644
--- a/arch/mips/loongson/common/init.c
+++ b/arch/mips/loongson/common/init.c
@@ -9,6 +9,7 @@
9 */ 9 */
10 10
11#include <linux/bootmem.h> 11#include <linux/bootmem.h>
12#include <asm/bootinfo.h>
12#include <asm/smp-ops.h> 13#include <asm/smp-ops.h>
13 14
14#include <loongson.h> 15#include <loongson.h>
diff --git a/arch/mips/loongson/common/machtype.c b/arch/mips/loongson/common/machtype.c
index 1a4797984b8d..f2807bc662a3 100644
--- a/arch/mips/loongson/common/machtype.c
+++ b/arch/mips/loongson/common/machtype.c
@@ -19,19 +19,16 @@
19#define MACHTYPE_LEN 50 19#define MACHTYPE_LEN 50
20 20
21static const char *system_types[] = { 21static const char *system_types[] = {
22 [MACH_LOONGSON_UNKNOWN] "unknown loongson machine", 22 [MACH_LOONGSON_UNKNOWN] = "unknown loongson machine",
23 [MACH_LEMOTE_FL2E] "lemote-fuloong-2e-box", 23 [MACH_LEMOTE_FL2E] = "lemote-fuloong-2e-box",
24 [MACH_LEMOTE_FL2F] "lemote-fuloong-2f-box", 24 [MACH_LEMOTE_FL2F] = "lemote-fuloong-2f-box",
25 [MACH_LEMOTE_ML2F7] "lemote-mengloong-2f-7inches", 25 [MACH_LEMOTE_ML2F7] = "lemote-mengloong-2f-7inches",
26 [MACH_LEMOTE_YL2F89] "lemote-yeeloong-2f-8.9inches", 26 [MACH_LEMOTE_YL2F89] = "lemote-yeeloong-2f-8.9inches",
27 [MACH_DEXXON_GDIUM2F10] "dexxon-gdium-2f", 27 [MACH_DEXXON_GDIUM2F10] = "dexxon-gdium-2f",
28 [MACH_LEMOTE_NAS] "lemote-nas-2f", 28 [MACH_LEMOTE_NAS] = "lemote-nas-2f",
29 [MACH_LEMOTE_LL2F] "lemote-lynloong-2f", 29 [MACH_LEMOTE_LL2F] = "lemote-lynloong-2f",
30 [MACH_LEMOTE_A1004] "lemote-3a-notebook-a1004", 30 [MACH_LOONGSON_GENERIC] = "generic-loongson-machine",
31 [MACH_LEMOTE_A1101] "lemote-3a-itx-a1101", 31 [MACH_LOONGSON_END] = NULL,
32 [MACH_LEMOTE_A1201] "lemote-2gq-notebook-a1201",
33 [MACH_LEMOTE_A1205] "lemote-2gq-aio-a1205",
34 [MACH_LOONGSON_END] NULL,
35}; 32};
36 33
37const char *get_system_type(void) 34const char *get_system_type(void)
diff --git a/arch/mips/loongson/common/rtc.c b/arch/mips/loongson/common/rtc.c
index a90d87c01555..b5709af09f7f 100644
--- a/arch/mips/loongson/common/rtc.c
+++ b/arch/mips/loongson/common/rtc.c
@@ -14,7 +14,7 @@
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/mc146818rtc.h> 15#include <linux/mc146818rtc.h>
16 16
17struct resource loongson_rtc_resources[] = { 17static struct resource loongson_rtc_resources[] = {
18 { 18 {
19 .start = RTC_PORT(0), 19 .start = RTC_PORT(0),
20 .end = RTC_PORT(1), 20 .end = RTC_PORT(1),
diff --git a/arch/mips/loongson/common/serial.c b/arch/mips/loongson/common/serial.c
index bd2b7095b6dc..c23fa1373729 100644
--- a/arch/mips/loongson/common/serial.c
+++ b/arch/mips/loongson/common/serial.c
@@ -38,20 +38,17 @@
38 .regshift = 0, \ 38 .regshift = 0, \
39} 39}
40 40
41static struct plat_serial8250_port uart8250_data[][2] = { 41static struct plat_serial8250_port uart8250_data[][MAX_UARTS + 1] = {
42 [MACH_LOONGSON_UNKNOWN] {}, 42 [MACH_LOONGSON_UNKNOWN] = {},
43 [MACH_LEMOTE_FL2E] {PORT(4, 1843200), {} }, 43 [MACH_LEMOTE_FL2E] = {PORT(4, 1843200), {} },
44 [MACH_LEMOTE_FL2F] {PORT(3, 1843200), {} }, 44 [MACH_LEMOTE_FL2F] = {PORT(3, 1843200), {} },
45 [MACH_LEMOTE_ML2F7] {PORT_M(3, 3686400), {} }, 45 [MACH_LEMOTE_ML2F7] = {PORT_M(3, 3686400), {} },
46 [MACH_LEMOTE_YL2F89] {PORT_M(3, 3686400), {} }, 46 [MACH_LEMOTE_YL2F89] = {PORT_M(3, 3686400), {} },
47 [MACH_DEXXON_GDIUM2F10] {PORT_M(3, 3686400), {} }, 47 [MACH_DEXXON_GDIUM2F10] = {PORT_M(3, 3686400), {} },
48 [MACH_LEMOTE_NAS] {PORT_M(3, 3686400), {} }, 48 [MACH_LEMOTE_NAS] = {PORT_M(3, 3686400), {} },
49 [MACH_LEMOTE_LL2F] {PORT(3, 1843200), {} }, 49 [MACH_LEMOTE_LL2F] = {PORT(3, 1843200), {} },
50 [MACH_LEMOTE_A1004] {PORT_M(2, 33177600), {} }, 50 [MACH_LOONGSON_GENERIC] = {PORT_M(2, 25000000), {} },
51 [MACH_LEMOTE_A1101] {PORT_M(2, 25000000), {} }, 51 [MACH_LOONGSON_END] = {},
52 [MACH_LEMOTE_A1201] {PORT_M(2, 25000000), {} },
53 [MACH_LEMOTE_A1205] {PORT_M(2, 25000000), {} },
54 [MACH_LOONGSON_END] {},
55}; 52};
56 53
57static struct platform_device uart8250_device = { 54static struct platform_device uart8250_device = {
@@ -61,17 +58,52 @@ static struct platform_device uart8250_device = {
61 58
62static int __init serial_init(void) 59static int __init serial_init(void)
63{ 60{
61 int i;
64 unsigned char iotype; 62 unsigned char iotype;
65 63
66 iotype = uart8250_data[mips_machtype][0].iotype; 64 iotype = uart8250_data[mips_machtype][0].iotype;
67 65
68 if (UPIO_MEM == iotype) 66 if (UPIO_MEM == iotype) {
67 uart8250_data[mips_machtype][0].mapbase =
68 loongson_uart_base[0];
69 uart8250_data[mips_machtype][0].membase = 69 uart8250_data[mips_machtype][0].membase =
70 (void __iomem *)_loongson_uart_base; 70 (void __iomem *)_loongson_uart_base[0];
71 }
71 else if (UPIO_PORT == iotype) 72 else if (UPIO_PORT == iotype)
72 uart8250_data[mips_machtype][0].iobase = 73 uart8250_data[mips_machtype][0].iobase =
73 loongson_uart_base - LOONGSON_PCIIO_BASE; 74 loongson_uart_base[0] - LOONGSON_PCIIO_BASE;
74 75
76 if (loongson_sysconf.uarts[0].uartclk)
77 uart8250_data[mips_machtype][0].uartclk =
78 loongson_sysconf.uarts[0].uartclk;
79
80 for (i = 1; i < loongson_sysconf.nr_uarts; i++) {
81 iotype = loongson_sysconf.uarts[i].iotype;
82 uart8250_data[mips_machtype][i].iotype = iotype;
83 loongson_uart_base[i] = loongson_sysconf.uarts[i].uart_base;
84
85 if (UPIO_MEM == iotype) {
86 uart8250_data[mips_machtype][i].irq =
87 MIPS_CPU_IRQ_BASE + loongson_sysconf.uarts[i].int_offset;
88 uart8250_data[mips_machtype][i].mapbase =
89 loongson_uart_base[i];
90 uart8250_data[mips_machtype][i].membase =
91 ioremap_nocache(loongson_uart_base[i], 8);
92 } else if (UPIO_PORT == iotype) {
93 uart8250_data[mips_machtype][i].irq =
94 loongson_sysconf.uarts[i].int_offset;
95 uart8250_data[mips_machtype][i].iobase =
96 loongson_uart_base[i] - LOONGSON_PCIIO_BASE;
97 }
98
99 uart8250_data[mips_machtype][i].uartclk =
100 loongson_sysconf.uarts[i].uartclk;
101 uart8250_data[mips_machtype][i].flags =
102 UPF_BOOT_AUTOCONF | UPF_SKIP_TEST;
103 }
104
105 memset(&uart8250_data[mips_machtype][loongson_sysconf.nr_uarts],
106 0, sizeof(struct plat_serial8250_port));
75 uart8250_device.dev.platform_data = uart8250_data[mips_machtype]; 107 uart8250_device.dev.platform_data = uart8250_data[mips_machtype];
76 108
77 return platform_device_register(&uart8250_device); 109 return platform_device_register(&uart8250_device);
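The trailing memset() in serial_init() matters because the serial8250 core walks its platform data until it hits an entry with .flags == 0; the freshly zeroed slot is that sentinel. A sketch of the consumer-side convention:

struct plat_serial8250_port *p;

for (p = uart8250_data[mips_machtype]; p->flags != 0; p++)
	;	/* the 8250 core registers one port per entry */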
diff --git a/arch/mips/loongson/common/setup.c b/arch/mips/loongson/common/setup.c
index bb4ac922e47a..d477dd6bb326 100644
--- a/arch/mips/loongson/common/setup.c
+++ b/arch/mips/loongson/common/setup.c
@@ -10,6 +10,7 @@
10#include <linux/module.h> 10#include <linux/module.h>
11 11
12#include <asm/wbflush.h> 12#include <asm/wbflush.h>
13#include <asm/bootinfo.h>
13 14
14#include <loongson.h> 15#include <loongson.h>
15 16
diff --git a/arch/mips/loongson/common/time.c b/arch/mips/loongson/common/time.c
index 262a1f65b05e..e1a5382ad47e 100644
--- a/arch/mips/loongson/common/time.c
+++ b/arch/mips/loongson/common/time.c
@@ -12,6 +12,7 @@
12 */ 12 */
13#include <asm/mc146818-time.h> 13#include <asm/mc146818-time.h>
14#include <asm/time.h> 14#include <asm/time.h>
15#include <asm/hpet.h>
15 16
16#include <loongson.h> 17#include <loongson.h>
17#include <cs5536/cs5536_mfgpt.h> 18#include <cs5536/cs5536_mfgpt.h>
@@ -21,7 +22,11 @@ void __init plat_time_init(void)
21 /* setup mips r4k timer */ 22 /* setup mips r4k timer */
22 mips_hpt_frequency = cpu_clock_freq / 2; 23 mips_hpt_frequency = cpu_clock_freq / 2;
23 24
25#ifdef CONFIG_RS780_HPET
26 setup_hpet_timer();
27#else
24 setup_mfgpt0_timer(); 28 setup_mfgpt0_timer();
29#endif
25} 30}
26 31
27void read_persistent_clock(struct timespec *ts) 32void read_persistent_clock(struct timespec *ts)
diff --git a/arch/mips/loongson/common/uart_base.c b/arch/mips/loongson/common/uart_base.c
index 1e1eeea73fde..9de559d58e1f 100644
--- a/arch/mips/loongson/common/uart_base.c
+++ b/arch/mips/loongson/common/uart_base.c
@@ -13,22 +13,27 @@
13 13
14#include <loongson.h> 14#include <loongson.h>
15 15
16/* ioremapped */
17unsigned long _loongson_uart_base;
18EXPORT_SYMBOL(_loongson_uart_base);
19/* raw */ 16/* raw */
20unsigned long loongson_uart_base; 17unsigned long loongson_uart_base[MAX_UARTS] = {};
18/* ioremapped */
19unsigned long _loongson_uart_base[MAX_UARTS] = {};
20
21EXPORT_SYMBOL(loongson_uart_base); 21EXPORT_SYMBOL(loongson_uart_base);
22EXPORT_SYMBOL(_loongson_uart_base);
22 23
23void prom_init_loongson_uart_base(void) 24void prom_init_loongson_uart_base(void)
24{ 25{
25 switch (mips_machtype) { 26 switch (mips_machtype) {
27 case MACH_LOONGSON_GENERIC:
28 /* The CPU provided serial port (CPU) */
29 loongson_uart_base[0] = LOONGSON_REG_BASE + 0x1e0;
30 break;
26 case MACH_LEMOTE_FL2E: 31 case MACH_LEMOTE_FL2E:
27 loongson_uart_base = LOONGSON_PCIIO_BASE + 0x3f8; 32 loongson_uart_base[0] = LOONGSON_PCIIO_BASE + 0x3f8;
28 break; 33 break;
29 case MACH_LEMOTE_FL2F: 34 case MACH_LEMOTE_FL2F:
30 case MACH_LEMOTE_LL2F: 35 case MACH_LEMOTE_LL2F:
31 loongson_uart_base = LOONGSON_PCIIO_BASE + 0x2f8; 36 loongson_uart_base[0] = LOONGSON_PCIIO_BASE + 0x2f8;
32 break; 37 break;
33 case MACH_LEMOTE_ML2F7: 38 case MACH_LEMOTE_ML2F7:
34 case MACH_LEMOTE_YL2F89: 39 case MACH_LEMOTE_YL2F89:
@@ -36,17 +41,10 @@ void prom_init_loongson_uart_base(void)
36 case MACH_LEMOTE_NAS: 41 case MACH_LEMOTE_NAS:
37 default: 42 default:
38 /* The CPU provided serial port (LPC) */ 43 /* The CPU provided serial port (LPC) */
39 loongson_uart_base = LOONGSON_LIO1_BASE + 0x3f8; 44 loongson_uart_base[0] = LOONGSON_LIO1_BASE + 0x3f8;
40 break;
41 case MACH_LEMOTE_A1004:
42 case MACH_LEMOTE_A1101:
43 case MACH_LEMOTE_A1201:
44 case MACH_LEMOTE_A1205:
45 /* The CPU provided serial port (CPU) */
46 loongson_uart_base = LOONGSON_REG_BASE + 0x1e0;
47 break; 45 break;
48 } 46 }
49 47
50 _loongson_uart_base = 48 _loongson_uart_base[0] =
51 (unsigned long)ioremap_nocache(loongson_uart_base, 8); 49 (unsigned long)ioremap_nocache(loongson_uart_base[0], 8);
52} 50}
diff --git a/arch/mips/loongson/lemote-2f/irq.c b/arch/mips/loongson/lemote-2f/irq.c
index 6f8682e44483..cab5f43e0e29 100644
--- a/arch/mips/loongson/lemote-2f/irq.c
+++ b/arch/mips/loongson/lemote-2f/irq.c
@@ -93,13 +93,13 @@ static irqreturn_t ip6_action(int cpl, void *dev_id)
93 return IRQ_HANDLED; 93 return IRQ_HANDLED;
94} 94}
95 95
96struct irqaction ip6_irqaction = { 96static struct irqaction ip6_irqaction = {
97 .handler = ip6_action, 97 .handler = ip6_action,
98 .name = "cascade", 98 .name = "cascade",
99 .flags = IRQF_SHARED | IRQF_NO_THREAD, 99 .flags = IRQF_SHARED | IRQF_NO_THREAD,
100}; 100};
101 101
102struct irqaction cascade_irqaction = { 102static struct irqaction cascade_irqaction = {
103 .handler = no_action, 103 .handler = no_action,
104 .name = "cascade", 104 .name = "cascade",
105 .flags = IRQF_NO_THREAD, 105 .flags = IRQF_NO_THREAD,
diff --git a/arch/mips/loongson/lemote-2f/reset.c b/arch/mips/loongson/lemote-2f/reset.c
index 79ac694fe744..a26ca7fcd7e0 100644
--- a/arch/mips/loongson/lemote-2f/reset.c
+++ b/arch/mips/loongson/lemote-2f/reset.c
@@ -76,7 +76,7 @@ static void fl2f_shutdown(void)
76 76
77/* reset support for yeeloong2f and mengloong2f notebook */ 77/* reset support for yeeloong2f and mengloong2f notebook */
78 78
79void ml2f_reboot(void) 79static void ml2f_reboot(void)
80{ 80{
81 reset_cpu(); 81 reset_cpu();
82 82
diff --git a/arch/mips/loongson/loongson-3/Makefile b/arch/mips/loongson/loongson-3/Makefile
index b4df775b9f30..622fead5ebc9 100644
--- a/arch/mips/loongson/loongson-3/Makefile
+++ b/arch/mips/loongson/loongson-3/Makefile
@@ -1,8 +1,10 @@
1# 1#
2# Makefile for Loongson-3 family machines 2# Makefile for Loongson-3 family machines
3# 3#
4obj-y += irq.o cop2-ex.o 4obj-y += irq.o cop2-ex.o platform.o
5 5
6obj-$(CONFIG_SMP) += smp.o 6obj-$(CONFIG_SMP) += smp.o
7 7
8obj-$(CONFIG_NUMA) += numa.o 8obj-$(CONFIG_NUMA) += numa.o
9
10obj-$(CONFIG_RS780_HPET) += hpet.o
diff --git a/arch/mips/loongson/loongson-3/hpet.c b/arch/mips/loongson/loongson-3/hpet.c
new file mode 100644
index 000000000000..e898d68668a9
--- /dev/null
+++ b/arch/mips/loongson/loongson-3/hpet.c
@@ -0,0 +1,257 @@
1#include <linux/init.h>
2#include <linux/pci.h>
3#include <linux/percpu.h>
4#include <linux/delay.h>
5#include <linux/spinlock.h>
6#include <linux/interrupt.h>
7
8#include <asm/hpet.h>
9#include <asm/time.h>
10
11#define SMBUS_CFG_BASE (loongson_sysconf.ht_control_base + 0x0300a000)
12#define SMBUS_PCI_REG40 0x40
13#define SMBUS_PCI_REG64 0x64
14#define SMBUS_PCI_REGB4 0xb4
15
16static DEFINE_SPINLOCK(hpet_lock);
17DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device);
18
19static unsigned int smbus_read(int offset)
20{
21 return *(volatile unsigned int *)(SMBUS_CFG_BASE + offset);
22}
23
24static void smbus_write(int offset, int data)
25{
26 *(volatile unsigned int *)(SMBUS_CFG_BASE + offset) = data;
27}
28
29static void smbus_enable(int offset, int bit)
30{
31 unsigned int cfg = smbus_read(offset);
32
33 cfg |= bit;
34 smbus_write(offset, cfg);
35}
36
37static int hpet_read(int offset)
38{
39 return *(volatile unsigned int *)(HPET_MMIO_ADDR + offset);
40}
41
42static void hpet_write(int offset, int data)
43{
44 *(volatile unsigned int *)(HPET_MMIO_ADDR + offset) = data;
45}
46
47static void hpet_start_counter(void)
48{
49 unsigned int cfg = hpet_read(HPET_CFG);
50
51 cfg |= HPET_CFG_ENABLE;
52 hpet_write(HPET_CFG, cfg);
53}
54
55static void hpet_stop_counter(void)
56{
57 unsigned int cfg = hpet_read(HPET_CFG);
58
59 cfg &= ~HPET_CFG_ENABLE;
60 hpet_write(HPET_CFG, cfg);
61}
62
63static void hpet_reset_counter(void)
64{
65 hpet_write(HPET_COUNTER, 0);
66 hpet_write(HPET_COUNTER + 4, 0);
67}
68
69static void hpet_restart_counter(void)
70{
71 hpet_stop_counter();
72 hpet_reset_counter();
73 hpet_start_counter();
74}
75
76static void hpet_enable_legacy_int(void)
77{
78 /* Do nothing on Loongson-3 */
79}
80
81static void hpet_set_mode(enum clock_event_mode mode,
82 struct clock_event_device *evt)
83{
84 int cfg = 0;
85
86 spin_lock(&hpet_lock);
87 switch (mode) {
88 case CLOCK_EVT_MODE_PERIODIC:
89 pr_info("set clock event to periodic mode!\n");
90 /* stop counter */
91 hpet_stop_counter();
92
93		/* enable timer0 to generate a periodic interrupt */
94 cfg = hpet_read(HPET_T0_CFG);
95 cfg &= ~HPET_TN_LEVEL;
96 cfg |= HPET_TN_ENABLE | HPET_TN_PERIODIC |
97 HPET_TN_SETVAL | HPET_TN_32BIT;
98 hpet_write(HPET_T0_CFG, cfg);
99
100 /* set the comparator */
101 hpet_write(HPET_T0_CMP, HPET_COMPARE_VAL);
102 udelay(1);
103 hpet_write(HPET_T0_CMP, HPET_COMPARE_VAL);
104
105 /* start counter */
106 hpet_start_counter();
107 break;
108 case CLOCK_EVT_MODE_SHUTDOWN:
109 case CLOCK_EVT_MODE_UNUSED:
110 cfg = hpet_read(HPET_T0_CFG);
111 cfg &= ~HPET_TN_ENABLE;
112 hpet_write(HPET_T0_CFG, cfg);
113 break;
114 case CLOCK_EVT_MODE_ONESHOT:
115 pr_info("set clock event to one shot mode!\n");
116 cfg = hpet_read(HPET_T0_CFG);
117 /* set timer0 type
118 * 1 : periodic interrupt
119 * 0 : non-periodic(oneshot) interrupt
120 */
121 cfg &= ~HPET_TN_PERIODIC;
122 cfg |= HPET_TN_ENABLE | HPET_TN_32BIT;
123 hpet_write(HPET_T0_CFG, cfg);
124 break;
125 case CLOCK_EVT_MODE_RESUME:
126 hpet_enable_legacy_int();
127 break;
128 }
129 spin_unlock(&hpet_lock);
130}
131
132static int hpet_next_event(unsigned long delta,
133 struct clock_event_device *evt)
134{
135 unsigned int cnt;
136 int res;
137
138 cnt = hpet_read(HPET_COUNTER);
139 cnt += delta;
140 hpet_write(HPET_T0_CMP, cnt);
141
142 res = ((int)(hpet_read(HPET_COUNTER) - cnt) > 0) ? -ETIME : 0;
143 return res;
144}
145
146static irqreturn_t hpet_irq_handler(int irq, void *data)
147{
148 int is_irq;
149 struct clock_event_device *cd;
150 unsigned int cpu = smp_processor_id();
151
152 is_irq = hpet_read(HPET_STATUS);
153 if (is_irq & HPET_T0_IRS) {
154 /* clear the TIMER0 irq status register */
155 hpet_write(HPET_STATUS, HPET_T0_IRS);
156 cd = &per_cpu(hpet_clockevent_device, cpu);
157 cd->event_handler(cd);
158 return IRQ_HANDLED;
159 }
160 return IRQ_NONE;
161}
162
163static struct irqaction hpet_irq = {
164 .handler = hpet_irq_handler,
165 .flags = IRQF_DISABLED | IRQF_NOBALANCING | IRQF_TIMER,
166 .name = "hpet",
167};
168
169/*
170 * HPET address assignment and IRQ setup should be done by the BIOS,
171 * but PMON does not do this, so we set it up here directly. The
172 * clean way would be the PCI accessors below; unfortunately,
173 * hpet_setup() runs before PCI is initialized.
174 *
175 * {
176 * struct pci_dev *pdev;
177 *
178 * pdev = pci_get_device(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_SBX00_SMBUS, NULL);
179 * pci_write_config_word(pdev, SMBUS_PCI_REGB4, HPET_ADDR);
180 *
181 * ...
182 * }
183 */
184static void hpet_setup(void)
185{
186 /* set hpet base address */
187 smbus_write(SMBUS_PCI_REGB4, HPET_ADDR);
188
189	/* enable decoding of accesses to HPET MMIO */
190 smbus_enable(SMBUS_PCI_REG40, (1 << 28));
191
192 /* HPET irq enable */
193 smbus_enable(SMBUS_PCI_REG64, (1 << 10));
194
195 hpet_enable_legacy_int();
196}
197
198void __init setup_hpet_timer(void)
199{
200 unsigned int cpu = smp_processor_id();
201 struct clock_event_device *cd;
202
203 hpet_setup();
204
205 cd = &per_cpu(hpet_clockevent_device, cpu);
206 cd->name = "hpet";
207 cd->rating = 320;
208 cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
209 cd->set_mode = hpet_set_mode;
210 cd->set_next_event = hpet_next_event;
211 cd->irq = HPET_T0_IRQ;
212 cd->cpumask = cpumask_of(cpu);
213 clockevent_set_clock(cd, HPET_FREQ);
214 cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
215 cd->min_delta_ns = 5000;
216
217 clockevents_register_device(cd);
218 setup_irq(HPET_T0_IRQ, &hpet_irq);
219	pr_info("hpet clock event device registered\n");
220}
221
222static cycle_t hpet_read_counter(struct clocksource *cs)
223{
224 return (cycle_t)hpet_read(HPET_COUNTER);
225}
226
227static void hpet_suspend(struct clocksource *cs)
228{
229}
230
231static void hpet_resume(struct clocksource *cs)
232{
233 hpet_setup();
234 hpet_restart_counter();
235}
236
237static struct clocksource csrc_hpet = {
238 .name = "hpet",
239	/* the MIPS clocksource rating is less than 300, so the HPET is preferred */
240 .rating = 300,
241 .read = hpet_read_counter,
242 .mask = CLOCKSOURCE_MASK(32),
243	/* oneshot mode works normally with this flag */
244 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
245 .suspend = hpet_suspend,
246 .resume = hpet_resume,
247 .mult = 0,
248 .shift = 10,
249};
250
251int __init init_hpet_clocksource(void)
252{
253 csrc_hpet.mult = clocksource_hz2mult(HPET_FREQ, csrc_hpet.shift);
254 return clocksource_register_hz(&csrc_hpet, HPET_FREQ);
255}
256
257arch_initcall(init_hpet_clocksource);
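One subtlety in hpet_next_event() above: the comparator is programmed with an absolute count and the counter is then re-read; if it already passed the comparator, the interrupt was missed and -ETIME tells the clockevents core to retry. The signed difference is what keeps the check wrap-safe:

unsigned int cnt = hpet_read(HPET_COUNTER) + delta;

hpet_write(HPET_T0_CMP, cnt);
/* (int)(a - b) > 0 iff a is "after" b modulo 2^32, so the
 * missed-deadline test stays correct across counter wrap. */
int res = ((int)(hpet_read(HPET_COUNTER) - cnt) > 0) ? -ETIME : 0;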
diff --git a/arch/mips/loongson/loongson-3/irq.c b/arch/mips/loongson/loongson-3/irq.c
index ca1c62af5188..21221edda7a9 100644
--- a/arch/mips/loongson/loongson-3/irq.c
+++ b/arch/mips/loongson/loongson-3/irq.c
@@ -9,7 +9,7 @@
9 9
10#include "smp.h" 10#include "smp.h"
11 11
12unsigned int ht_irq[] = {1, 3, 4, 5, 6, 7, 8, 12, 14, 15}; 12unsigned int ht_irq[] = {0, 1, 3, 4, 5, 6, 7, 8, 12, 14, 15};
13 13
14static void ht_irqdispatch(void) 14static void ht_irqdispatch(void)
15{ 15{
@@ -55,8 +55,8 @@ static inline void mask_loongson_irq(struct irq_data *d)
55 /* Workaround: UART IRQ may deliver to any core */ 55 /* Workaround: UART IRQ may deliver to any core */
56 if (d->irq == LOONGSON_UART_IRQ) { 56 if (d->irq == LOONGSON_UART_IRQ) {
57 int cpu = smp_processor_id(); 57 int cpu = smp_processor_id();
58 int node_id = cpu / loongson_sysconf.cores_per_node; 58 int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node;
59 int core_id = cpu % loongson_sysconf.cores_per_node; 59 int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node;
60 u64 intenclr_addr = smp_group[node_id] | 60 u64 intenclr_addr = smp_group[node_id] |
61 (u64)(&LOONGSON_INT_ROUTER_INTENCLR); 61 (u64)(&LOONGSON_INT_ROUTER_INTENCLR);
62 u64 introuter_lpc_addr = smp_group[node_id] | 62 u64 introuter_lpc_addr = smp_group[node_id] |
@@ -72,8 +72,8 @@ static inline void unmask_loongson_irq(struct irq_data *d)
72 /* Workaround: UART IRQ may deliver to any core */ 72 /* Workaround: UART IRQ may deliver to any core */
73 if (d->irq == LOONGSON_UART_IRQ) { 73 if (d->irq == LOONGSON_UART_IRQ) {
74 int cpu = smp_processor_id(); 74 int cpu = smp_processor_id();
75 int node_id = cpu / loongson_sysconf.cores_per_node; 75 int node_id = cpu_logical_map(cpu) / loongson_sysconf.cores_per_node;
76 int core_id = cpu % loongson_sysconf.cores_per_node; 76 int core_id = cpu_logical_map(cpu) % loongson_sysconf.cores_per_node;
77 u64 intenset_addr = smp_group[node_id] | 77 u64 intenset_addr = smp_group[node_id] |
78 (u64)(&LOONGSON_INT_ROUTER_INTENSET); 78 (u64)(&LOONGSON_INT_ROUTER_INTENSET);
79 u64 introuter_lpc_addr = smp_group[node_id] | 79 u64 introuter_lpc_addr = smp_group[node_id] |
@@ -102,10 +102,12 @@ void irq_router_init(void)
102 int i; 102 int i;
103 103
104 /* route LPC int to cpu core0 int 0 */ 104 /* route LPC int to cpu core0 int 0 */
105 LOONGSON_INT_ROUTER_LPC = LOONGSON_INT_CORE0_INT0; 105 LOONGSON_INT_ROUTER_LPC =
106 LOONGSON_INT_COREx_INTy(loongson_sysconf.boot_cpu_id, 0);
106 /* route HT1 int0 ~ int7 to cpu core0 INT1*/ 107 /* route HT1 int0 ~ int7 to cpu core0 INT1*/
107 for (i = 0; i < 8; i++) 108 for (i = 0; i < 8; i++)
108 LOONGSON_INT_ROUTER_HT1(i) = LOONGSON_INT_CORE0_INT1; 109 LOONGSON_INT_ROUTER_HT1(i) =
110 LOONGSON_INT_COREx_INTy(loongson_sysconf.boot_cpu_id, 1);
109 /* enable HT1 interrupt */ 111 /* enable HT1 interrupt */
110 LOONGSON_HT1_INTN_EN(0) = 0xffffffff; 112 LOONGSON_HT1_INTN_EN(0) = 0xffffffff;
111 /* enable router interrupt intenset */ 113 /* enable router interrupt intenset */
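The mask/unmask changes switch from the logical CPU number to the physical one because the interrupt-router registers are indexed by physical core. The decomposition used throughout is:

int cpu  = smp_processor_id();		/* logical id */
int phys = cpu_logical_map(cpu);	/* physical id */
int node_id = phys / loongson_sysconf.cores_per_node;
int core_id = phys % loongson_sysconf.cores_per_node;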
diff --git a/arch/mips/loongson/loongson-3/numa.c b/arch/mips/loongson/loongson-3/numa.c
index 42323bcc5d28..6cae0e75de27 100644
--- a/arch/mips/loongson/loongson-3/numa.c
+++ b/arch/mips/loongson/loongson-3/numa.c
@@ -224,7 +224,7 @@ static void __init node_mem_init(unsigned int node)
224 224
225static __init void prom_meminit(void) 225static __init void prom_meminit(void)
226{ 226{
227 unsigned int node, cpu; 227 unsigned int node, cpu, active_cpu = 0;
228 228
229 cpu_node_probe(); 229 cpu_node_probe();
230 init_topology_matrix(); 230 init_topology_matrix();
@@ -240,8 +240,14 @@ static __init void prom_meminit(void)
240 node = cpu / loongson_sysconf.cores_per_node; 240 node = cpu / loongson_sysconf.cores_per_node;
241 if (node >= num_online_nodes()) 241 if (node >= num_online_nodes())
242 node = 0; 242 node = 0;
243 pr_info("NUMA: set cpumask cpu %d on node %d\n", cpu, node); 243
244 cpu_set(cpu, __node_data[(node)]->cpumask); 244 if (loongson_sysconf.reserved_cpus_mask & (1<<cpu))
245 continue;
246
247 cpu_set(active_cpu, __node_data[(node)]->cpumask);
248 pr_info("NUMA: set cpumask cpu %d on node %d\n", active_cpu, node);
249
250 active_cpu++;
245 } 251 }
246} 252}
247 253
diff --git a/arch/mips/loongson/loongson-3/platform.c b/arch/mips/loongson/loongson-3/platform.c
new file mode 100644
index 000000000000..25a97cc0ee33
--- /dev/null
+++ b/arch/mips/loongson/loongson-3/platform.c
@@ -0,0 +1,43 @@
1/*
2 * Copyright (C) 2009 Lemote Inc.
3 * Author: Wu Zhangjin, wuzhangjin@gmail.com
4 * Xiang Yu, xiangy@lemote.com
5 * Chen Huacai, chenhc@lemote.com
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the
9 * Free Software Foundation; either version 2 of the License, or (at your
10 * option) any later version.
11 */
12
13#include <linux/err.h>
14#include <linux/slab.h>
15#include <linux/platform_device.h>
16#include <asm/bootinfo.h>
17#include <boot_param.h>
18#include <loongson_hwmon.h>
19#include <workarounds.h>
20
21static int __init loongson3_platform_init(void)
22{
23 int i;
24 struct platform_device *pdev;
25
26 if (loongson_sysconf.ecname[0] != '\0')
27 platform_device_register_simple(loongson_sysconf.ecname, -1, NULL, 0);
28
29 for (i = 0; i < loongson_sysconf.nr_sensors; i++) {
30 if (loongson_sysconf.sensors[i].type > SENSOR_FAN)
31 continue;
32
33 pdev = kzalloc(sizeof(struct platform_device), GFP_KERNEL);
34 pdev->name = loongson_sysconf.sensors[i].name;
35 pdev->id = loongson_sysconf.sensors[i].id;
36 pdev->dev.platform_data = &loongson_sysconf.sensors[i];
37 platform_device_register(pdev);
38 }
39
40 return 0;
41}
42
43arch_initcall(loongson3_platform_init);
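loongson3_platform_init() above assumes kzalloc() succeeds, which is near-certain at arch_initcall time; a stricter version would still check (a sketch):

pdev = kzalloc(sizeof(*pdev), GFP_KERNEL);
if (!pdev)
	return -ENOMEM;	/* bail out rather than oops on pdev->name */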
diff --git a/arch/mips/loongson/loongson-3/smp.c b/arch/mips/loongson/loongson-3/smp.c
index d8c63af6c7cc..e2eb688b5434 100644
--- a/arch/mips/loongson/loongson-3/smp.c
+++ b/arch/mips/loongson/loongson-3/smp.c
@@ -25,6 +25,7 @@
25#include <asm/tlbflush.h> 25#include <asm/tlbflush.h>
26#include <asm/cacheflush.h> 26#include <asm/cacheflush.h>
27#include <loongson.h> 27#include <loongson.h>
28#include <workarounds.h>
28 29
29#include "smp.h" 30#include "smp.h"
30 31
@@ -239,7 +240,7 @@ static void ipi_mailbox_buf_init(void)
239 */ 240 */
240static void loongson3_send_ipi_single(int cpu, unsigned int action) 241static void loongson3_send_ipi_single(int cpu, unsigned int action)
241{ 242{
242 loongson3_ipi_write32((u32)action, ipi_set0_regs[cpu]); 243 loongson3_ipi_write32((u32)action, ipi_set0_regs[cpu_logical_map(cpu)]);
243} 244}
244 245
245static void 246static void
@@ -248,7 +249,7 @@ loongson3_send_ipi_mask(const struct cpumask *mask, unsigned int action)
248 unsigned int i; 249 unsigned int i;
249 250
250 for_each_cpu(i, mask) 251 for_each_cpu(i, mask)
251 loongson3_ipi_write32((u32)action, ipi_set0_regs[i]); 252 loongson3_ipi_write32((u32)action, ipi_set0_regs[cpu_logical_map(i)]);
252} 253}
253 254
254void loongson3_ipi_interrupt(struct pt_regs *regs) 255void loongson3_ipi_interrupt(struct pt_regs *regs)
@@ -257,10 +258,10 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
257 unsigned int action, c0count; 258 unsigned int action, c0count;
258 259
259 /* Load the ipi register to figure out what we're supposed to do */ 260 /* Load the ipi register to figure out what we're supposed to do */
260 action = loongson3_ipi_read32(ipi_status0_regs[cpu]); 261 action = loongson3_ipi_read32(ipi_status0_regs[cpu_logical_map(cpu)]);
261 262
262 /* Clear the ipi register to clear the interrupt */ 263 /* Clear the ipi register to clear the interrupt */
263 loongson3_ipi_write32((u32)action, ipi_clear0_regs[cpu]); 264 loongson3_ipi_write32((u32)action, ipi_clear0_regs[cpu_logical_map(cpu)]);
264 265
265 if (action & SMP_RESCHEDULE_YOURSELF) 266 if (action & SMP_RESCHEDULE_YOURSELF)
266 scheduler_ipi(); 267 scheduler_ipi();
@@ -291,12 +292,14 @@ static void loongson3_init_secondary(void)
291 /* Set interrupt mask, but don't enable */ 292 /* Set interrupt mask, but don't enable */
292 change_c0_status(ST0_IM, imask); 293 change_c0_status(ST0_IM, imask);
293 294
294 for (i = 0; i < loongson_sysconf.nr_cpus; i++) 295 for (i = 0; i < num_possible_cpus(); i++)
295 loongson3_ipi_write32(0xffffffff, ipi_en0_regs[i]); 296 loongson3_ipi_write32(0xffffffff, ipi_en0_regs[cpu_logical_map(i)]);
296 297
297 cpu_data[cpu].package = cpu / loongson_sysconf.cores_per_package;
298 cpu_data[cpu].core = cpu % loongson_sysconf.cores_per_package;
299 per_cpu(cpu_state, cpu) = CPU_ONLINE; 298 per_cpu(cpu_state, cpu) = CPU_ONLINE;
299 cpu_data[cpu].core =
300 cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
301 cpu_data[cpu].package =
302 cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
300 303
301 i = 0; 304 i = 0;
302 __this_cpu_write(core0_c0count, 0); 305 __this_cpu_write(core0_c0count, 0);
@@ -314,37 +317,50 @@ static void loongson3_init_secondary(void)
314 317
315static void loongson3_smp_finish(void) 318static void loongson3_smp_finish(void)
316{ 319{
320 int cpu = smp_processor_id();
321
317 write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ); 322 write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
318 local_irq_enable(); 323 local_irq_enable();
319 loongson3_ipi_write64(0, 324 loongson3_ipi_write64(0,
320 (void *)(ipi_mailbox_buf[smp_processor_id()]+0x0)); 325 (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x0));
321 pr_info("CPU#%d finished, CP0_ST=%x\n", 326 pr_info("CPU#%d finished, CP0_ST=%x\n",
322 smp_processor_id(), read_c0_status()); 327 smp_processor_id(), read_c0_status());
323} 328}
324 329
325static void __init loongson3_smp_setup(void) 330static void __init loongson3_smp_setup(void)
326{ 331{
327 int i, num; 332 int i = 0, num = 0; /* i: physical id, num: logical id */
328 333
329 init_cpu_possible(cpu_none_mask); 334 init_cpu_possible(cpu_none_mask);
330 set_cpu_possible(0, true);
331
332 __cpu_number_map[0] = 0;
333 __cpu_logical_map[0] = 0;
334 335
335 /* For unified kernel, NR_CPUS is the maximum possible value, 336 /* For unified kernel, NR_CPUS is the maximum possible value,
336	 * loongson_sysconf.nr_cpus is the number actually present */ 337	 * loongson_sysconf.nr_cpus is the number actually present */
337 for (i = 1, num = 0; i < loongson_sysconf.nr_cpus; i++) { 338 while (i < loongson_sysconf.nr_cpus) {
338 set_cpu_possible(i, true); 339 if (loongson_sysconf.reserved_cpus_mask & (1<<i)) {
339 __cpu_number_map[i] = ++num; 340 /* Reserved physical CPU cores */
340 __cpu_logical_map[num] = i; 341 __cpu_number_map[i] = -1;
342 } else {
343 __cpu_number_map[i] = num;
344 __cpu_logical_map[num] = i;
345 set_cpu_possible(num, true);
346 num++;
347 }
348 i++;
341 } 349 }
350 pr_info("Detected %i available CPU(s)\n", num);
351
352 while (num < loongson_sysconf.nr_cpus) {
353 __cpu_logical_map[num] = -1;
354 num++;
355 }
356
342 ipi_set0_regs_init(); 357 ipi_set0_regs_init();
343 ipi_clear0_regs_init(); 358 ipi_clear0_regs_init();
344 ipi_status0_regs_init(); 359 ipi_status0_regs_init();
345 ipi_en0_regs_init(); 360 ipi_en0_regs_init();
346 ipi_mailbox_buf_init(); 361 ipi_mailbox_buf_init();
347 pr_info("Detected %i available secondary CPU(s)\n", num); 362 cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
363 cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;
348} 364}
349 365
350static void __init loongson3_prepare_cpus(unsigned int max_cpus) 366static void __init loongson3_prepare_cpus(unsigned int max_cpus)
@@ -371,10 +387,14 @@ static void loongson3_boot_secondary(int cpu, struct task_struct *idle)
371 pr_debug("CPU#%d, func_pc=%lx, sp=%lx, gp=%lx\n", 387 pr_debug("CPU#%d, func_pc=%lx, sp=%lx, gp=%lx\n",
372 cpu, startargs[0], startargs[1], startargs[2]); 388 cpu, startargs[0], startargs[1], startargs[2]);
373 389
374 loongson3_ipi_write64(startargs[3], (void *)(ipi_mailbox_buf[cpu]+0x18)); 390 loongson3_ipi_write64(startargs[3],
375 loongson3_ipi_write64(startargs[2], (void *)(ipi_mailbox_buf[cpu]+0x10)); 391 (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x18));
376 loongson3_ipi_write64(startargs[1], (void *)(ipi_mailbox_buf[cpu]+0x8)); 392 loongson3_ipi_write64(startargs[2],
377 loongson3_ipi_write64(startargs[0], (void *)(ipi_mailbox_buf[cpu]+0x0)); 393 (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x10));
394 loongson3_ipi_write64(startargs[1],
395 (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x8));
396 loongson3_ipi_write64(startargs[0],
397 (void *)(ipi_mailbox_buf[cpu_logical_map(cpu)]+0x0));
378} 398}
379 399
380#ifdef CONFIG_HOTPLUG_CPU 400#ifdef CONFIG_HOTPLUG_CPU
@@ -568,7 +588,7 @@ void loongson3_disable_clock(int cpu)
568 if (loongson_sysconf.cputype == Loongson_3A) { 588 if (loongson_sysconf.cputype == Loongson_3A) {
569 LOONGSON_CHIPCFG(package_id) &= ~(1 << (12 + core_id)); 589 LOONGSON_CHIPCFG(package_id) &= ~(1 << (12 + core_id));
570 } else if (loongson_sysconf.cputype == Loongson_3B) { 590 } else if (loongson_sysconf.cputype == Loongson_3B) {
571 if (!cpuhotplug_workaround) 591 if (!(loongson_sysconf.workarounds & WORKAROUND_CPUHOTPLUG))
572 LOONGSON_FREQCTRL(package_id) &= ~(1 << (core_id * 4 + 3)); 592 LOONGSON_FREQCTRL(package_id) &= ~(1 << (core_id * 4 + 3));
573 } 593 }
574} 594}
@@ -581,7 +601,7 @@ void loongson3_enable_clock(int cpu)
581 if (loongson_sysconf.cputype == Loongson_3A) { 601 if (loongson_sysconf.cputype == Loongson_3A) {
582 LOONGSON_CHIPCFG(package_id) |= 1 << (12 + core_id); 602 LOONGSON_CHIPCFG(package_id) |= 1 << (12 + core_id);
583 } else if (loongson_sysconf.cputype == Loongson_3B) { 603 } else if (loongson_sysconf.cputype == Loongson_3B) {
584 if (!cpuhotplug_workaround) 604 if (!(loongson_sysconf.workarounds & WORKAROUND_CPUHOTPLUG))
585 LOONGSON_FREQCTRL(package_id) |= 1 << (core_id * 4 + 3); 605 LOONGSON_FREQCTRL(package_id) |= 1 << (core_id * 4 + 3);
586 } 606 }
587} 607}
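A worked example of the CPU maps built by the new loongson3_smp_setup(): with nr_cpus = 4 and reserved_cpus_mask = 0x2 (physical core 1 held back), the loop yields

/*   physical core:     0   1   2   3
 *   __cpu_number_map:  0  -1   1   2   (physical -> logical)
 *   __cpu_logical_map: 0   2   3  -1   (logical  -> physical)
 * Three CPUs become possible; unused logical slot 3 is left at -1.
 */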
diff --git a/arch/mips/loongson1/Kconfig b/arch/mips/loongson1/Kconfig
index e23c25d09963..a2b796eaf3c3 100644
--- a/arch/mips/loongson1/Kconfig
+++ b/arch/mips/loongson1/Kconfig
@@ -5,8 +5,8 @@ choice
5 5
6config LOONGSON1_LS1B 6config LOONGSON1_LS1B
7 bool "Loongson LS1B board" 7 bool "Loongson LS1B board"
8 select CEVT_R4K 8 select CEVT_R4K if !MIPS_EXTERNAL_TIMER
9 select CSRC_R4K 9 select CSRC_R4K if !MIPS_EXTERNAL_TIMER
10 select SYS_HAS_CPU_LOONGSON1B 10 select SYS_HAS_CPU_LOONGSON1B
11 select DMA_NONCOHERENT 11 select DMA_NONCOHERENT
12 select BOOT_ELF32 12 select BOOT_ELF32
@@ -16,8 +16,46 @@ config LOONGSON1_LS1B
16 select SYS_SUPPORTS_HIGHMEM 16 select SYS_SUPPORTS_HIGHMEM
17 select SYS_SUPPORTS_MIPS16 17 select SYS_SUPPORTS_MIPS16
18 select SYS_HAS_EARLY_PRINTK 18 select SYS_HAS_EARLY_PRINTK
19 select USE_GENERIC_EARLY_PRINTK_8250
19 select COMMON_CLK 20 select COMMON_CLK
20 21
21endchoice 22endchoice
22 23
24menuconfig CEVT_CSRC_LS1X
25 bool "Use PWM Timer for clockevent/clocksource"
26 select MIPS_EXTERNAL_TIMER
27 depends on CPU_LOONGSON1
28 help
29 This option changes the default clockevent/clocksource to PWM Timer,
30 and is required by Loongson1 CPUFreq support.
31
32 If unsure, say N.
33
34choice
35 prompt "Select clockevent/clocksource"
36 depends on CEVT_CSRC_LS1X
37 default TIMER_USE_PWM0
38
39config TIMER_USE_PWM0
40 bool "Use PWM Timer 0"
41 help
42	  Use PWM Timer 0 as the default clockevent/clocksource.
43
44config TIMER_USE_PWM1
45 bool "Use PWM Timer 1"
46 help
47	  Use PWM Timer 1 as the default clockevent/clocksource.
48
49config TIMER_USE_PWM2
50 bool "Use PWM Timer 2"
51 help
52	  Use PWM Timer 2 as the default clockevent/clocksource.
53
54config TIMER_USE_PWM3
55 bool "Use PWM Timer 3"
56 help
57	  Use PWM Timer 3 as the default clockevent/clocksource.
58
59endchoice
60
23endif # MACH_LOONGSON1 61endif # MACH_LOONGSON1
diff --git a/arch/mips/loongson1/common/Makefile b/arch/mips/loongson1/common/Makefile
index b2797709ef5b..723b4ce3b8f0 100644
--- a/arch/mips/loongson1/common/Makefile
+++ b/arch/mips/loongson1/common/Makefile
@@ -2,4 +2,4 @@
2# Makefile for common code of loongson1 based machines. 2# Makefile for common code of loongson1 based machines.
3# 3#
4 4
5obj-y += clock.o irq.o platform.o prom.o reset.o setup.o 5obj-y += time.o irq.o platform.o prom.o reset.o setup.o
diff --git a/arch/mips/loongson1/common/clock.c b/arch/mips/loongson1/common/clock.c
deleted file mode 100644
index b4437f19c3d9..000000000000
--- a/arch/mips/loongson1/common/clock.c
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 */
9
10#include <linux/clk.h>
11#include <linux/err.h>
12#include <asm/time.h>
13#include <platform.h>
14
15void __init plat_time_init(void)
16{
17 struct clk *clk;
18
19 /* Initialize LS1X clocks */
20 ls1x_clk_init();
21
22 /* setup mips r4k timer */
23 clk = clk_get(NULL, "cpu");
24 if (IS_ERR(clk))
25 panic("unable to get cpu clock, err=%ld", PTR_ERR(clk));
26
27 mips_hpt_frequency = clk_get_rate(clk) / 2;
28}
diff --git a/arch/mips/loongson1/common/platform.c b/arch/mips/loongson1/common/platform.c
index fdf8cb5987a4..ddf1d4cbf31e 100644
--- a/arch/mips/loongson1/common/platform.c
+++ b/arch/mips/loongson1/common/platform.c
@@ -16,8 +16,10 @@
16#include <linux/usb/ehci_pdriver.h> 16#include <linux/usb/ehci_pdriver.h>
17#include <asm-generic/sizes.h> 17#include <asm-generic/sizes.h>
18 18
19#include <cpufreq.h>
19#include <loongson1.h> 20#include <loongson1.h>
20 21
22/* 8250/16550 compatible UART */
21#define LS1X_UART(_id) \ 23#define LS1X_UART(_id) \
22 { \ 24 { \
23 .mapbase = LS1X_UART ## _id ## _BASE, \ 25 .mapbase = LS1X_UART ## _id ## _BASE, \
@@ -27,7 +29,7 @@
27 .type = PORT_16550A, \ 29 .type = PORT_16550A, \
28 } 30 }
29 31
30static struct plat_serial8250_port ls1x_serial8250_port[] = { 32static struct plat_serial8250_port ls1x_serial8250_pdata[] = {
31 LS1X_UART(0), 33 LS1X_UART(0),
32 LS1X_UART(1), 34 LS1X_UART(1),
33 LS1X_UART(2), 35 LS1X_UART(2),
@@ -35,11 +37,11 @@ static struct plat_serial8250_port ls1x_serial8250_port[] = {
35 {}, 37 {},
36}; 38};
37 39
38struct platform_device ls1x_uart_device = { 40struct platform_device ls1x_uart_pdev = {
39 .name = "serial8250", 41 .name = "serial8250",
40 .id = PLAT8250_DEV_PLATFORM, 42 .id = PLAT8250_DEV_PLATFORM,
41 .dev = { 43 .dev = {
42 .platform_data = ls1x_serial8250_port, 44 .platform_data = ls1x_serial8250_pdata,
43 }, 45 },
44}; 46};
45 47
@@ -48,16 +50,97 @@ void __init ls1x_serial_setup(struct platform_device *pdev)
48 struct clk *clk; 50 struct clk *clk;
49 struct plat_serial8250_port *p; 51 struct plat_serial8250_port *p;
50 52
51 clk = clk_get(NULL, pdev->name); 53 clk = clk_get(&pdev->dev, pdev->name);
52 if (IS_ERR(clk)) 54 if (IS_ERR(clk)) {
53 panic("unable to get %s clock, err=%ld", 55 pr_err("unable to get %s clock, err=%ld",
54 pdev->name, PTR_ERR(clk)); 56 pdev->name, PTR_ERR(clk));
57 return;
58 }
59 clk_prepare_enable(clk);
55 60
56 for (p = pdev->dev.platform_data; p->flags != 0; ++p) 61 for (p = pdev->dev.platform_data; p->flags != 0; ++p)
57 p->uartclk = clk_get_rate(clk); 62 p->uartclk = clk_get_rate(clk);
58} 63}
59 64
65/* CPUFreq */
66static struct plat_ls1x_cpufreq ls1x_cpufreq_pdata = {
67 .clk_name = "cpu_clk",
68 .osc_clk_name = "osc_33m_clk",
69 .max_freq = 266 * 1000,
70 .min_freq = 33 * 1000,
71};
72
73struct platform_device ls1x_cpufreq_pdev = {
74 .name = "ls1x-cpufreq",
75 .dev = {
76 .platform_data = &ls1x_cpufreq_pdata,
77 },
78};
79
60/* Synopsys Ethernet GMAC */ 80/* Synopsys Ethernet GMAC */
81static struct stmmac_mdio_bus_data ls1x_mdio_bus_data = {
82 .phy_mask = 0,
83};
84
85static struct stmmac_dma_cfg ls1x_eth_dma_cfg = {
86 .pbl = 1,
87};
88
89int ls1x_eth_mux_init(struct platform_device *pdev, void *priv)
90{
91 struct plat_stmmacenet_data *plat_dat = NULL;
92 u32 val;
93
94 val = __raw_readl(LS1X_MUX_CTRL1);
95
96 plat_dat = dev_get_platdata(&pdev->dev);
97 if (plat_dat->bus_id) {
98 __raw_writel(__raw_readl(LS1X_MUX_CTRL0) | GMAC1_USE_UART1 |
99 GMAC1_USE_UART0, LS1X_MUX_CTRL0);
100 switch (plat_dat->interface) {
101 case PHY_INTERFACE_MODE_RGMII:
102 val &= ~(GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
103 break;
104 case PHY_INTERFACE_MODE_MII:
105 val |= (GMAC1_USE_TXCLK | GMAC1_USE_PWM23);
106 break;
107 default:
108 pr_err("unsupported mii mode %d\n",
109 plat_dat->interface);
110 return -ENOTSUPP;
111 }
112 val &= ~GMAC1_SHUT;
113 } else {
114 switch (plat_dat->interface) {
115 case PHY_INTERFACE_MODE_RGMII:
116 val &= ~(GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
117 break;
118 case PHY_INTERFACE_MODE_MII:
119 val |= (GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
120 break;
121 default:
122 pr_err("unsupported mii mode %d\n",
123 plat_dat->interface);
124 return -ENOTSUPP;
125 }
126 val &= ~GMAC0_SHUT;
127 }
128 __raw_writel(val, LS1X_MUX_CTRL1);
129
130 return 0;
131}
132
133static struct plat_stmmacenet_data ls1x_eth0_pdata = {
134 .bus_id = 0,
135 .phy_addr = -1,
136 .interface = PHY_INTERFACE_MODE_MII,
137 .mdio_bus_data = &ls1x_mdio_bus_data,
138 .dma_cfg = &ls1x_eth_dma_cfg,
139 .has_gmac = 1,
140 .tx_coe = 1,
141 .init = ls1x_eth_mux_init,
142};
143
61static struct resource ls1x_eth0_resources[] = { 144static struct resource ls1x_eth0_resources[] = {
62 [0] = { 145 [0] = {
63 .start = LS1X_GMAC0_BASE, 146 .start = LS1X_GMAC0_BASE,
@@ -71,25 +154,47 @@ static struct resource ls1x_eth0_resources[] = {
71 }, 154 },
72}; 155};
73 156
74static struct stmmac_mdio_bus_data ls1x_mdio_bus_data = { 157struct platform_device ls1x_eth0_pdev = {
75 .phy_mask = 0, 158 .name = "stmmaceth",
159 .id = 0,
160 .num_resources = ARRAY_SIZE(ls1x_eth0_resources),
161 .resource = ls1x_eth0_resources,
162 .dev = {
163 .platform_data = &ls1x_eth0_pdata,
164 },
76}; 165};
77 166
78static struct plat_stmmacenet_data ls1x_eth_data = { 167static struct plat_stmmacenet_data ls1x_eth1_pdata = {
79 .bus_id = 0, 168 .bus_id = 1,
80 .phy_addr = -1, 169 .phy_addr = -1,
170 .interface = PHY_INTERFACE_MODE_MII,
81 .mdio_bus_data = &ls1x_mdio_bus_data, 171 .mdio_bus_data = &ls1x_mdio_bus_data,
172 .dma_cfg = &ls1x_eth_dma_cfg,
82 .has_gmac = 1, 173 .has_gmac = 1,
83 .tx_coe = 1, 174 .tx_coe = 1,
175 .init = ls1x_eth_mux_init,
84}; 176};
85 177
86struct platform_device ls1x_eth0_device = { 178static struct resource ls1x_eth1_resources[] = {
179 [0] = {
180 .start = LS1X_GMAC1_BASE,
181 .end = LS1X_GMAC1_BASE + SZ_64K - 1,
182 .flags = IORESOURCE_MEM,
183 },
184 [1] = {
185 .name = "macirq",
186 .start = LS1X_GMAC1_IRQ,
187 .flags = IORESOURCE_IRQ,
188 },
189};
190
191struct platform_device ls1x_eth1_pdev = {
87 .name = "stmmaceth", 192 .name = "stmmaceth",
88 .id = 0, 193 .id = 1,
89 .num_resources = ARRAY_SIZE(ls1x_eth0_resources), 194 .num_resources = ARRAY_SIZE(ls1x_eth1_resources),
90 .resource = ls1x_eth0_resources, 195 .resource = ls1x_eth1_resources,
91 .dev = { 196 .dev = {
92 .platform_data = &ls1x_eth_data, 197 .platform_data = &ls1x_eth1_pdata,
93 }, 198 },
94}; 199};
95 200
@@ -111,7 +216,7 @@ static struct resource ls1x_ehci_resources[] = {
111static struct usb_ehci_pdata ls1x_ehci_pdata = { 216static struct usb_ehci_pdata ls1x_ehci_pdata = {
112}; 217};
113 218
114struct platform_device ls1x_ehci_device = { 219struct platform_device ls1x_ehci_pdev = {
115 .name = "ehci-platform", 220 .name = "ehci-platform",
116 .id = -1, 221 .id = -1,
117 .num_resources = ARRAY_SIZE(ls1x_ehci_resources), 222 .num_resources = ARRAY_SIZE(ls1x_ehci_resources),
@@ -123,7 +228,7 @@ struct platform_device ls1x_ehci_device = {
123}; 228};
124 229
125/* Real Time Clock */ 230/* Real Time Clock */
126struct platform_device ls1x_rtc_device = { 231struct platform_device ls1x_rtc_pdev = {
127 .name = "ls1x-rtc", 232 .name = "ls1x-rtc",
128 .id = -1, 233 .id = -1,
129}; 234};
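
The ls1x_eth_mux_init() callback added above is, at heart, a read-modify-write of one LS1X pin-mux register keyed on the PHY interface mode. A minimal standalone C sketch of that pattern follows; the register variable and the bit assignments are invented placeholders, not the real LS1X layout.

#include <stdint.h>
#include <stdio.h>

#define GMAC0_USE_TXCLK (1u << 0)  /* placeholder bit assignments */
#define GMAC0_USE_PWM01 (1u << 1)
#define GMAC0_SHUT      (1u << 2)

enum phy_mode { PHY_MODE_RGMII, PHY_MODE_MII };

static uint32_t mux_ctrl = GMAC0_SHUT;  /* stands in for LS1X_MUX_CTRL1 */

static int eth0_mux_init(enum phy_mode mode)
{
	uint32_t val = mux_ctrl;            /* __raw_readl() in the kernel */

	switch (mode) {
	case PHY_MODE_RGMII:
		val &= ~(GMAC0_USE_TXCLK | GMAC0_USE_PWM01);
		break;
	case PHY_MODE_MII:
		val |= GMAC0_USE_TXCLK | GMAC0_USE_PWM01;
		break;
	default:
		return -1;                  /* unsupported mode */
	}
	val &= ~GMAC0_SHUT;                 /* un-gate the MAC */
	mux_ctrl = val;                     /* __raw_writel() in the kernel */
	return 0;
}

int main(void)
{
	eth0_mux_init(PHY_MODE_MII);
	printf("mux_ctrl = %#x\n", mux_ctrl);  /* 0x3: SHUT cleared, MII set */
	return 0;
}
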
diff --git a/arch/mips/loongson1/common/prom.c b/arch/mips/loongson1/common/prom.c
index 2a47af5a55c3..68600980ea49 100644
--- a/arch/mips/loongson1/common/prom.c
+++ b/arch/mips/loongson1/common/prom.c
@@ -27,7 +27,7 @@ char *prom_getenv(char *envname)
27 i = strlen(envname); 27 i = strlen(envname);
28 28
29 while (*env) { 29 while (*env) {
30 if (strncmp(envname, *env, i) == 0 && *(*env+i) == '=') 30 if (strncmp(envname, *env, i) == 0 && *(*env + i) == '=')
31 return *env + i + 1; 31 return *env + i + 1;
32 env++; 32 env++;
33 } 33 }
@@ -49,7 +49,7 @@ void __init prom_init_cmdline(void)
49 for (i = 1; i < prom_argc; i++) { 49 for (i = 1; i < prom_argc; i++) {
50 strcpy(c, prom_argv[i]); 50 strcpy(c, prom_argv[i]);
51 c += strlen(prom_argv[i]); 51 c += strlen(prom_argv[i]);
52 if (i < prom_argc-1) 52 if (i < prom_argc - 1)
53 *c++ = ' '; 53 *c++ = ' ';
54 } 54 }
55 *c = 0; 55 *c = 0;
@@ -57,6 +57,7 @@ void __init prom_init_cmdline(void)
57 57
58void __init prom_init(void) 58void __init prom_init(void)
59{ 59{
60 void __iomem *uart_base;
60 prom_argc = fw_arg0; 61 prom_argc = fw_arg0;
61 prom_argv = (char **)fw_arg1; 62 prom_argv = (char **)fw_arg1;
62 prom_envp = (char **)fw_arg2; 63 prom_envp = (char **)fw_arg2;
@@ -65,23 +66,18 @@ void __init prom_init(void)
65 66
66 memsize = env_or_default("memsize", DEFAULT_MEMSIZE); 67 memsize = env_or_default("memsize", DEFAULT_MEMSIZE);
67 highmemsize = env_or_default("highmemsize", 0x0); 68 highmemsize = env_or_default("highmemsize", 0x0);
68}
69 69
70void __init prom_free_prom_memory(void) 70 if (strstr(arcs_cmdline, "console=ttyS3"))
71{ 71 uart_base = ioremap_nocache(LS1X_UART3_BASE, 0x0f);
72 else if (strstr(arcs_cmdline, "console=ttyS2"))
73 uart_base = ioremap_nocache(LS1X_UART2_BASE, 0x0f);
74 else if (strstr(arcs_cmdline, "console=ttyS1"))
75 uart_base = ioremap_nocache(LS1X_UART1_BASE, 0x0f);
76 else
77 uart_base = ioremap_nocache(LS1X_UART0_BASE, 0x0f);
78 setup_8250_early_printk_port((unsigned long)uart_base, 0, 0);
72} 79}
73 80
74#define PORT(offset) (u8 *)(KSEG1ADDR(LS1X_UART0_BASE + offset)) 81void __init prom_free_prom_memory(void)
75
76void prom_putchar(char c)
77{ 82{
78 int timeout;
79
80 timeout = 1024;
81
82 while (((readb(PORT(UART_LSR)) & UART_LSR_THRE) == 0)
83 && (timeout-- > 0))
84 ;
85
86 writeb(c, PORT(UART_TX));
87} 83}
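
The reworked prom_init() above routes early printk to whichever UART the console= argument names. The selection reduces to a strstr() scan, sketched standalone below; the base addresses are placeholders standing in for the LS1X_UARTn_BASE constants.

#include <stdio.h>
#include <string.h>

static unsigned long uart_base_for(const char *cmdline)
{
	if (strstr(cmdline, "console=ttyS3"))
		return 0x3000;  /* placeholder for LS1X_UART3_BASE */
	if (strstr(cmdline, "console=ttyS2"))
		return 0x2000;  /* placeholder for LS1X_UART2_BASE */
	if (strstr(cmdline, "console=ttyS1"))
		return 0x1000;  /* placeholder for LS1X_UART1_BASE */
	return 0x0000;          /* default: LS1X_UART0_BASE */
}

int main(void)
{
	/* prints 0x2000 */
	printf("%#lx\n", uart_base_for("root=/dev/ram console=ttyS2,115200"));
	return 0;
}
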
diff --git a/arch/mips/loongson1/common/reset.c b/arch/mips/loongson1/common/reset.c
index 547f34b69e4c..c41e4ca56ab4 100644
--- a/arch/mips/loongson1/common/reset.c
+++ b/arch/mips/loongson1/common/reset.c
@@ -14,12 +14,7 @@
14 14
15#include <loongson1.h> 15#include <loongson1.h>
16 16
17static void ls1x_restart(char *command) 17static void __iomem *wdt_base;
18{
19 __raw_writel(0x1, LS1X_WDT_EN);
20 __raw_writel(0x5000000, LS1X_WDT_TIMER);
21 __raw_writel(0x1, LS1X_WDT_SET);
22}
23 18
24static void ls1x_halt(void) 19static void ls1x_halt(void)
25{ 20{
@@ -29,6 +24,15 @@ static void ls1x_halt(void)
29 } 24 }
30} 25}
31 26
27static void ls1x_restart(char *command)
28{
29 __raw_writel(0x1, wdt_base + WDT_EN);
30 __raw_writel(0x1, wdt_base + WDT_TIMER);
31 __raw_writel(0x1, wdt_base + WDT_SET);
32
33 ls1x_halt();
34}
35
32static void ls1x_power_off(void) 36static void ls1x_power_off(void)
33{ 37{
34 ls1x_halt(); 38 ls1x_halt();
@@ -36,6 +40,10 @@ static void ls1x_power_off(void)
36 40
37static int __init ls1x_reboot_setup(void) 41static int __init ls1x_reboot_setup(void)
38{ 42{
43 wdt_base = ioremap_nocache(LS1X_WDT_BASE, 0x0f);
44 if (!wdt_base)
45 panic("Failed to remap watchdog registers");
46
39 _machine_restart = ls1x_restart; 47 _machine_restart = ls1x_restart;
40 _machine_halt = ls1x_halt; 48 _machine_halt = ls1x_halt;
41 pm_power_off = ls1x_power_off; 49 pm_power_off = ls1x_power_off;
diff --git a/arch/mips/loongson1/common/time.c b/arch/mips/loongson1/common/time.c
new file mode 100644
index 000000000000..df0f850d6a5f
--- /dev/null
+++ b/arch/mips/loongson1/common/time.c
@@ -0,0 +1,226 @@
1/*
2 * Copyright (c) 2014 Zhang, Keguang <keguang.zhang@gmail.com>
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
8 */
9
10#include <linux/clk.h>
11#include <linux/interrupt.h>
12#include <asm/time.h>
13
14#include <loongson1.h>
15#include <platform.h>
16
17#ifdef CONFIG_CEVT_CSRC_LS1X
18
19#if defined(CONFIG_TIMER_USE_PWM1)
20#define LS1X_TIMER_BASE LS1X_PWM1_BASE
21#define LS1X_TIMER_IRQ LS1X_PWM1_IRQ
22
23#elif defined(CONFIG_TIMER_USE_PWM2)
24#define LS1X_TIMER_BASE LS1X_PWM2_BASE
25#define LS1X_TIMER_IRQ LS1X_PWM2_IRQ
26
27#elif defined(CONFIG_TIMER_USE_PWM3)
28#define LS1X_TIMER_BASE LS1X_PWM3_BASE
29#define LS1X_TIMER_IRQ LS1X_PWM3_IRQ
30
31#else
32#define LS1X_TIMER_BASE LS1X_PWM0_BASE
33#define LS1X_TIMER_IRQ LS1X_PWM0_IRQ
34#endif
35
36DEFINE_RAW_SPINLOCK(ls1x_timer_lock);
37
38static void __iomem *timer_base;
39static uint32_t ls1x_jiffies_per_tick;
40
41static inline void ls1x_pwmtimer_set_period(uint32_t period)
42{
43 __raw_writel(period, timer_base + PWM_HRC);
44 __raw_writel(period, timer_base + PWM_LRC);
45}
46
47static inline void ls1x_pwmtimer_restart(void)
48{
49 __raw_writel(0x0, timer_base + PWM_CNT);
50 __raw_writel(INT_EN | CNT_EN, timer_base + PWM_CTRL);
51}
52
53void __init ls1x_pwmtimer_init(void)
54{
55 timer_base = ioremap(LS1X_TIMER_BASE, 0xf);
56 if (!timer_base)
57 panic("Failed to remap timer registers");
58
59 ls1x_jiffies_per_tick = DIV_ROUND_CLOSEST(mips_hpt_frequency, HZ);
60
61 ls1x_pwmtimer_set_period(ls1x_jiffies_per_tick);
62 ls1x_pwmtimer_restart();
63}
64
65static cycle_t ls1x_clocksource_read(struct clocksource *cs)
66{
67 unsigned long flags;
68 int count;
69 u32 jifs;
70 static int old_count;
71 static u32 old_jifs;
72
73 raw_spin_lock_irqsave(&ls1x_timer_lock, flags);
74 /*
75 * Although our caller may have the read side of xtime_lock,
76 * this is now a seqlock, and we are cheating in this routine
77 * by having side effects on state that we cannot undo if
78 * there is a collision on the seqlock and our caller has to
79 * retry. (Namely, old_jifs and old_count.) So we must treat
80 * jiffies as volatile despite the lock. We read jiffies
81 * before latching the timer count to guarantee that although
82 * the jiffies value might be older than the count (that is,
83 * the counter may underflow between the last point where
84 * jiffies was incremented and the point where we latch the
85 * count), it cannot be newer.
86 */
87 jifs = jiffies;
88 /* read the count */
89 count = __raw_readl(timer_base + PWM_CNT);
90
91 /*
92 * It's possible for count to appear to go the wrong way for this
93 * reason:
94 *
95 * The timer counter underflows, but we haven't handled the resulting
96 * interrupt and incremented jiffies yet.
97 *
98 * Previous attempts to handle these cases intelligently were buggy, so
99 * we just do the simple thing now.
100 */
101 if (count < old_count && jifs == old_jifs)
102 count = old_count;
103
104 old_count = count;
105 old_jifs = jifs;
106
107 raw_spin_unlock_irqrestore(&ls1x_timer_lock, flags);
108
109 return (cycle_t) (jifs * ls1x_jiffies_per_tick) + count;
110}
111
112static struct clocksource ls1x_clocksource = {
113 .name = "ls1x-pwmtimer",
114 .read = ls1x_clocksource_read,
115 .mask = CLOCKSOURCE_MASK(24),
116 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
117};
118
119static irqreturn_t ls1x_clockevent_isr(int irq, void *devid)
120{
121 struct clock_event_device *cd = devid;
122
123 ls1x_pwmtimer_restart();
124 cd->event_handler(cd);
125
126 return IRQ_HANDLED;
127}
128
129static void ls1x_clockevent_set_mode(enum clock_event_mode mode,
130 struct clock_event_device *cd)
131{
132 raw_spin_lock(&ls1x_timer_lock);
133 switch (mode) {
134 case CLOCK_EVT_MODE_PERIODIC:
135 ls1x_pwmtimer_set_period(ls1x_jiffies_per_tick);
 136		ls1x_pwmtimer_restart(); /* fall through */
137 case CLOCK_EVT_MODE_RESUME:
138 __raw_writel(INT_EN | CNT_EN, timer_base + PWM_CTRL);
139 break;
140 case CLOCK_EVT_MODE_ONESHOT:
141 case CLOCK_EVT_MODE_SHUTDOWN:
142 __raw_writel(__raw_readl(timer_base + PWM_CTRL) & ~CNT_EN,
143 timer_base + PWM_CTRL);
144 break;
145 default:
146 break;
147 }
148 raw_spin_unlock(&ls1x_timer_lock);
149}
150
151static int ls1x_clockevent_set_next(unsigned long evt,
152 struct clock_event_device *cd)
153{
154 raw_spin_lock(&ls1x_timer_lock);
155 ls1x_pwmtimer_set_period(evt);
156 ls1x_pwmtimer_restart();
157 raw_spin_unlock(&ls1x_timer_lock);
158
159 return 0;
160}
161
162static struct clock_event_device ls1x_clockevent = {
163 .name = "ls1x-pwmtimer",
164 .features = CLOCK_EVT_FEAT_PERIODIC,
165 .rating = 300,
166 .irq = LS1X_TIMER_IRQ,
167 .set_next_event = ls1x_clockevent_set_next,
168 .set_mode = ls1x_clockevent_set_mode,
169};
170
171static struct irqaction ls1x_pwmtimer_irqaction = {
172 .name = "ls1x-pwmtimer",
173 .handler = ls1x_clockevent_isr,
174 .dev_id = &ls1x_clockevent,
175 .flags = IRQF_PERCPU | IRQF_TIMER,
176};
177
178static void __init ls1x_time_init(void)
179{
180 struct clock_event_device *cd = &ls1x_clockevent;
181 int ret;
182
183 if (!mips_hpt_frequency)
184 panic("Invalid timer clock rate");
185
186 ls1x_pwmtimer_init();
187
188 clockevent_set_clock(cd, mips_hpt_frequency);
189 cd->max_delta_ns = clockevent_delta2ns(0xffffff, cd);
190 cd->min_delta_ns = clockevent_delta2ns(0x000300, cd);
191 cd->cpumask = cpumask_of(smp_processor_id());
192 clockevents_register_device(cd);
193
194 ls1x_clocksource.rating = 200 + mips_hpt_frequency / 10000000;
195 ret = clocksource_register_hz(&ls1x_clocksource, mips_hpt_frequency);
196 if (ret)
 197		panic("Failed to register clocksource: %d\n", ret);
198
199 setup_irq(LS1X_TIMER_IRQ, &ls1x_pwmtimer_irqaction);
200}
201#endif /* CONFIG_CEVT_CSRC_LS1X */
202
203void __init plat_time_init(void)
204{
205 struct clk *clk = NULL;
206
207 /* initialize LS1X clocks */
208 ls1x_clk_init();
209
210#ifdef CONFIG_CEVT_CSRC_LS1X
211 /* setup LS1X PWM timer */
212 clk = clk_get(NULL, "ls1x_pwmtimer");
213 if (IS_ERR(clk))
214 panic("unable to get timer clock, err=%ld", PTR_ERR(clk));
215
216 mips_hpt_frequency = clk_get_rate(clk);
217 ls1x_time_init();
218#else
219 /* setup mips r4k timer */
220 clk = clk_get(NULL, "cpu_clk");
221 if (IS_ERR(clk))
222 panic("unable to get cpu clock, err=%ld", PTR_ERR(clk));
223
224 mips_hpt_frequency = clk_get_rate(clk) / 2;
225#endif /* CONFIG_CEVT_CSRC_LS1X */
226}
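
The subtle part of the new clocksource is the guard in ls1x_clocksource_read(): the PWM counter can wrap before the tick interrupt has advanced jiffies, so a raw jiffies * period + count composition could briefly run backwards. A standalone C sketch of just that guard (the period value is assumed, not taken from the patch):

#include <stdint.h>
#include <stdio.h>

#define JIFFIES_PER_TICK 10000u  /* assumed counter cycles per tick */

static uint32_t old_count, old_jifs;

static uint64_t composed_read(uint32_t jifs, uint32_t count)
{
	/* counter wrapped within the same jiffy: clamp to stay monotonic */
	if (count < old_count && jifs == old_jifs)
		count = old_count;

	old_count = count;
	old_jifs = jifs;

	return (uint64_t)jifs * JIFFIES_PER_TICK + count;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)composed_read(5, 9000)); /* 59000 */
	printf("%llu\n", (unsigned long long)composed_read(5, 100));  /* clamped: 59000 */
	printf("%llu\n", (unsigned long long)composed_read(6, 100));  /* 60100 */
	return 0;
}
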
diff --git a/arch/mips/loongson1/ls1b/board.c b/arch/mips/loongson1/ls1b/board.c
index b26b10dac70a..58daeea25739 100644
--- a/arch/mips/loongson1/ls1b/board.c
+++ b/arch/mips/loongson1/ls1b/board.c
@@ -10,17 +10,19 @@
10#include <platform.h> 10#include <platform.h>
11 11
12static struct platform_device *ls1b_platform_devices[] __initdata = { 12static struct platform_device *ls1b_platform_devices[] __initdata = {
13 &ls1x_uart_device, 13 &ls1x_uart_pdev,
14 &ls1x_eth0_device, 14 &ls1x_cpufreq_pdev,
15 &ls1x_ehci_device, 15 &ls1x_eth0_pdev,
16 &ls1x_rtc_device, 16 &ls1x_eth1_pdev,
17 &ls1x_ehci_pdev,
18 &ls1x_rtc_pdev,
17}; 19};
18 20
19static int __init ls1b_platform_init(void) 21static int __init ls1b_platform_init(void)
20{ 22{
21 int err; 23 int err;
22 24
23 ls1x_serial_setup(&ls1x_uart_device); 25 ls1x_serial_setup(&ls1x_uart_pdev);
24 26
25 err = platform_add_devices(ls1b_platform_devices, 27 err = platform_add_devices(ls1b_platform_devices,
26 ARRAY_SIZE(ls1b_platform_devices)); 28 ARRAY_SIZE(ls1b_platform_devices));
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index cac529a405b8..9dfcd7fc1bc3 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -643,9 +643,14 @@ static inline int cop1_64bit(struct pt_regs *xcp)
643 return !test_thread_flag(TIF_32BIT_FPREGS); 643 return !test_thread_flag(TIF_32BIT_FPREGS);
644} 644}
645 645
646static inline bool hybrid_fprs(void)
647{
648 return test_thread_flag(TIF_HYBRID_FPREGS);
649}
650
646#define SIFROMREG(si, x) \ 651#define SIFROMREG(si, x) \
647do { \ 652do { \
648 if (cop1_64bit(xcp)) \ 653 if (cop1_64bit(xcp) && !hybrid_fprs()) \
649 (si) = (int)get_fpr32(&ctx->fpr[x], 0); \ 654 (si) = (int)get_fpr32(&ctx->fpr[x], 0); \
650 else \ 655 else \
651 (si) = (int)get_fpr32(&ctx->fpr[(x) & ~1], (x) & 1); \ 656 (si) = (int)get_fpr32(&ctx->fpr[(x) & ~1], (x) & 1); \
@@ -653,7 +658,7 @@ do { \
653 658
654#define SITOREG(si, x) \ 659#define SITOREG(si, x) \
655do { \ 660do { \
656 if (cop1_64bit(xcp)) { \ 661 if (cop1_64bit(xcp) && !hybrid_fprs()) { \
657 unsigned i; \ 662 unsigned i; \
658 set_fpr32(&ctx->fpr[x], 0, si); \ 663 set_fpr32(&ctx->fpr[x], 0, si); \
659 for (i = 1; i < ARRAY_SIZE(ctx->fpr[x].val32); i++) \ 664 for (i = 1; i < ARRAY_SIZE(ctx->fpr[x].val32); i++) \
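
The hybrid_fprs() check above switches SIFROMREG/SITOREG between two register-indexing schemes: with true 64-bit FPRs a single-precision read comes from word 0 of register x, while in 32-bit (and now hybrid) mode odd registers alias the high word of the preceding even register. A standalone illustration of the two schemes (simplified model, not the emulator's types):

#include <stdint.h>
#include <stdio.h>

struct fpr { uint32_t val32[2]; };  /* one 64-bit register as two words */

static uint32_t get_fpr32(const struct fpr *r, unsigned half)
{
	return r->val32[half];
}

static uint32_t read_single(const struct fpr *fpr, unsigned x, int paired)
{
	if (!paired)                                /* cop1_64bit && !hybrid */
		return get_fpr32(&fpr[x], 0);
	return get_fpr32(&fpr[x & ~1u], x & 1);     /* even/odd register pair */
}

int main(void)
{
	struct fpr bank[4] = { { {1, 2} }, { {3, 4} }, { {5, 6} }, { {7, 8} } };

	printf("%u\n", read_single(bank, 1, 0));  /* f1 word 0    -> 3 */
	printf("%u\n", read_single(bank, 1, 1));  /* f0 high word -> 2 */
	return 0;
}
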
diff --git a/arch/mips/math-emu/ieee754dp.c b/arch/mips/math-emu/ieee754dp.c
index fd134675fc2e..068f45a415fc 100644
--- a/arch/mips/math-emu/ieee754dp.c
+++ b/arch/mips/math-emu/ieee754dp.c
@@ -38,7 +38,7 @@ int ieee754dp_isnan(union ieee754dp x)
38static inline int ieee754dp_issnan(union ieee754dp x) 38static inline int ieee754dp_issnan(union ieee754dp x)
39{ 39{
40 assert(ieee754dp_isnan(x)); 40 assert(ieee754dp_isnan(x));
41 return ((DPMANT(x) & DP_MBIT(DP_FBITS-1)) == DP_MBIT(DP_FBITS-1)); 41 return (DPMANT(x) & DP_MBIT(DP_FBITS - 1)) == DP_MBIT(DP_FBITS - 1);
42} 42}
43 43
44 44
diff --git a/arch/mips/math-emu/ieee754sp.c b/arch/mips/math-emu/ieee754sp.c
index d348efe91445..ba88301579c2 100644
--- a/arch/mips/math-emu/ieee754sp.c
+++ b/arch/mips/math-emu/ieee754sp.c
@@ -38,7 +38,7 @@ int ieee754sp_isnan(union ieee754sp x)
38static inline int ieee754sp_issnan(union ieee754sp x) 38static inline int ieee754sp_issnan(union ieee754sp x)
39{ 39{
40 assert(ieee754sp_isnan(x)); 40 assert(ieee754sp_isnan(x));
41 return (SPMANT(x) & SP_MBIT(SP_FBITS-1)); 41 return SPMANT(x) & SP_MBIT(SP_FBITS - 1);
42} 42}
43 43
44 44
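
Both issnan() cleanups above test the top mantissa bit of a NaN. Under the legacy MIPS (pre-IEEE 754-2008) NaN encoding the emulator assumes here, a set top mantissa bit marks a signaling NaN. The single-precision check, written standalone over raw uint32_t bits:

#include <stdint.h>
#include <stdio.h>

#define SP_FBITS 23
#define SP_MBIT(n) ((uint32_t)1 << (n))
#define SPMANT(bits) ((bits) & 0x007fffffu)

static int sp_isnan(uint32_t bits)
{
	return ((bits >> 23) & 0xff) == 0xff && SPMANT(bits) != 0;
}

static int sp_issnan(uint32_t bits)
{
	/* caller must guarantee sp_isnan(bits); legacy-MIPS convention */
	return (SPMANT(bits) & SP_MBIT(SP_FBITS - 1)) != 0;
}

int main(void)
{
	uint32_t top_set   = 0x7fc00000u;  /* NaN, top mantissa bit set   */
	uint32_t top_clear = 0x7f800001u;  /* NaN, top mantissa bit clear */

	printf("%d %d\n", sp_isnan(top_set),   sp_issnan(top_set));    /* 1 1 */
	printf("%d %d\n", sp_isnan(top_clear), sp_issnan(top_clear));  /* 1 0 */
	return 0;
}
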
diff --git a/arch/mips/mm/Makefile b/arch/mips/mm/Makefile
index 7f4f93ab22b7..67ede4ef9b8d 100644
--- a/arch/mips/mm/Makefile
+++ b/arch/mips/mm/Makefile
@@ -4,7 +4,13 @@
4 4
5obj-y += cache.o dma-default.o extable.o fault.o \ 5obj-y += cache.o dma-default.o extable.o fault.o \
6 gup.o init.o mmap.o page.o page-funcs.o \ 6 gup.o init.o mmap.o page.o page-funcs.o \
7 tlbex.o tlbex-fault.o tlb-funcs.o uasm-mips.o 7 tlbex.o tlbex-fault.o tlb-funcs.o
8
9ifdef CONFIG_CPU_MICROMIPS
10obj-y += uasm-micromips.o
11else
12obj-y += uasm-mips.o
13endif
8 14
9obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o 15obj-$(CONFIG_32BIT) += ioremap.o pgtable-32.o
10obj-$(CONFIG_64BIT) += pgtable-64.o 16obj-$(CONFIG_64BIT) += pgtable-64.o
@@ -22,5 +28,3 @@ obj-$(CONFIG_IP22_CPU_SCACHE) += sc-ip22.o
22obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o 28obj-$(CONFIG_R5000_CPU_SCACHE) += sc-r5k.o
23obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o 29obj-$(CONFIG_RM7000_CPU_SCACHE) += sc-rm7k.o
24obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o 30obj-$(CONFIG_MIPS_CPU_SCACHE) += sc-mips.o
25
26obj-$(CONFIG_SYS_SUPPORTS_MICROMIPS) += uasm-micromips.o
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index fbcd8674ff1d..dd261df005c2 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -917,6 +917,18 @@ static inline void alias_74k_erratum(struct cpuinfo_mips *c)
917 } 917 }
918} 918}
919 919
920static void b5k_instruction_hazard(void)
921{
922 __sync();
923 __sync();
924 __asm__ __volatile__(
925 " nop; nop; nop; nop; nop; nop; nop; nop\n"
926 " nop; nop; nop; nop; nop; nop; nop; nop\n"
927 " nop; nop; nop; nop; nop; nop; nop; nop\n"
928 " nop; nop; nop; nop; nop; nop; nop; nop\n"
929 : : : "memory");
930}
931
920static char *way_string[] = { NULL, "direct mapped", "2-way", 932static char *way_string[] = { NULL, "direct mapped", "2-way",
921 "3-way", "4-way", "5-way", "6-way", "7-way", "8-way" 933 "3-way", "4-way", "5-way", "6-way", "7-way", "8-way"
922}; 934};
@@ -1683,6 +1695,37 @@ void r4k_cache_init(void)
1683 1695
1684 coherency_setup(); 1696 coherency_setup();
1685 board_cache_error_setup = r4k_cache_error_setup; 1697 board_cache_error_setup = r4k_cache_error_setup;
1698
1699 /*
1700 * Per-CPU overrides
1701 */
1702 switch (current_cpu_type()) {
1703 case CPU_BMIPS4350:
1704 case CPU_BMIPS4380:
1705 /* No IPI is needed because all CPUs share the same D$ */
1706 flush_data_cache_page = r4k_blast_dcache_page;
1707 break;
1708 case CPU_BMIPS5000:
1709 /* We lose our superpowers if L2 is disabled */
1710 if (c->scache.flags & MIPS_CACHE_NOT_PRESENT)
1711 break;
1712
1713 /* I$ fills from D$ just by emptying the write buffers */
1714 flush_cache_page = (void *)b5k_instruction_hazard;
1715 flush_cache_range = (void *)b5k_instruction_hazard;
1716 flush_cache_sigtramp = (void *)b5k_instruction_hazard;
1717 local_flush_data_cache_page = (void *)b5k_instruction_hazard;
1718 flush_data_cache_page = (void *)b5k_instruction_hazard;
1719 flush_icache_range = (void *)b5k_instruction_hazard;
1720 local_flush_icache_range = (void *)b5k_instruction_hazard;
1721
1722 /* Cache aliases are handled in hardware; allow HIGHMEM */
1723 current_cpu_data.dcache.flags &= ~MIPS_CACHE_ALIASES;
1724
1725 /* Optimization: an L2 flush implicitly flushes the L1 */
1726 current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES;
1727 break;
1728 }
1686} 1729}
1687 1730
1688static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd, 1731static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd,
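
The per-CPU override hunk above works because MIPS cache maintenance is dispatched through function pointers that r4k_cache_init() fills in generically and can then re-point for a specific core. A standalone sketch of that pattern (invented names, not the kernel's):

#include <stdio.h>

static void generic_flush(void) { puts("full D$ page flush"); }
static void b5k_drain(void)     { puts("drain write buffers only"); }

/* dispatch pointer, set up generically ... */
static void (*flush_data_cache_page)(void) = generic_flush;

enum cpu_type { CPU_GENERIC, CPU_BMIPS5000 };

static void cache_init(enum cpu_type cpu)
{
	/* ... then overridden for CPUs where a cheaper op suffices */
	if (cpu == CPU_BMIPS5000)
		flush_data_cache_page = b5k_drain;
}

int main(void)
{
	cache_init(CPU_BMIPS5000);
	flush_data_cache_page();  /* -> drain write buffers only */
	return 0;
}
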
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 33ba3c558fe4..af5f046e627e 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -61,6 +61,11 @@ static inline struct page *dma_addr_to_page(struct device *dev,
61 * Warning on the terminology - Linux calls an uncached area coherent; 61 * Warning on the terminology - Linux calls an uncached area coherent;
62 * MIPS terminology calls memory areas with hardware maintained coherency 62 * MIPS terminology calls memory areas with hardware maintained coherency
63 * coherent. 63 * coherent.
64 *
 65 * Note that the R14000 and R16000 should also be checked for in this
 66 * condition. However, this function is only called on non-I/O-coherent
 67 * systems, and the only such systems using the R10000 and R12000 are
 68 * the SGI IP28 Indigo² and the SGI IP32 aka O2, respectively.
64 */ 69 */
65static inline int cpu_needs_post_dma_flush(struct device *dev) 70static inline int cpu_needs_post_dma_flush(struct device *dev)
66{ 71{
diff --git a/arch/mips/mm/gup.c b/arch/mips/mm/gup.c
index 06ce17c2a905..7cba480568c8 100644
--- a/arch/mips/mm/gup.c
+++ b/arch/mips/mm/gup.c
@@ -17,7 +17,7 @@
17 17
18static inline pte_t gup_get_pte(pte_t *ptep) 18static inline pte_t gup_get_pte(pte_t *ptep)
19{ 19{
20#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) 20#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
21 pte_t pte; 21 pte_t pte;
22 22
23retry: 23retry:
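
The gup_get_pte() path guarded by the renamed symbol exists because a 64-bit PTE on a 32-bit CPU is read as two 32-bit halves and can tear if an update lands between them; the retry label above restores a consistent snapshot. The idea in plain C (single-threaded demo, not the kernel's exact loop):

#include <stdint.h>
#include <stdio.h>

struct pte64 {
	volatile uint32_t lo;
	volatile uint32_t hi;
};

static uint64_t read_pte_consistent(const struct pte64 *p)
{
	uint32_t lo, hi;

	do {
		lo = p->lo;
		hi = p->hi;
		/* retry if the low half changed while we read the high half */
	} while (lo != p->lo);

	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	struct pte64 pte = { .lo = 0x00001025u, .hi = 0x00000001u };

	printf("%#llx\n", (unsigned long long)read_pte_consistent(&pte));
	return 0;
}
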
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index f42e35e42790..448cde372af0 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -95,7 +95,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
95 idx += in_interrupt() ? FIX_N_COLOURS : 0; 95 idx += in_interrupt() ? FIX_N_COLOURS : 0;
96 vaddr = __fix_to_virt(FIX_CMAP_END - idx); 96 vaddr = __fix_to_virt(FIX_CMAP_END - idx);
97 pte = mk_pte(page, prot); 97 pte = mk_pte(page, prot);
98#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) 98#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
99 entrylo = pte.pte_high; 99 entrylo = pte.pte_high;
100#else 100#else
101 entrylo = pte_to_entrylo(pte_val(pte)); 101 entrylo = pte_to_entrylo(pte_val(pte));
diff --git a/arch/mips/mm/ioremap.c b/arch/mips/mm/ioremap.c
index 7f840bc08abf..8d5008cbdc0f 100644
--- a/arch/mips/mm/ioremap.c
+++ b/arch/mips/mm/ioremap.c
@@ -17,9 +17,9 @@
17#include <asm/tlbflush.h> 17#include <asm/tlbflush.h>
18 18
19static inline void remap_area_pte(pte_t * pte, unsigned long address, 19static inline void remap_area_pte(pte_t * pte, unsigned long address,
20 phys_t size, phys_t phys_addr, unsigned long flags) 20 phys_addr_t size, phys_addr_t phys_addr, unsigned long flags)
21{ 21{
22 phys_t end; 22 phys_addr_t end;
23 unsigned long pfn; 23 unsigned long pfn;
24 pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE 24 pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | __READABLE
25 | __WRITEABLE | flags); 25 | __WRITEABLE | flags);
@@ -43,9 +43,9 @@ static inline void remap_area_pte(pte_t * pte, unsigned long address,
43} 43}
44 44
45static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, 45static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
46 phys_t size, phys_t phys_addr, unsigned long flags) 46 phys_addr_t size, phys_addr_t phys_addr, unsigned long flags)
47{ 47{
48 phys_t end; 48 phys_addr_t end;
49 49
50 address &= ~PGDIR_MASK; 50 address &= ~PGDIR_MASK;
51 end = address + size; 51 end = address + size;
@@ -64,8 +64,8 @@ static inline int remap_area_pmd(pmd_t * pmd, unsigned long address,
64 return 0; 64 return 0;
65} 65}
66 66
67static int remap_area_pages(unsigned long address, phys_t phys_addr, 67static int remap_area_pages(unsigned long address, phys_addr_t phys_addr,
68 phys_t size, unsigned long flags) 68 phys_addr_t size, unsigned long flags)
69{ 69{
70 int error; 70 int error;
71 pgd_t * dir; 71 pgd_t * dir;
@@ -111,13 +111,13 @@ static int remap_area_pages(unsigned long address, phys_t phys_addr,
111 * caller shouldn't need to know that small detail. 111 * caller shouldn't need to know that small detail.
112 */ 112 */
113 113
114#define IS_LOW512(addr) (!((phys_t)(addr) & (phys_t) ~0x1fffffffULL)) 114#define IS_LOW512(addr) (!((phys_addr_t)(addr) & (phys_addr_t) ~0x1fffffffULL))
115 115
116void __iomem * __ioremap(phys_t phys_addr, phys_t size, unsigned long flags) 116void __iomem * __ioremap(phys_addr_t phys_addr, phys_addr_t size, unsigned long flags)
117{ 117{
118 struct vm_struct * area; 118 struct vm_struct * area;
119 unsigned long offset; 119 unsigned long offset;
120 phys_t last_addr; 120 phys_addr_t last_addr;
121 void * addr; 121 void * addr;
122 122
123 phys_addr = fixup_bigphys_addr(phys_addr, size); 123 phys_addr = fixup_bigphys_addr(phys_addr, size);
diff --git a/arch/mips/mm/sc-r5k.c b/arch/mips/mm/sc-r5k.c
index 0216ed6eaa2a..751b5cd18bf2 100644
--- a/arch/mips/mm/sc-r5k.c
+++ b/arch/mips/mm/sc-r5k.c
@@ -81,7 +81,7 @@ static inline int __init r5k_sc_probe(void)
81 unsigned long config = read_c0_config(); 81 unsigned long config = read_c0_config();
82 82
83 if (config & CONF_SC) 83 if (config & CONF_SC)
84 return(0); 84 return 0;
85 85
86 scache_size = (512 * 1024) << ((config & R5K_CONF_SS) >> 20); 86 scache_size = (512 * 1024) << ((config & R5K_CONF_SS) >> 20);
87 87
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index c3917e251f59..e90b2e899291 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -332,7 +332,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
332 { 332 {
333 ptep = pte_offset_map(pmdp, address); 333 ptep = pte_offset_map(pmdp, address);
334 334
335#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) 335#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
336 write_c0_entrylo0(ptep->pte_high); 336 write_c0_entrylo0(ptep->pte_high);
337 ptep++; 337 ptep++;
338 write_c0_entrylo1(ptep->pte_high); 338 write_c0_entrylo1(ptep->pte_high);
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index e3328a96e809..3978a3d81366 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -637,7 +637,7 @@ static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
637 if (cpu_has_rixi) { 637 if (cpu_has_rixi) {
638 UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL)); 638 UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
639 } else { 639 } else {
640#ifdef CONFIG_64BIT_PHYS_ADDR 640#ifdef CONFIG_PHYS_ADDR_T_64BIT
641 uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL)); 641 uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
642#else 642#else
643 UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL)); 643 UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
@@ -1009,7 +1009,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
1009 * 64bit address support (36bit on a 32bit CPU) in a 32bit 1009 * 64bit address support (36bit on a 32bit CPU) in a 32bit
1010 * Kernel is a special case. Only a few CPUs use it. 1010 * Kernel is a special case. Only a few CPUs use it.
1011 */ 1011 */
1012#ifdef CONFIG_64BIT_PHYS_ADDR 1012#ifdef CONFIG_PHYS_ADDR_T_64BIT
1013 if (cpu_has_64bits) { 1013 if (cpu_has_64bits) {
1014 uasm_i_ld(p, tmp, 0, ptep); /* get even pte */ 1014 uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
1015 uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */ 1015 uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
@@ -1510,14 +1510,14 @@ static void
1510iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr) 1510iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
1511{ 1511{
1512#ifdef CONFIG_SMP 1512#ifdef CONFIG_SMP
1513# ifdef CONFIG_64BIT_PHYS_ADDR 1513# ifdef CONFIG_PHYS_ADDR_T_64BIT
1514 if (cpu_has_64bits) 1514 if (cpu_has_64bits)
1515 uasm_i_lld(p, pte, 0, ptr); 1515 uasm_i_lld(p, pte, 0, ptr);
1516 else 1516 else
1517# endif 1517# endif
1518 UASM_i_LL(p, pte, 0, ptr); 1518 UASM_i_LL(p, pte, 0, ptr);
1519#else 1519#else
1520# ifdef CONFIG_64BIT_PHYS_ADDR 1520# ifdef CONFIG_PHYS_ADDR_T_64BIT
1521 if (cpu_has_64bits) 1521 if (cpu_has_64bits)
1522 uasm_i_ld(p, pte, 0, ptr); 1522 uasm_i_ld(p, pte, 0, ptr);
1523 else 1523 else
@@ -1530,13 +1530,13 @@ static void
1530iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr, 1530iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
1531 unsigned int mode) 1531 unsigned int mode)
1532{ 1532{
1533#ifdef CONFIG_64BIT_PHYS_ADDR 1533#ifdef CONFIG_PHYS_ADDR_T_64BIT
1534 unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY); 1534 unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
1535#endif 1535#endif
1536 1536
1537 uasm_i_ori(p, pte, pte, mode); 1537 uasm_i_ori(p, pte, pte, mode);
1538#ifdef CONFIG_SMP 1538#ifdef CONFIG_SMP
1539# ifdef CONFIG_64BIT_PHYS_ADDR 1539# ifdef CONFIG_PHYS_ADDR_T_64BIT
1540 if (cpu_has_64bits) 1540 if (cpu_has_64bits)
1541 uasm_i_scd(p, pte, 0, ptr); 1541 uasm_i_scd(p, pte, 0, ptr);
1542 else 1542 else
@@ -1548,7 +1548,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
1548 else 1548 else
1549 uasm_il_beqz(p, r, pte, label_smp_pgtable_change); 1549 uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
1550 1550
1551# ifdef CONFIG_64BIT_PHYS_ADDR 1551# ifdef CONFIG_PHYS_ADDR_T_64BIT
1552 if (!cpu_has_64bits) { 1552 if (!cpu_has_64bits) {
1553 /* no uasm_i_nop needed */ 1553 /* no uasm_i_nop needed */
1554 uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr); 1554 uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
@@ -1563,14 +1563,14 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
1563 uasm_i_nop(p); 1563 uasm_i_nop(p);
1564# endif 1564# endif
1565#else 1565#else
1566# ifdef CONFIG_64BIT_PHYS_ADDR 1566# ifdef CONFIG_PHYS_ADDR_T_64BIT
1567 if (cpu_has_64bits) 1567 if (cpu_has_64bits)
1568 uasm_i_sd(p, pte, 0, ptr); 1568 uasm_i_sd(p, pte, 0, ptr);
1569 else 1569 else
1570# endif 1570# endif
1571 UASM_i_SW(p, pte, 0, ptr); 1571 UASM_i_SW(p, pte, 0, ptr);
1572 1572
1573# ifdef CONFIG_64BIT_PHYS_ADDR 1573# ifdef CONFIG_PHYS_ADDR_T_64BIT
1574 if (!cpu_has_64bits) { 1574 if (!cpu_has_64bits) {
1575 uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr); 1575 uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
1576 uasm_i_ori(p, pte, pte, hwmode); 1576 uasm_i_ori(p, pte, pte, hwmode);
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 6708a2dbf934..8e02291cfc0c 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -96,9 +96,11 @@ static struct insn insn_table[] = {
96 { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 96 { insn_lw, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
97 { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD }, 97 { insn_lwx, M(spec3_op, 0, 0, 0, lwx_op, lx_op), RS | RT | RD },
98 { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET}, 98 { insn_mfc0, M(cop0_op, mfc_op, 0, 0, 0, 0), RT | RD | SET},
99 { insn_mfhc0, M(cop0_op, mfhc0_op, 0, 0, 0, 0), RT | RD | SET},
99 { insn_mfhi, M(spec_op, 0, 0, 0, 0, mfhi_op), RD }, 100 { insn_mfhi, M(spec_op, 0, 0, 0, 0, mfhi_op), RD },
100 { insn_mflo, M(spec_op, 0, 0, 0, 0, mflo_op), RD }, 101 { insn_mflo, M(spec_op, 0, 0, 0, 0, mflo_op), RD },
101 { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET}, 102 { insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
103 { insn_mthc0, M(cop0_op, mthc0_op, 0, 0, 0, 0), RT | RD | SET},
102 { insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD}, 104 { insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD},
103 { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM }, 105 { insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
104 { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD }, 106 { insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index a01b0d6cedd2..4adf30284813 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -51,12 +51,12 @@ enum opcode {
51 insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret, 51 insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret,
52 insn_ext, insn_ins, insn_j, insn_jal, insn_jalr, insn_jr, insn_lb, 52 insn_ext, insn_ins, insn_j, insn_jal, insn_jalr, insn_jr, insn_lb,
53 insn_ld, insn_ldx, insn_lh, insn_ll, insn_lld, insn_lui, insn_lw, 53 insn_ld, insn_ldx, insn_lh, insn_ll, insn_lld, insn_lui, insn_lw,
54 insn_lwx, insn_mfc0, insn_mfhi, insn_mflo, insn_mtc0, insn_mul, 54 insn_lwx, insn_mfc0, insn_mfhc0, insn_mfhi, insn_mflo, insn_mtc0,
55 insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd, 55 insn_mthc0, insn_mul, insn_or, insn_ori, insn_pref, insn_rfe,
56 insn_sd, insn_sll, insn_sllv, insn_slt, insn_sltiu, insn_sltu, insn_sra, 56 insn_rotr, insn_sc, insn_scd, insn_sd, insn_sll, insn_sllv, insn_slt,
57 insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall, 57 insn_sltiu, insn_sltu, insn_sra, insn_srl, insn_srlv, insn_subu,
58 insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh, 58 insn_sw, insn_sync, insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi,
59 insn_xor, insn_xori, insn_yield, 59 insn_tlbwr, insn_wait, insn_wsbh, insn_xor, insn_xori, insn_yield,
60}; 60};
61 61
62struct insn { 62struct insn {
@@ -284,9 +284,11 @@ I_u2s3u1(_lld)
284I_u1s2(_lui) 284I_u1s2(_lui)
285I_u2s3u1(_lw) 285I_u2s3u1(_lw)
286I_u1u2u3(_mfc0) 286I_u1u2u3(_mfc0)
287I_u1u2u3(_mfhc0)
287I_u1(_mfhi) 288I_u1(_mfhi)
288I_u1(_mflo) 289I_u1(_mflo)
289I_u1u2u3(_mtc0) 290I_u1u2u3(_mtc0)
291I_u1u2u3(_mthc0)
290I_u3u1u2(_mul) 292I_u3u1u2(_mul)
291I_u2u1u3(_ori) 293I_u2u1u3(_ori)
292I_u3u1u2(_or) 294I_u3u1u2(_or)
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index 0f60256d3784..6849f533154f 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -111,7 +111,7 @@ static void __init mips_ejtag_setup(void)
111 flush_icache_range((unsigned long)base, (unsigned long)base + 0x80); 111 flush_icache_range((unsigned long)base, (unsigned long)base + 0x80);
112} 112}
113 113
114phys_t mips_cpc_default_phys_base(void) 114phys_addr_t mips_cpc_default_phys_base(void)
115{ 115{
116 return CPC_BASE_ADDR; 116 return CPC_BASE_ADDR;
117} 117}
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index e4f43baa8f67..d1392f8f5811 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -18,6 +18,7 @@
18#include <linux/smp.h> 18#include <linux/smp.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/io.h> 20#include <linux/io.h>
21#include <linux/irqchip/mips-gic.h>
21#include <linux/kernel_stat.h> 22#include <linux/kernel_stat.h>
22#include <linux/kernel.h> 23#include <linux/kernel.h>
23#include <linux/random.h> 24#include <linux/random.h>
@@ -33,19 +34,13 @@
33#include <asm/mips-boards/generic.h> 34#include <asm/mips-boards/generic.h>
34#include <asm/mips-boards/msc01_pci.h> 35#include <asm/mips-boards/msc01_pci.h>
35#include <asm/msc01_ic.h> 36#include <asm/msc01_ic.h>
36#include <asm/gic.h>
37#include <asm/setup.h> 37#include <asm/setup.h>
38#include <asm/rtlx.h> 38#include <asm/rtlx.h>
39 39
40static unsigned long _msc01_biu_base; 40static void __iomem *_msc01_biu_base;
41static unsigned int ipi_map[NR_CPUS];
42 41
43static DEFINE_RAW_SPINLOCK(mips_irq_lock); 42static DEFINE_RAW_SPINLOCK(mips_irq_lock);
44 43
45#ifdef CONFIG_MIPS_GIC_IPI
46DECLARE_BITMAP(ipi_ints, GIC_NUM_INTRS);
47#endif
48
49static inline int mips_pcibios_iack(void) 44static inline int mips_pcibios_iack(void)
50{ 45{
51 int irq; 46 int irq;
@@ -127,24 +122,10 @@ static void malta_hw0_irqdispatch(void)
127#endif 122#endif
128} 123}
129 124
130static void malta_ipi_irqdispatch(void) 125static irqreturn_t i8259_handler(int irq, void *dev_id)
131{ 126{
132#ifdef CONFIG_MIPS_GIC_IPI 127 malta_hw0_irqdispatch();
133 unsigned long irq; 128 return IRQ_HANDLED;
134 DECLARE_BITMAP(pending, GIC_NUM_INTRS);
135
136 gic_get_int_mask(pending, ipi_ints);
137
138 irq = find_first_bit(pending, GIC_NUM_INTRS);
139
140 while (irq < GIC_NUM_INTRS) {
141 do_IRQ(MIPS_GIC_IRQ_BASE + irq);
142
143 irq = find_next_bit(pending, GIC_NUM_INTRS, irq + 1);
144 }
145#endif
146 if (gic_compare_int())
147 do_IRQ(MIPS_GIC_IRQ_BASE);
148} 129}
149 130
150static void corehi_irqdispatch(void) 131static void corehi_irqdispatch(void)
@@ -203,95 +184,10 @@ static void corehi_irqdispatch(void)
203 die("CoreHi interrupt", regs); 184 die("CoreHi interrupt", regs);
204} 185}
205 186
206static inline int clz(unsigned long x) 187static irqreturn_t corehi_handler(int irq, void *dev_id)
207{
208 __asm__(
209 " .set push \n"
210 " .set mips32 \n"
211 " clz %0, %1 \n"
212 " .set pop \n"
213 : "=r" (x)
214 : "r" (x));
215
216 return x;
217}
218
219/*
220 * Version of ffs that only looks at bits 12..15.
221 */
222static inline unsigned int irq_ffs(unsigned int pending)
223{
224#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
225 return -clz(pending) + 31 - CAUSEB_IP;
226#else
227 unsigned int a0 = 7;
228 unsigned int t0;
229
230 t0 = pending & 0xf000;
231 t0 = t0 < 1;
232 t0 = t0 << 2;
233 a0 = a0 - t0;
234 pending = pending << t0;
235
236 t0 = pending & 0xc000;
237 t0 = t0 < 1;
238 t0 = t0 << 1;
239 a0 = a0 - t0;
240 pending = pending << t0;
241
242 t0 = pending & 0x8000;
243 t0 = t0 < 1;
244 /* t0 = t0 << 2; */
245 a0 = a0 - t0;
246 /* pending = pending << t0; */
247
248 return a0;
249#endif
250}
251
252/*
253 * IRQs on the Malta board look basically (barring software IRQs which we
254 * don't use at all and all external interrupt sources are combined together
255 * on hardware interrupt 0 (MIPS IRQ 2)) like:
256 *
257 * MIPS IRQ Source
258 * -------- ------
259 * 0 Software (ignored)
260 * 1 Software (ignored)
261 * 2 Combined hardware interrupt (hw0)
262 * 3 Hardware (ignored)
263 * 4 Hardware (ignored)
264 * 5 Hardware (ignored)
265 * 6 Hardware (ignored)
266 * 7 R4k timer (what we use)
267 *
268 * We handle the IRQ according to _our_ priority which is:
269 *
270 * Highest ---- R4k Timer
271 * Lowest ---- Combined hardware interrupt
272 *
273 * then we just return, if multiple IRQs are pending then we will just take
274 * another exception, big deal.
275 */
276
277asmlinkage void plat_irq_dispatch(void)
278{ 188{
279 unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM; 189 corehi_irqdispatch();
280 int irq; 190 return IRQ_HANDLED;
281
282 if (unlikely(!pending)) {
283 spurious_interrupt();
284 return;
285 }
286
287 irq = irq_ffs(pending);
288
289 if (irq == MIPSCPU_INT_I8259A)
290 malta_hw0_irqdispatch();
291 else if (gic_present && ((1 << irq) & ipi_map[smp_processor_id()]))
292 malta_ipi_irqdispatch();
293 else
294 do_IRQ(MIPS_CPU_IRQ_BASE + irq);
295} 191}
296 192
297#ifdef CONFIG_MIPS_MT_SMP 193#ifdef CONFIG_MIPS_MT_SMP
@@ -312,13 +208,6 @@ static void ipi_call_dispatch(void)
312 do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ); 208 do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
313} 209}
314 210
315#endif /* CONFIG_MIPS_MT_SMP */
316
317#ifdef CONFIG_MIPS_GIC_IPI
318
319#define GIC_MIPS_CPU_IPI_RESCHED_IRQ 3
320#define GIC_MIPS_CPU_IPI_CALL_IRQ 4
321
322static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) 211static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
323{ 212{
324#ifdef CONFIG_MIPS_VPE_APSP_API_CMP 213#ifdef CONFIG_MIPS_VPE_APSP_API_CMP
@@ -349,31 +238,16 @@ static struct irqaction irq_call = {
349 .flags = IRQF_PERCPU, 238 .flags = IRQF_PERCPU,
350 .name = "IPI_call" 239 .name = "IPI_call"
351}; 240};
352#endif /* CONFIG_MIPS_GIC_IPI */ 241#endif /* CONFIG_MIPS_MT_SMP */
353
354static int gic_resched_int_base;
355static int gic_call_int_base;
356#define GIC_RESCHED_INT(cpu) (gic_resched_int_base+(cpu))
357#define GIC_CALL_INT(cpu) (gic_call_int_base+(cpu))
358
359unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
360{
361 return GIC_CALL_INT(cpu);
362}
363
364unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
365{
366 return GIC_RESCHED_INT(cpu);
367}
368 242
369static struct irqaction i8259irq = { 243static struct irqaction i8259irq = {
370 .handler = no_action, 244 .handler = i8259_handler,
371 .name = "XT-PIC cascade", 245 .name = "XT-PIC cascade",
372 .flags = IRQF_NO_THREAD, 246 .flags = IRQF_NO_THREAD,
373}; 247};
374 248
375static struct irqaction corehi_irqaction = { 249static struct irqaction corehi_irqaction = {
376 .handler = no_action, 250 .handler = corehi_handler,
377 .name = "CoreHi", 251 .name = "CoreHi",
378 .flags = IRQF_NO_THREAD, 252 .flags = IRQF_NO_THREAD,
379}; 253};
@@ -399,60 +273,6 @@ static msc_irqmap_t msc_eicirqmap[] __initdata = {
399 273
400static int msc_nr_eicirqs __initdata = ARRAY_SIZE(msc_eicirqmap); 274static int msc_nr_eicirqs __initdata = ARRAY_SIZE(msc_eicirqmap);
401 275
402/*
403 * This GIC specific tabular array defines the association between External
404 * Interrupts and CPUs/Core Interrupts. The nature of the External
405 * Interrupts is also defined here - polarity/trigger.
406 */
407
408#define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK
409#define X GIC_UNUSED
410
411static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
412 { X, X, X, X, 0 },
413 { X, X, X, X, 0 },
414 { X, X, X, X, 0 },
415 { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
416 { 0, GIC_CPU_INT1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
417 { 0, GIC_CPU_INT2, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
418 { 0, GIC_CPU_INT3, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
419 { 0, GIC_CPU_INT4, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
420 { 0, GIC_CPU_INT3, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
421 { 0, GIC_CPU_INT3, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
422 { X, X, X, X, 0 },
423 { X, X, X, X, 0 },
424 { 0, GIC_CPU_INT3, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
425 { 0, GIC_CPU_NMI, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
426 { 0, GIC_CPU_NMI, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
427 { X, X, X, X, 0 },
428 /* The remainder of this table is initialised by fill_ipi_map */
429};
430#undef X
431
432#ifdef CONFIG_MIPS_GIC_IPI
433static void __init fill_ipi_map1(int baseintr, int cpu, int cpupin)
434{
435 int intr = baseintr + cpu;
436 gic_intr_map[intr].cpunum = cpu;
437 gic_intr_map[intr].pin = cpupin;
438 gic_intr_map[intr].polarity = GIC_POL_POS;
439 gic_intr_map[intr].trigtype = GIC_TRIG_EDGE;
440 gic_intr_map[intr].flags = 0;
441 ipi_map[cpu] |= (1 << (cpupin + 2));
442 bitmap_set(ipi_ints, intr, 1);
443}
444
445static void __init fill_ipi_map(void)
446{
447 int cpu;
448
449 for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
450 fill_ipi_map1(gic_resched_int_base, cpu, GIC_CPU_INT1);
451 fill_ipi_map1(gic_call_int_base, cpu, GIC_CPU_INT2);
452 }
453}
454#endif
455
456void __init arch_init_ipiirq(int irq, struct irqaction *action) 276void __init arch_init_ipiirq(int irq, struct irqaction *action)
457{ 277{
458 setup_irq(irq, action); 278 setup_irq(irq, action);
@@ -461,6 +281,8 @@ void __init arch_init_ipiirq(int irq, struct irqaction *action)
461 281
462void __init arch_init_irq(void) 282void __init arch_init_irq(void)
463{ 283{
284 int corehi_irq, i8259_irq;
285
464 init_i8259_irqs(); 286 init_i8259_irqs();
465 287
466 if (!cpu_has_veic) 288 if (!cpu_has_veic)
@@ -471,12 +293,12 @@ void __init arch_init_irq(void)
471 gic_present = 1; 293 gic_present = 1;
472 } else { 294 } else {
473 if (mips_revision_sconid == MIPS_REVISION_SCON_ROCIT) { 295 if (mips_revision_sconid == MIPS_REVISION_SCON_ROCIT) {
474 _msc01_biu_base = (unsigned long) 296 _msc01_biu_base = ioremap_nocache(MSC01_BIU_REG_BASE,
475 ioremap_nocache(MSC01_BIU_REG_BASE,
476 MSC01_BIU_ADDRSPACE_SZ); 297 MSC01_BIU_ADDRSPACE_SZ);
477 gic_present = (REG(_msc01_biu_base, MSC01_SC_CFG) & 298 gic_present =
478 MSC01_SC_CFG_GICPRES_MSK) >> 299 (__raw_readl(_msc01_biu_base + MSC01_SC_CFG_OFS) &
479 MSC01_SC_CFG_GICPRES_SHF; 300 MSC01_SC_CFG_GICPRES_MSK) >>
301 MSC01_SC_CFG_GICPRES_SHF;
480 } 302 }
481 } 303 }
482 if (gic_present) 304 if (gic_present)
@@ -507,63 +329,20 @@ void __init arch_init_irq(void)
507 msc_nr_irqs); 329 msc_nr_irqs);
508 } 330 }
509 331
510 if (cpu_has_veic) {
511 set_vi_handler(MSC01E_INT_I8259A, malta_hw0_irqdispatch);
512 set_vi_handler(MSC01E_INT_COREHI, corehi_irqdispatch);
513 setup_irq(MSC01E_INT_BASE+MSC01E_INT_I8259A, &i8259irq);
514 setup_irq(MSC01E_INT_BASE+MSC01E_INT_COREHI, &corehi_irqaction);
515 } else if (cpu_has_vint) {
516 set_vi_handler(MIPSCPU_INT_I8259A, malta_hw0_irqdispatch);
517 set_vi_handler(MIPSCPU_INT_COREHI, corehi_irqdispatch);
518 setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq);
519 setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI,
520 &corehi_irqaction);
521 } else {
522 setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq);
523 setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI,
524 &corehi_irqaction);
525 }
526
527 if (gic_present) { 332 if (gic_present) {
528 /* FIXME */
529 int i; 333 int i;
530#if defined(CONFIG_MIPS_GIC_IPI) 334
531 gic_call_int_base = GIC_NUM_INTRS - 335 gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, MIPSCPU_INT_GIC,
532 (NR_CPUS - nr_cpu_ids) * 2 - nr_cpu_ids; 336 MIPS_GIC_IRQ_BASE);
533 gic_resched_int_base = gic_call_int_base - nr_cpu_ids;
534 fill_ipi_map();
535#endif
536 gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, gic_intr_map,
537 ARRAY_SIZE(gic_intr_map), MIPS_GIC_IRQ_BASE);
538 if (!mips_cm_present()) { 337 if (!mips_cm_present()) {
539 /* Enable the GIC */ 338 /* Enable the GIC */
540 i = REG(_msc01_biu_base, MSC01_SC_CFG); 339 i = __raw_readl(_msc01_biu_base + MSC01_SC_CFG_OFS);
541 REG(_msc01_biu_base, MSC01_SC_CFG) = 340 __raw_writel(i | (0x1 << MSC01_SC_CFG_GICENA_SHF),
542 (i | (0x1 << MSC01_SC_CFG_GICENA_SHF)); 341 _msc01_biu_base + MSC01_SC_CFG_OFS);
543 pr_debug("GIC Enabled\n"); 342 pr_debug("GIC Enabled\n");
544 } 343 }
545#if defined(CONFIG_MIPS_GIC_IPI) 344 i8259_irq = MIPS_GIC_IRQ_BASE + GIC_INT_I8259A;
546 /* set up ipi interrupts */ 345 corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
547 if (cpu_has_vint) {
548 set_vi_handler(MIPSCPU_INT_IPI0, malta_ipi_irqdispatch);
549 set_vi_handler(MIPSCPU_INT_IPI1, malta_ipi_irqdispatch);
550 }
551 /* Argh.. this really needs sorting out.. */
552 pr_info("CPU%d: status register was %08x\n",
553 smp_processor_id(), read_c0_status());
554 write_c0_status(read_c0_status() | STATUSF_IP3 | STATUSF_IP4);
555 pr_info("CPU%d: status register now %08x\n",
556 smp_processor_id(), read_c0_status());
557 write_c0_status(0x1100dc00);
558 pr_info("CPU%d: status register frc %08x\n",
559 smp_processor_id(), read_c0_status());
560 for (i = 0; i < nr_cpu_ids; i++) {
561 arch_init_ipiirq(MIPS_GIC_IRQ_BASE +
562 GIC_RESCHED_INT(i), &irq_resched);
563 arch_init_ipiirq(MIPS_GIC_IRQ_BASE +
564 GIC_CALL_INT(i), &irq_call);
565 }
566#endif
567 } else { 346 } else {
568#if defined(CONFIG_MIPS_MT_SMP) 347#if defined(CONFIG_MIPS_MT_SMP)
569 /* set up ipi interrupts */ 348 /* set up ipi interrupts */
@@ -573,12 +352,6 @@ void __init arch_init_irq(void)
573 cpu_ipi_resched_irq = MSC01E_INT_SW0; 352 cpu_ipi_resched_irq = MSC01E_INT_SW0;
574 cpu_ipi_call_irq = MSC01E_INT_SW1; 353 cpu_ipi_call_irq = MSC01E_INT_SW1;
575 } else { 354 } else {
576 if (cpu_has_vint) {
577 set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ,
578 ipi_resched_dispatch);
579 set_vi_handler (MIPS_CPU_IPI_CALL_IRQ,
580 ipi_call_dispatch);
581 }
582 cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE + 355 cpu_ipi_resched_irq = MIPS_CPU_IRQ_BASE +
583 MIPS_CPU_IPI_RESCHED_IRQ; 356 MIPS_CPU_IPI_RESCHED_IRQ;
584 cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE + 357 cpu_ipi_call_irq = MIPS_CPU_IRQ_BASE +
@@ -587,7 +360,21 @@ void __init arch_init_irq(void)
587 arch_init_ipiirq(cpu_ipi_resched_irq, &irq_resched); 360 arch_init_ipiirq(cpu_ipi_resched_irq, &irq_resched);
588 arch_init_ipiirq(cpu_ipi_call_irq, &irq_call); 361 arch_init_ipiirq(cpu_ipi_call_irq, &irq_call);
589#endif 362#endif
363 if (cpu_has_veic) {
364 set_vi_handler(MSC01E_INT_I8259A,
365 malta_hw0_irqdispatch);
366 set_vi_handler(MSC01E_INT_COREHI,
367 corehi_irqdispatch);
368 i8259_irq = MSC01E_INT_BASE + MSC01E_INT_I8259A;
369 corehi_irq = MSC01E_INT_BASE + MSC01E_INT_COREHI;
370 } else {
371 i8259_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_I8259A;
372 corehi_irq = MIPS_CPU_IRQ_BASE + MIPSCPU_INT_COREHI;
373 }
590 } 374 }
375
376 setup_irq(i8259_irq, &i8259irq);
377 setup_irq(corehi_irq, &corehi_irqaction);
591} 378}
592 379
593void malta_be_init(void) 380void malta_be_init(void)
@@ -714,37 +501,3 @@ int malta_be_handler(struct pt_regs *regs, int is_fixup)
714 501
715 return retval; 502 return retval;
716} 503}
717
718void gic_enable_interrupt(int irq_vec)
719{
720 GIC_SET_INTR_MASK(irq_vec);
721}
722
723void gic_disable_interrupt(int irq_vec)
724{
725 GIC_CLR_INTR_MASK(irq_vec);
726}
727
728void gic_irq_ack(struct irq_data *d)
729{
730 int irq = (d->irq - gic_irq_base);
731
732 GIC_CLR_INTR_MASK(irq);
733
734 if (gic_irq_flags[irq] & GIC_TRIG_EDGE)
735 GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq);
736}
737
738void gic_finish_irq(struct irq_data *d)
739{
740 /* Enable interrupts. */
741 GIC_SET_INTR_MASK(d->irq - gic_irq_base);
742}
743
744void __init gic_platform_init(int irqs, struct irq_chip *irq_controller)
745{
746 int i;
747
748 for (i = gic_irq_base; i < (gic_irq_base + irqs); i++)
749 irq_set_chip(i, irq_controller);
750}
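
Most of what this hunk deletes is hand-rolled dispatch that the generic MIPS GIC irqchip driver now subsumes. The removed irq_ffs() picked the highest-numbered pending IP bit (bits 8..15 of the Cause register), so the R4k timer on IP7 always won, matching the priority comment that was deleted with it. Its arithmetic, reproduced standalone with GCC's __builtin_clz in place of the MIPS clz instruction:

#include <stdio.h>

#define CAUSEB_IP 8  /* first interrupt-pending bit in c0_cause */

static unsigned int irq_ffs(unsigned int pending)
{
	/* pending must be non-zero; valid bits are 8..15 */
	return 31 - __builtin_clz(pending) - CAUSEB_IP;
}

int main(void)
{
	printf("%u\n", irq_ffs(1u << 15));                /* 7: R4k timer      */
	printf("%u\n", irq_ffs((1u << 10) | (1u << 12))); /* 4: higher IP wins */
	return 0;
}
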
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 3778a359f3ad..ce02dbdedc62 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -24,6 +24,7 @@
24#include <linux/sched.h> 24#include <linux/sched.h>
25#include <linux/spinlock.h> 25#include <linux/spinlock.h>
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/irqchip/mips-gic.h>
27#include <linux/timex.h> 28#include <linux/timex.h>
28#include <linux/mc146818rtc.h> 29#include <linux/mc146818rtc.h>
29 30
@@ -37,7 +38,6 @@
37#include <asm/time.h> 38#include <asm/time.h>
38#include <asm/mc146818-time.h> 39#include <asm/mc146818-time.h>
39#include <asm/msc01_ic.h> 40#include <asm/msc01_ic.h>
40#include <asm/gic.h>
41 41
42#include <asm/mips-boards/generic.h> 42#include <asm/mips-boards/generic.h>
43#include <asm/mips-boards/maltaint.h> 43#include <asm/mips-boards/maltaint.h>
@@ -46,6 +46,8 @@ static int mips_cpu_timer_irq;
46static int mips_cpu_perf_irq; 46static int mips_cpu_perf_irq;
47extern int cp0_perfcount_irq; 47extern int cp0_perfcount_irq;
48 48
49static unsigned int gic_frequency;
50
49static void mips_timer_dispatch(void) 51static void mips_timer_dispatch(void)
50{ 52{
51 do_IRQ(mips_cpu_timer_irq); 53 do_IRQ(mips_cpu_timer_irq);
@@ -70,9 +72,7 @@ static void __init estimate_frequencies(void)
70{ 72{
71 unsigned long flags; 73 unsigned long flags;
72 unsigned int count, start; 74 unsigned int count, start;
73#ifdef CONFIG_IRQ_GIC 75 cycle_t giccount = 0, gicstart = 0;
74 unsigned int giccount = 0, gicstart = 0;
75#endif
76 76
77#if defined(CONFIG_KVM_GUEST) && CONFIG_KVM_GUEST_TIMER_FREQ 77#if defined(CONFIG_KVM_GUEST) && CONFIG_KVM_GUEST_TIMER_FREQ
78 mips_hpt_frequency = CONFIG_KVM_GUEST_TIMER_FREQ * 1000000; 78 mips_hpt_frequency = CONFIG_KVM_GUEST_TIMER_FREQ * 1000000;
@@ -87,32 +87,26 @@ static void __init estimate_frequencies(void)
87 87
88 /* Initialize counters. */ 88 /* Initialize counters. */
89 start = read_c0_count(); 89 start = read_c0_count();
90#ifdef CONFIG_IRQ_GIC
91 if (gic_present) 90 if (gic_present)
92 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), gicstart); 91 gicstart = gic_read_count();
93#endif
94 92
95 /* Read counter exactly on falling edge of update flag. */ 93 /* Read counter exactly on falling edge of update flag. */
96 while (CMOS_READ(RTC_REG_A) & RTC_UIP); 94 while (CMOS_READ(RTC_REG_A) & RTC_UIP);
97 while (!(CMOS_READ(RTC_REG_A) & RTC_UIP)); 95 while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
98 96
99 count = read_c0_count(); 97 count = read_c0_count();
100#ifdef CONFIG_IRQ_GIC
101 if (gic_present) 98 if (gic_present)
102 GICREAD(GIC_REG(SHARED, GIC_SH_COUNTER_31_00), giccount); 99 giccount = gic_read_count();
103#endif
104 100
105 local_irq_restore(flags); 101 local_irq_restore(flags);
106 102
107 count -= start; 103 count -= start;
108 mips_hpt_frequency = count; 104 mips_hpt_frequency = count;
109 105
110#ifdef CONFIG_IRQ_GIC
111 if (gic_present) { 106 if (gic_present) {
112 giccount -= gicstart; 107 giccount -= gicstart;
113 gic_frequency = giccount; 108 gic_frequency = giccount;
114 } 109 }
115#endif
116} 110}
117 111
118void read_persistent_clock(struct timespec *ts) 112void read_persistent_clock(struct timespec *ts)
@@ -121,35 +115,30 @@ void read_persistent_clock(struct timespec *ts)
121 ts->tv_nsec = 0; 115 ts->tv_nsec = 0;
122} 116}
123 117
124static void __init plat_perf_setup(void) 118int get_c0_perfcount_int(void)
125{ 119{
126#ifdef MSC01E_INT_BASE
127 if (cpu_has_veic) { 120 if (cpu_has_veic) {
128 set_vi_handler(MSC01E_INT_PERFCTR, mips_perf_dispatch); 121 set_vi_handler(MSC01E_INT_PERFCTR, mips_perf_dispatch);
129 mips_cpu_perf_irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR; 122 mips_cpu_perf_irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
130 } else 123 } else if (gic_present) {
131#endif 124 mips_cpu_perf_irq = gic_get_c0_perfcount_int();
132 if (cp0_perfcount_irq >= 0) { 125 } else if (cp0_perfcount_irq >= 0) {
133 if (cpu_has_vint)
134 set_vi_handler(cp0_perfcount_irq, mips_perf_dispatch);
135 mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; 126 mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
136#ifdef CONFIG_SMP 127 } else {
137 irq_set_handler(mips_cpu_perf_irq, handle_percpu_irq); 128 mips_cpu_perf_irq = -1;
138#endif
139 } 129 }
130
131 return mips_cpu_perf_irq;
140} 132}
141 133
142unsigned int get_c0_compare_int(void) 134unsigned int get_c0_compare_int(void)
143{ 135{
144#ifdef MSC01E_INT_BASE
145 if (cpu_has_veic) { 136 if (cpu_has_veic) {
146 set_vi_handler(MSC01E_INT_CPUCTR, mips_timer_dispatch); 137 set_vi_handler(MSC01E_INT_CPUCTR, mips_timer_dispatch);
147 mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR; 138 mips_cpu_timer_irq = MSC01E_INT_BASE + MSC01E_INT_CPUCTR;
148 } else 139 } else if (gic_present) {
149#endif 140 mips_cpu_timer_irq = gic_get_c0_compare_int();
150 { 141 } else {
151 if (cpu_has_vint)
152 set_vi_handler(cp0_compare_irq, mips_timer_dispatch);
153 mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; 142 mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
154 } 143 }
155 144
@@ -191,16 +180,14 @@ void __init plat_time_init(void)
191 setup_pit_timer(); 180 setup_pit_timer();
192#endif 181#endif
193 182
194#ifdef CONFIG_IRQ_GIC 183#ifdef CONFIG_MIPS_GIC
195 if (gic_present) { 184 if (gic_present) {
196 freq = freqround(gic_frequency, 5000); 185 freq = freqround(gic_frequency, 5000);
197 printk("GIC frequency %d.%02d MHz\n", freq/1000000, 186 printk("GIC frequency %d.%02d MHz\n", freq/1000000,
198 (freq%1000000)*100/1000000); 187 (freq%1000000)*100/1000000);
199#ifdef CONFIG_CSRC_GIC 188#ifdef CONFIG_CLKSRC_MIPS_GIC
200 gic_clocksource_init(gic_frequency); 189 gic_clocksource_init(gic_frequency);
201#endif 190#endif
202 } 191 }
203#endif 192#endif
204
205 plat_perf_setup();
206} 193}
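
The estimate_frequencies() hunk above times both the CP0 Count register and the GIC shared counter across exactly one RTC second, bracketed by the two update-in-progress (UIP) busy-waits, so the raw tick delta is the frequency in Hz. Below is a minimal user-space sketch of the same idea, assuming stand-ins for the hardware: read_counter() fakes a free-running 100 MHz counter, and a wall-clock second edge replaces the RTC UIP flag.

#define _POSIX_C_SOURCE 199309L
#include <stdio.h>
#include <stdint.h>
#include <time.h>

/* Stand-in for read_c0_count()/gic_read_count(): a free-running
 * counter derived from the monotonic clock at a pretend 100 MHz. */
static uint64_t read_counter(void)
{
	struct timespec ts;
	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 100000000ull + ts.tv_nsec / 10;
}

/* Stand-in for the two CMOS_READ(RTC_REG_A) & RTC_UIP busy-waits:
 * returns as soon as a new wall-clock second begins. */
static void wait_for_second_edge(void)
{
	time_t s = time(NULL);
	while (time(NULL) == s)
		;
}

int main(void)
{
	wait_for_second_edge();           /* align to a second boundary */
	uint64_t start = read_counter();  /* initialize counters */
	wait_for_second_edge();           /* exactly one second elapses */
	uint64_t count = read_counter() - start;

	/* ticks per second == counter frequency, printed like the
	 * "GIC frequency %d.%02d MHz" line in plat_time_init() */
	printf("estimated frequency: %llu.%02llu MHz\n",
	       (unsigned long long)(count / 1000000),
	       (unsigned long long)(count % 1000000) * 100 / 1000000);
	return 0;
}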
diff --git a/arch/mips/mti-sead3/sead3-ehci.c b/arch/mips/mti-sead3/sead3-ehci.c
index 772fc056a92d..014dd7ba4d68 100644
--- a/arch/mips/mti-sead3/sead3-ehci.c
+++ b/arch/mips/mti-sead3/sead3-ehci.c
@@ -9,6 +9,9 @@
9#include <linux/irq.h> 9#include <linux/irq.h>
10#include <linux/dma-mapping.h> 10#include <linux/dma-mapping.h>
11#include <linux/platform_device.h> 11#include <linux/platform_device.h>
12#include <linux/irqchip/mips-gic.h>
13
14#include <asm/mips-boards/sead3int.h>
12 15
13struct resource ehci_resources[] = { 16struct resource ehci_resources[] = {
14 { 17 {
@@ -17,7 +20,6 @@ struct resource ehci_resources[] = {
17 .flags = IORESOURCE_MEM 20 .flags = IORESOURCE_MEM
18 }, 21 },
19 { 22 {
20 .start = MIPS_CPU_IRQ_BASE + 2,
21 .flags = IORESOURCE_IRQ 23 .flags = IORESOURCE_IRQ
22 } 24 }
23}; 25};
@@ -37,6 +39,10 @@ static struct platform_device ehci_device = {
37 39
38static int __init ehci_init(void) 40static int __init ehci_init(void)
39{ 41{
42 if (gic_present)
43 ehci_resources[1].start = MIPS_GIC_IRQ_BASE + GIC_INT_EHCI;
44 else
45 ehci_resources[1].start = MIPS_CPU_IRQ_BASE + CPU_INT_EHCI;
40 return platform_device_register(&ehci_device); 46 return platform_device_register(&ehci_device);
41} 47}
42 48
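
Because the SEAD-3 EHCI block may be routed through either the GIC or a CPU interrupt line, the IRQ resource above is no longer filled in statically; it is patched at init time once gic_present is known. The network and UART diffs below repeat the same fixup. Here is a compilable sketch of the pattern, assuming mock resource types and placeholder interrupt numbers rather than the real SEAD-3 values.

#include <stdio.h>

/* Mock equivalents of the kernel's struct resource and IRQ bases */
struct resource { unsigned long start; unsigned long flags; };
#define IORESOURCE_IRQ 0x400

enum { MIPS_CPU_IRQ_BASE = 16, MIPS_GIC_IRQ_BASE = 64 };  /* placeholders */
enum { CPU_INT_EHCI = 2, GIC_INT_EHCI = 5 };              /* placeholders */

static int gic_present;  /* discovered at boot, not known statically */

static struct resource ehci_resources[] = {
	{ .start = 0,                    /* IRQ unknown until init time */
	  .flags = IORESOURCE_IRQ },
};

static void ehci_init(void)
{
	/* Pick the interrupt parent once the hardware probe has run */
	ehci_resources[0].start = gic_present ?
		MIPS_GIC_IRQ_BASE + GIC_INT_EHCI :
		MIPS_CPU_IRQ_BASE + CPU_INT_EHCI;
}

int main(void)
{
	gic_present = 1;
	ehci_init();
	printf("EHCI IRQ: %lu\n", ehci_resources[0].start);
	return 0;
}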
diff --git a/arch/mips/mti-sead3/sead3-int.c b/arch/mips/mti-sead3/sead3-int.c
index 6a560ac03def..e31e17f81eef 100644
--- a/arch/mips/mti-sead3/sead3-int.c
+++ b/arch/mips/mti-sead3/sead3-int.c
@@ -7,9 +7,9 @@
7 */ 7 */
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/irq.h> 9#include <linux/irq.h>
10#include <linux/irqchip/mips-gic.h>
10#include <linux/io.h> 11#include <linux/io.h>
11 12
12#include <asm/gic.h>
13#include <asm/irq_cpu.h> 13#include <asm/irq_cpu.h>
14#include <asm/setup.h> 14#include <asm/setup.h>
15 15
@@ -20,138 +20,23 @@
20#define SEAD_CONFIG_BASE 0x1b100110 20#define SEAD_CONFIG_BASE 0x1b100110
21#define SEAD_CONFIG_SIZE 4 21#define SEAD_CONFIG_SIZE 4
22 22
23static unsigned long sead3_config_reg; 23static void __iomem *sead3_config_reg;
24
25/*
26 * This table defines the setup for each external GIC interrupt. It is
27 * indexed by interrupt number.
28 */
29#define GIC_CPU_NMI GIC_MAP_TO_NMI_MSK
30static struct gic_intr_map gic_intr_map[GIC_NUM_INTRS] = {
31 { 0, GIC_CPU_INT4, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
32 { 0, GIC_CPU_INT3, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
33 { 0, GIC_CPU_INT2, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
34 { 0, GIC_CPU_INT2, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
35 { 0, GIC_CPU_INT1, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
36 { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
37 { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
38 { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
39 { 0, GIC_CPU_INT0, GIC_POL_POS, GIC_TRIG_LEVEL, GIC_FLAG_TRANSPARENT },
40 { GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED },
41 { GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED },
42 { GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED },
43 { GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED },
44 { GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED },
45 { GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED },
46 { GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED, GIC_UNUSED },
47};
48
49asmlinkage void plat_irq_dispatch(void)
50{
51 unsigned int pending = read_c0_cause() & read_c0_status() & ST0_IM;
52 int irq;
53
54 irq = (fls(pending) - CAUSEB_IP - 1);
55 if (irq >= 0)
56 do_IRQ(MIPS_CPU_IRQ_BASE + irq);
57 else
58 spurious_interrupt();
59}
60 24
61void __init arch_init_irq(void) 25void __init arch_init_irq(void)
62{ 26{
63 int i; 27 if (!cpu_has_veic)
64
65 if (!cpu_has_veic) {
66 mips_cpu_irq_init(); 28 mips_cpu_irq_init();
67 29
68 if (cpu_has_vint) { 30 sead3_config_reg = ioremap_nocache(SEAD_CONFIG_BASE, SEAD_CONFIG_SIZE);
69 /* install generic handler */ 31 gic_present = (__raw_readl(sead3_config_reg) &
70 for (i = 0; i < 8; i++) 32 SEAD_CONFIG_GIC_PRESENT_MSK) >>
71 set_vi_handler(i, plat_irq_dispatch);
72 }
73 }
74
75 sead3_config_reg = (unsigned long)ioremap_nocache(SEAD_CONFIG_BASE,
76 SEAD_CONFIG_SIZE);
77 gic_present = (REG32(sead3_config_reg) & SEAD_CONFIG_GIC_PRESENT_MSK) >>
78 SEAD_CONFIG_GIC_PRESENT_SHF; 33 SEAD_CONFIG_GIC_PRESENT_SHF;
79 pr_info("GIC: %spresent\n", (gic_present) ? "" : "not "); 34 pr_info("GIC: %spresent\n", (gic_present) ? "" : "not ");
80 pr_info("EIC: %s\n", 35 pr_info("EIC: %s\n",
81 (current_cpu_data.options & MIPS_CPU_VEIC) ? "on" : "off"); 36 (current_cpu_data.options & MIPS_CPU_VEIC) ? "on" : "off");
82 37
83 if (gic_present) 38 if (gic_present)
84 gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, gic_intr_map, 39 gic_init(GIC_BASE_ADDR, GIC_ADDRSPACE_SZ, CPU_INT_GIC,
85 ARRAY_SIZE(gic_intr_map), MIPS_GIC_IRQ_BASE); 40 MIPS_GIC_IRQ_BASE);
86}
87
88void gic_enable_interrupt(int irq_vec)
89{
90 unsigned int i, irq_source;
91
92 /* enable all the interrupts associated with this vector */
93 for (i = 0; i < gic_shared_intr_map[irq_vec].num_shared_intr; i++) {
94 irq_source = gic_shared_intr_map[irq_vec].intr_list[i];
95 GIC_SET_INTR_MASK(irq_source);
96 }
97 /* enable all local interrupts associated with this vector */
98 if (gic_shared_intr_map[irq_vec].local_intr_mask) {
99 GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), 0);
100 GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_SMASK),
101 gic_shared_intr_map[irq_vec].local_intr_mask);
102 }
103} 41}
104 42
105void gic_disable_interrupt(int irq_vec)
106{
107 unsigned int i, irq_source;
108
109 /* disable all the interrupts associated with this vector */
110 for (i = 0; i < gic_shared_intr_map[irq_vec].num_shared_intr; i++) {
111 irq_source = gic_shared_intr_map[irq_vec].intr_list[i];
112 GIC_CLR_INTR_MASK(irq_source);
113 }
114 /* disable all local interrupts associated with this vector */
115 if (gic_shared_intr_map[irq_vec].local_intr_mask) {
116 GICWRITE(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), 0);
117 GICWRITE(GIC_REG(VPE_OTHER, GIC_VPE_RMASK),
118 gic_shared_intr_map[irq_vec].local_intr_mask);
119 }
120}
121
122void gic_irq_ack(struct irq_data *d)
123{
124 GIC_CLR_INTR_MASK(d->irq - gic_irq_base);
125}
126
127void gic_finish_irq(struct irq_data *d)
128{
129 unsigned int irq = (d->irq - gic_irq_base);
130 unsigned int i, irq_source;
131
132 /* Clear edge detectors. */
133 for (i = 0; i < gic_shared_intr_map[irq].num_shared_intr; i++) {
134 irq_source = gic_shared_intr_map[irq].intr_list[i];
135 if (gic_irq_flags[irq_source] & GIC_TRIG_EDGE)
136 GICWRITE(GIC_REG(SHARED, GIC_SH_WEDGE), irq_source);
137 }
138
139 /* Enable interrupts. */
140 GIC_SET_INTR_MASK(irq);
141}
142
143void __init gic_platform_init(int irqs, struct irq_chip *irq_controller)
144{
145 int i;
146
147 /*
148 * For non-EIC mode, we want to setup the GIC in pass-through
149 * mode, as if the GIC didn't exist. Do not map any interrupts
150 * for an external interrupt controller.
151 */
152 if (!cpu_has_veic)
153 return;
154
155 for (i = gic_irq_base; i < (gic_irq_base + irqs); i++)
156 irq_set_chip_and_handler(i, irq_controller, handle_percpu_irq);
157}
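
The rewritten arch_init_irq() probes for the GIC by remapping a board config register and extracting a single bit with a mask and shift. The sketch below shows that field extraction in isolation, assuming an illustrative bit position and using a plain variable in place of the __raw_readl() of the remapped register.

#include <stdio.h>
#include <stdint.h>

/* Illustrative layout: bit 1 of the SEAD config word = "GIC present".
 * The real shift/mask values live in the SEAD-3 board headers. */
#define SEAD_CONFIG_GIC_PRESENT_SHF	1
#define SEAD_CONFIG_GIC_PRESENT_MSK	(1u << SEAD_CONFIG_GIC_PRESENT_SHF)

static uint32_t field(uint32_t reg, uint32_t mask, unsigned shift)
{
	return (reg & mask) >> shift;	/* isolate, then right-justify */
}

int main(void)
{
	uint32_t config = 0x2;	/* stands in for __raw_readl(sead3_config_reg) */
	int gic_present = field(config, SEAD_CONFIG_GIC_PRESENT_MSK,
				SEAD_CONFIG_GIC_PRESENT_SHF);

	printf("GIC: %spresent\n", gic_present ? "" : "not ");
	return 0;
}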
diff --git a/arch/mips/mti-sead3/sead3-net.c b/arch/mips/mti-sead3/sead3-net.c
index dd11e7eb771c..46176b804576 100644
--- a/arch/mips/mti-sead3/sead3-net.c
+++ b/arch/mips/mti-sead3/sead3-net.c
@@ -7,9 +7,12 @@
7 */ 7 */
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/irq.h> 9#include <linux/irq.h>
10#include <linux/irqchip/mips-gic.h>
10#include <linux/platform_device.h> 11#include <linux/platform_device.h>
11#include <linux/smsc911x.h> 12#include <linux/smsc911x.h>
12 13
14#include <asm/mips-boards/sead3int.h>
15
13static struct smsc911x_platform_config sead3_smsc911x_data = { 16static struct smsc911x_platform_config sead3_smsc911x_data = {
14 .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, 17 .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
15 .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL, 18 .irq_type = SMSC911X_IRQ_TYPE_PUSH_PULL,
@@ -17,14 +20,13 @@ static struct smsc911x_platform_config sead3_smsc911x_data = {
17 .phy_interface = PHY_INTERFACE_MODE_MII, 20 .phy_interface = PHY_INTERFACE_MODE_MII,
18}; 21};
19 22
20struct resource sead3_net_resourcess[] = { 23struct resource sead3_net_resources[] = {
21 { 24 {
22 .start = 0x1f010000, 25 .start = 0x1f010000,
23 .end = 0x1f01ffff, 26 .end = 0x1f01ffff,
24 .flags = IORESOURCE_MEM 27 .flags = IORESOURCE_MEM
25 }, 28 },
26 { 29 {
27 .start = MIPS_CPU_IRQ_BASE + 6,
28 .flags = IORESOURCE_IRQ 30 .flags = IORESOURCE_IRQ
29 } 31 }
30}; 32};
@@ -35,12 +37,16 @@ static struct platform_device sead3_net_device = {
35 .dev = { 37 .dev = {
36 .platform_data = &sead3_smsc911x_data, 38 .platform_data = &sead3_smsc911x_data,
37 }, 39 },
38 .num_resources = ARRAY_SIZE(sead3_net_resourcess), 40 .num_resources = ARRAY_SIZE(sead3_net_resources),
39 .resource = sead3_net_resourcess 41 .resource = sead3_net_resources
40}; 42};
41 43
42static int __init sead3_net_init(void) 44static int __init sead3_net_init(void)
43{ 45{
46 if (gic_present)
47 sead3_net_resources[1].start = MIPS_GIC_IRQ_BASE + GIC_INT_NET;
48 else
49 sead3_net_resources[1].start = MIPS_CPU_IRQ_BASE + CPU_INT_NET;
44 return platform_device_register(&sead3_net_device); 50 return platform_device_register(&sead3_net_device);
45} 51}
46 52
diff --git a/arch/mips/mti-sead3/sead3-platform.c b/arch/mips/mti-sead3/sead3-platform.c
index 6c3b33dbed18..53ee6f1f018d 100644
--- a/arch/mips/mti-sead3/sead3-platform.c
+++ b/arch/mips/mti-sead3/sead3-platform.c
@@ -7,12 +7,15 @@
7 */ 7 */
8#include <linux/module.h> 8#include <linux/module.h>
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/irqchip/mips-gic.h>
10#include <linux/serial_8250.h> 11#include <linux/serial_8250.h>
11 12
12#define UART(base, int) \ 13#include <asm/mips-boards/sead3int.h>
14
15#define UART(base) \
13{ \ 16{ \
14 .mapbase = base, \ 17 .mapbase = base, \
15 .irq = int, \ 18 .irq = -1, \
16 .uartclk = 14745600, \ 19 .uartclk = 14745600, \
17 .iotype = UPIO_MEM32, \ 20 .iotype = UPIO_MEM32, \
18 .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, \ 21 .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, \
@@ -20,8 +23,8 @@
20} 23}
21 24
22static struct plat_serial8250_port uart8250_data[] = { 25static struct plat_serial8250_port uart8250_data[] = {
23 UART(0x1f000900, MIPS_CPU_IRQ_BASE + 4), /* ttyS0 = USB */ 26 UART(0x1f000900), /* ttyS0 = USB */
24 UART(0x1f000800, MIPS_CPU_IRQ_BASE + 4), /* ttyS1 = RS232 */ 27 UART(0x1f000800), /* ttyS1 = RS232 */
25 { }, 28 { },
26}; 29};
27 30
@@ -35,6 +38,13 @@ static struct platform_device uart8250_device = {
35 38
36static int __init uart8250_init(void) 39static int __init uart8250_init(void)
37{ 40{
41 if (gic_present) {
42 uart8250_data[0].irq = MIPS_GIC_IRQ_BASE + GIC_INT_UART0;
43 uart8250_data[1].irq = MIPS_GIC_IRQ_BASE + GIC_INT_UART1;
44 } else {
45 uart8250_data[0].irq = MIPS_CPU_IRQ_BASE + CPU_INT_UART0;
46 uart8250_data[1].irq = MIPS_CPU_IRQ_BASE + CPU_INT_UART1;
47 }
38 return platform_device_register(&uart8250_device); 48 return platform_device_register(&uart8250_device);
39} 49}
40 50
diff --git a/arch/mips/mti-sead3/sead3-serial.c b/arch/mips/mti-sead3/sead3-serial.c
deleted file mode 100644
index bc52705bbee4..000000000000
--- a/arch/mips/mti-sead3/sead3-serial.c
+++ /dev/null
@@ -1,45 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7 */
8#include <linux/module.h>
9#include <linux/init.h>
10#include <linux/serial_8250.h>
11
12#define UART(base, int) \
13{ \
14 .mapbase = base, \
15 .irq = int, \
16 .uartclk = 14745600, \
17 .iotype = UPIO_MEM32, \
18 .flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_IOREMAP, \
19 .regshift = 2, \
20}
21
22static struct plat_serial8250_port uart8250_data[] = {
23 UART(0x1f000900, MIPS_CPU_IRQ_BASE + 4), /* ttyS0 = USB */
24 UART(0x1f000800, MIPS_CPU_IRQ_BASE + 4), /* ttyS1 = RS232 */
25 { },
26};
27
28static struct platform_device uart8250_device = {
29 .name = "serial8250",
30 .id = PLAT8250_DEV_PLATFORM,
31 .dev = {
32 .platform_data = uart8250_data,
33 },
34};
35
36static int __init uart8250_init(void)
37{
38 return platform_device_register(&uart8250_device);
39}
40
41module_init(uart8250_init);
42
43MODULE_AUTHOR("Chris Dearman <chris@mips.com>");
44MODULE_LICENSE("GPL");
45MODULE_DESCRIPTION("8250 UART probe driver for the SEAD-3 platform");
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index 678d03d53c60..ec1dd2491f96 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -6,6 +6,7 @@
6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7 */ 7 */
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/irqchip/mips-gic.h>
9 10
10#include <asm/cpu.h> 11#include <asm/cpu.h>
11#include <asm/setup.h> 12#include <asm/setup.h>
@@ -13,19 +14,6 @@
13#include <asm/irq.h> 14#include <asm/irq.h>
14#include <asm/mips-boards/generic.h> 15#include <asm/mips-boards/generic.h>
15 16
16static int mips_cpu_timer_irq;
17static int mips_cpu_perf_irq;
18
19static void mips_timer_dispatch(void)
20{
21 do_IRQ(mips_cpu_timer_irq);
22}
23
24static void mips_perf_dispatch(void)
25{
26 do_IRQ(mips_cpu_perf_irq);
27}
28
29static void __iomem *status_reg = (void __iomem *)0xbf000410; 17static void __iomem *status_reg = (void __iomem *)0xbf000410;
30 18
31/* 19/*
@@ -81,21 +69,20 @@ void read_persistent_clock(struct timespec *ts)
81 ts->tv_nsec = 0; 69 ts->tv_nsec = 0;
82} 70}
83 71
84static void __init plat_perf_setup(void) 72int get_c0_perfcount_int(void)
85{ 73{
86 if (cp0_perfcount_irq >= 0) { 74 if (gic_present)
 87 if (cpu_has_vint) 75 return gic_get_c0_perfcount_int();
88 set_vi_handler(cp0_perfcount_irq, mips_perf_dispatch); 76 if (cp0_perfcount_irq >= 0)
89 mips_cpu_perf_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; 77 return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
90 } 78 return -1;
91} 79}
92 80
93unsigned int get_c0_compare_int(void) 81unsigned int get_c0_compare_int(void)
94{ 82{
95 if (cpu_has_vint) 83 if (gic_present)
96 set_vi_handler(cp0_compare_irq, mips_timer_dispatch); 84 return gic_get_c0_compare_int();
97 mips_cpu_timer_irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq; 85 return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
98 return mips_cpu_timer_irq;
99} 86}
100 87
101void __init plat_time_init(void) 88void __init plat_time_init(void)
@@ -108,6 +95,4 @@ void __init plat_time_init(void)
108 (est_freq % 1000000) * 100 / 1000000); 95 (est_freq % 1000000) * 100 / 1000000);
109 96
110 mips_scroll_message(); 97 mips_scroll_message();
111
112 plat_perf_setup();
113} 98}
diff --git a/arch/mips/oprofile/Makefile b/arch/mips/oprofile/Makefile
index 9c0a6782c091..070afdb297df 100644
--- a/arch/mips/oprofile/Makefile
+++ b/arch/mips/oprofile/Makefile
@@ -14,3 +14,4 @@ oprofile-$(CONFIG_CPU_R10000) += op_model_mipsxx.o
14oprofile-$(CONFIG_CPU_SB1) += op_model_mipsxx.o 14oprofile-$(CONFIG_CPU_SB1) += op_model_mipsxx.o
15oprofile-$(CONFIG_CPU_XLR) += op_model_mipsxx.o 15oprofile-$(CONFIG_CPU_XLR) += op_model_mipsxx.o
16oprofile-$(CONFIG_CPU_LOONGSON2) += op_model_loongson2.o 16oprofile-$(CONFIG_CPU_LOONGSON2) += op_model_loongson2.o
17oprofile-$(CONFIG_CPU_LOONGSON3) += op_model_loongson3.o
diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c
index 83a1dfd8f0e3..5e645c9a3162 100644
--- a/arch/mips/oprofile/backtrace.c
+++ b/arch/mips/oprofile/backtrace.c
@@ -65,7 +65,7 @@ static inline int is_end_of_function_marker(union mips_instruction *ip)
65 * - handle cases where the stack is adjusted inside a function 65 * - handle cases where the stack is adjusted inside a function
66 * (generally doesn't happen) 66 * (generally doesn't happen)
67 * - find optimal value for max_instr_check 67 * - find optimal value for max_instr_check
68 * - try to find a way to handle leaf functions 68 * - try to find a better way to handle leaf functions
69 */ 69 */
70 70
71static inline int unwind_user_frame(struct stackframe *old_frame, 71static inline int unwind_user_frame(struct stackframe *old_frame,
@@ -104,7 +104,7 @@ static inline int unwind_user_frame(struct stackframe *old_frame,
104 } 104 }
105 105
106 if (!ra_offset || !stack_size) 106 if (!ra_offset || !stack_size)
107 return -1; 107 goto done;
108 108
109 if (ra_offset) { 109 if (ra_offset) {
110 new_frame.ra = old_frame->sp + ra_offset; 110 new_frame.ra = old_frame->sp + ra_offset;
@@ -121,6 +121,7 @@ static inline int unwind_user_frame(struct stackframe *old_frame,
121 if (new_frame.sp > old_frame->sp) 121 if (new_frame.sp > old_frame->sp)
122 return -2; 122 return -2;
123 123
124done:
124 new_frame.pc = old_frame->ra; 125 new_frame.pc = old_frame->ra;
125 *old_frame = new_frame; 126 *old_frame = new_frame;
126 127
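
The backtrace.c change turns the "no frame info found" case from a hard error into a leaf-function fallback: when the prologue scan finds neither a saved-ra offset nor a stack adjustment, the caller's pc is taken directly from ra. A compact sketch of that control flow follows; the struct mirrors the kernel's stackframe, but the user-memory loads and sanity checks are elided and the sample values invented.

#include <stdio.h>

struct stackframe { unsigned long sp, pc, ra; };

/* Advance *frame one call level.  With no ra_offset/stack_size the
 * function is assumed to be a leaf: sp and ra stay put and pc comes
 * straight from ra (the new 'goto done' path in the diff). */
static int unwind_user_frame(struct stackframe *frame,
			     unsigned long ra_offset,
			     unsigned long stack_size)
{
	struct stackframe new_frame = *frame;

	if (!ra_offset || !stack_size)
		goto done;	/* leaf function: nothing saved on stack */

	/* non-leaf: the kernel reloads these from the user stack here */
	new_frame.ra = frame->sp + ra_offset;
	new_frame.sp = frame->sp + stack_size;

done:
	new_frame.pc = frame->ra;
	*frame = new_frame;
	return 0;
}

int main(void)
{
	struct stackframe f = { .sp = 0x7fff0000, .pc = 0x400100,
				.ra = 0x400200 };
	unwind_user_frame(&f, 0, 0);		/* leaf case */
	printf("caller pc: %#lx\n", f.pc);	/* 0x400200, from ra */
	return 0;
}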
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index e74732449478..a26cbe372e06 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -18,6 +18,7 @@
18 18
19extern struct op_mips_model op_model_mipsxx_ops __weak; 19extern struct op_mips_model op_model_mipsxx_ops __weak;
20extern struct op_mips_model op_model_loongson2_ops __weak; 20extern struct op_mips_model op_model_loongson2_ops __weak;
21extern struct op_mips_model op_model_loongson3_ops __weak;
21 22
22static struct op_mips_model *model; 23static struct op_mips_model *model;
23 24
@@ -104,8 +105,17 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
104 case CPU_LOONGSON2: 105 case CPU_LOONGSON2:
105 lmodel = &op_model_loongson2_ops; 106 lmodel = &op_model_loongson2_ops;
106 break; 107 break;
108 case CPU_LOONGSON3:
109 lmodel = &op_model_loongson3_ops;
110 break;
107 }; 111 };
108 112
113 /*
114 * Always set the backtrace. This allows unsupported CPU types to still
115 * use timer-based oprofile.
116 */
117 ops->backtrace = op_mips_backtrace;
118
109 if (!lmodel) 119 if (!lmodel)
110 return -ENODEV; 120 return -ENODEV;
111 121
@@ -121,7 +131,6 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
121 ops->start = op_mips_start; 131 ops->start = op_mips_start;
122 ops->stop = op_mips_stop; 132 ops->stop = op_mips_stop;
123 ops->cpu_type = lmodel->cpu_type; 133 ops->cpu_type = lmodel->cpu_type;
124 ops->backtrace = op_mips_backtrace;
125 134
126 printk(KERN_INFO "oprofile: using %s performance monitoring.\n", 135 printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
127 lmodel->cpu_type); 136 lmodel->cpu_type);
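
Hoisting the ops->backtrace assignment above the model check means that even when no counter model matches and -ENODEV is returned, oprofile's timer mode still receives a usable backtrace hook. A tiny sketch of that ordering, with invented mock types:

#include <stdio.h>

struct ops { void (*backtrace)(void); const char *cpu_type; };

static void my_backtrace(void) { puts("backtrace"); }

/* Returns 0 when a hardware model is available, -1 otherwise.  The
 * backtrace hook is filled in either way, so a timer-based fallback
 * still gets it. */
static int arch_init(struct ops *o, const char *model)
{
	o->backtrace = my_backtrace;	/* always set, before the bail-out */

	if (!model)
		return -1;		/* no counters: timer mode only */

	o->cpu_type = model;
	return 0;
}

int main(void)
{
	struct ops o = { 0 };
	arch_init(&o, NULL);		/* unsupported CPU */
	o.backtrace();			/* still usable */
	return 0;
}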
diff --git a/arch/mips/oprofile/op_model_loongson3.c b/arch/mips/oprofile/op_model_loongson3.c
new file mode 100644
index 000000000000..8bcf7fc40f0d
--- /dev/null
+++ b/arch/mips/oprofile/op_model_loongson3.c
@@ -0,0 +1,220 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 */
7#include <linux/init.h>
8#include <linux/cpu.h>
9#include <linux/smp.h>
10#include <linux/proc_fs.h>
11#include <linux/oprofile.h>
12#include <linux/spinlock.h>
13#include <linux/interrupt.h>
14#include <asm/uaccess.h>
15#include <irq.h>
16#include <loongson.h>
17#include "op_impl.h"
18
19#define LOONGSON3_PERFCNT_OVERFLOW (1ULL << 63)
20
21#define LOONGSON3_PERFCTRL_EXL (1UL << 0)
22#define LOONGSON3_PERFCTRL_KERNEL (1UL << 1)
23#define LOONGSON3_PERFCTRL_SUPERVISOR (1UL << 2)
24#define LOONGSON3_PERFCTRL_USER (1UL << 3)
25#define LOONGSON3_PERFCTRL_ENABLE (1UL << 4)
26#define LOONGSON3_PERFCTRL_W (1UL << 30)
27#define LOONGSON3_PERFCTRL_M (1UL << 31)
28#define LOONGSON3_PERFCTRL_EVENT(idx, event) \
29 (((event) & (idx ? 0x0f : 0x3f)) << 5)
30
31/* Loongson-3 PerfCount performance counter1 register */
32#define read_c0_perflo1() __read_64bit_c0_register($25, 0)
33#define write_c0_perflo1(val) __write_64bit_c0_register($25, 0, val)
34#define read_c0_perfhi1() __read_64bit_c0_register($25, 1)
35#define write_c0_perfhi1(val) __write_64bit_c0_register($25, 1, val)
36
37/* Loongson-3 PerfCount performance counter2 register */
38#define read_c0_perflo2() __read_64bit_c0_register($25, 2)
39#define write_c0_perflo2(val) __write_64bit_c0_register($25, 2, val)
40#define read_c0_perfhi2() __read_64bit_c0_register($25, 3)
41#define write_c0_perfhi2(val) __write_64bit_c0_register($25, 3, val)
42
43static int (*save_perf_irq)(void);
44
45static struct loongson3_register_config {
46 unsigned int control1;
47 unsigned int control2;
48 unsigned long long reset_counter1;
49 unsigned long long reset_counter2;
50 int ctr1_enable, ctr2_enable;
51} reg;
52
53static void reset_counters(void *arg)
54{
55 write_c0_perfhi1(0);
56 write_c0_perfhi2(0);
57 write_c0_perflo1(0xc0000000);
58 write_c0_perflo2(0x40000000);
59}
60
61/* Compute all of the registers in preparation for enabling profiling. */
62static void loongson3_reg_setup(struct op_counter_config *ctr)
63{
64 unsigned int control1 = 0;
65 unsigned int control2 = 0;
66
67 reg.reset_counter1 = 0;
68 reg.reset_counter2 = 0;
69 /* Compute the performance counter control word. */
70 /* For now count kernel and user mode */
71 if (ctr[0].enabled) {
72 control1 |= LOONGSON3_PERFCTRL_EVENT(0, ctr[0].event) |
73 LOONGSON3_PERFCTRL_ENABLE;
74 if (ctr[0].kernel)
75 control1 |= LOONGSON3_PERFCTRL_KERNEL;
76 if (ctr[0].user)
77 control1 |= LOONGSON3_PERFCTRL_USER;
78 reg.reset_counter1 = 0x8000000000000000ULL - ctr[0].count;
79 }
80
81 if (ctr[1].enabled) {
82 control2 |= LOONGSON3_PERFCTRL_EVENT(1, ctr[1].event) |
83 LOONGSON3_PERFCTRL_ENABLE;
84 if (ctr[1].kernel)
85 control2 |= LOONGSON3_PERFCTRL_KERNEL;
86 if (ctr[1].user)
87 control2 |= LOONGSON3_PERFCTRL_USER;
88 reg.reset_counter2 = 0x8000000000000000ULL - ctr[1].count;
89 }
90
91 if (ctr[0].enabled)
92 control1 |= LOONGSON3_PERFCTRL_EXL;
93 if (ctr[1].enabled)
94 control2 |= LOONGSON3_PERFCTRL_EXL;
95
96 reg.control1 = control1;
97 reg.control2 = control2;
98 reg.ctr1_enable = ctr[0].enabled;
99 reg.ctr2_enable = ctr[1].enabled;
100}
101
102/* Program all of the registers in preparation for enabling profiling. */
103static void loongson3_cpu_setup(void *args)
104{
105 uint64_t perfcount1, perfcount2;
106
107 perfcount1 = reg.reset_counter1;
108 perfcount2 = reg.reset_counter2;
109 write_c0_perfhi1(perfcount1);
110 write_c0_perfhi2(perfcount2);
111}
112
113static void loongson3_cpu_start(void *args)
114{
115 /* Start all counters on current CPU */
116 reg.control1 |= (LOONGSON3_PERFCTRL_W|LOONGSON3_PERFCTRL_M);
117 reg.control2 |= (LOONGSON3_PERFCTRL_W|LOONGSON3_PERFCTRL_M);
118
119 if (reg.ctr1_enable)
120 write_c0_perflo1(reg.control1);
121 if (reg.ctr2_enable)
122 write_c0_perflo2(reg.control2);
123}
124
125static void loongson3_cpu_stop(void *args)
126{
127 /* Stop all counters on current CPU */
128 write_c0_perflo1(0xc0000000);
129 write_c0_perflo2(0x40000000);
130 memset(&reg, 0, sizeof(reg));
131}
132
133static int loongson3_perfcount_handler(void)
134{
135 unsigned long flags;
136 uint64_t counter1, counter2;
137 uint32_t cause, handled = IRQ_NONE;
138 struct pt_regs *regs = get_irq_regs();
139
140 cause = read_c0_cause();
141 if (!(cause & CAUSEF_PCI))
142 return handled;
143
144 counter1 = read_c0_perfhi1();
145 counter2 = read_c0_perfhi2();
146
147 local_irq_save(flags);
148
149 if (counter1 & LOONGSON3_PERFCNT_OVERFLOW) {
150 if (reg.ctr1_enable)
151 oprofile_add_sample(regs, 0);
152 counter1 = reg.reset_counter1;
153 }
154 if (counter2 & LOONGSON3_PERFCNT_OVERFLOW) {
155 if (reg.ctr2_enable)
156 oprofile_add_sample(regs, 1);
157 counter2 = reg.reset_counter2;
158 }
159
160 local_irq_restore(flags);
161
162 write_c0_perfhi1(counter1);
163 write_c0_perfhi2(counter2);
164
165 if (!(cause & CAUSEF_TI))
166 handled = IRQ_HANDLED;
167
168 return handled;
169}
170
171static int loongson3_cpu_callback(struct notifier_block *nfb,
172 unsigned long action, void *hcpu)
173{
174 switch (action) {
175 case CPU_STARTING:
176 case CPU_STARTING_FROZEN:
177 write_c0_perflo1(reg.control1);
178 write_c0_perflo2(reg.control2);
179 break;
180 case CPU_DYING:
181 case CPU_DYING_FROZEN:
182 write_c0_perflo1(0xc0000000);
183 write_c0_perflo2(0x40000000);
184 break;
185 }
186
187 return NOTIFY_OK;
188}
189
190static struct notifier_block loongson3_notifier_block = {
191 .notifier_call = loongson3_cpu_callback
192};
193
194static int __init loongson3_init(void)
195{
196 on_each_cpu(reset_counters, NULL, 1);
197 register_hotcpu_notifier(&loongson3_notifier_block);
198 save_perf_irq = perf_irq;
199 perf_irq = loongson3_perfcount_handler;
200
201 return 0;
202}
203
204static void loongson3_exit(void)
205{
206 on_each_cpu(reset_counters, NULL, 1);
207 unregister_hotcpu_notifier(&loongson3_notifier_block);
208 perf_irq = save_perf_irq;
209}
210
211struct op_mips_model op_model_loongson3_ops = {
212 .reg_setup = loongson3_reg_setup,
213 .cpu_setup = loongson3_cpu_setup,
214 .init = loongson3_init,
215 .exit = loongson3_exit,
216 .cpu_start = loongson3_cpu_start,
217 .cpu_stop = loongson3_cpu_stop,
218 .cpu_type = "mips/loongson3",
219 .num_counters = 2
220};
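
The Loongson-3 counters are 64 bits wide with bit 63 doubling as the overflow flag, so setting reg.reset_counterN to 2^63 - count makes a counter overflow after exactly count events, which is the condition loongson3_perfcount_handler() tests. A self-contained check of that arithmetic:

#include <stdio.h>
#include <stdint.h>

#define PERFCNT_OVERFLOW (1ULL << 63)	/* LOONGSON3_PERFCNT_OVERFLOW */

int main(void)
{
	uint64_t count = 100000;			/* sampling period */
	uint64_t reset = 0x8000000000000000ULL - count;	/* reset_counter */

	/* After 'count' increments the counter reaches 2^63, setting
	 * the overflow bit that the IRQ handler looks for. */
	uint64_t counter = reset + count;
	printf("overflow after %llu events: %s\n",
	       (unsigned long long)count,
	       (counter & PERFCNT_OVERFLOW) ? "yes" : "no");
	return 0;
}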
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 42821ae2d77e..01f721a85c5b 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -11,6 +11,7 @@
11#include <linux/interrupt.h> 11#include <linux/interrupt.h>
12#include <linux/smp.h> 12#include <linux/smp.h>
13#include <asm/irq_regs.h> 13#include <asm/irq_regs.h>
14#include <asm/time.h>
14 15
15#include "op_impl.h" 16#include "op_impl.h"
16 17
@@ -35,6 +36,7 @@
35#define M_PERFCTL_COUNT_ALL_THREADS (1UL << 13) 36#define M_PERFCTL_COUNT_ALL_THREADS (1UL << 13)
36 37
37static int (*save_perf_irq)(void); 38static int (*save_perf_irq)(void);
39static int perfcount_irq;
38 40
39/* 41/*
40 * XLR has only one set of counters per core. Designate the 42 * XLR has only one set of counters per core. Designate the
@@ -431,8 +433,16 @@ static int __init mipsxx_init(void)
431 save_perf_irq = perf_irq; 433 save_perf_irq = perf_irq;
432 perf_irq = mipsxx_perfcount_handler; 434 perf_irq = mipsxx_perfcount_handler;
433 435
434 if ((cp0_perfcount_irq >= 0) && (cp0_compare_irq != cp0_perfcount_irq)) 436 if (get_c0_perfcount_int)
435 return request_irq(cp0_perfcount_irq, mipsxx_perfcount_int, 437 perfcount_irq = get_c0_perfcount_int();
438 else if ((cp0_perfcount_irq >= 0) &&
439 (cp0_compare_irq != cp0_perfcount_irq))
440 perfcount_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
441 else
442 perfcount_irq = -1;
443
444 if (perfcount_irq >= 0)
445 return request_irq(perfcount_irq, mipsxx_perfcount_int,
436 0, "Perfcounter", save_perf_irq); 446 0, "Perfcounter", save_perf_irq);
437 447
438 return 0; 448 return 0;
@@ -442,8 +452,8 @@ static void mipsxx_exit(void)
442{ 452{
443 int counters = op_model_mipsxx_ops.num_counters; 453 int counters = op_model_mipsxx_ops.num_counters;
444 454
445 if ((cp0_perfcount_irq >= 0) && (cp0_compare_irq != cp0_perfcount_irq)) 455 if (perfcount_irq >= 0)
446 free_irq(cp0_perfcount_irq, save_perf_irq); 456 free_irq(perfcount_irq, save_perf_irq);
447 457
448 counters = counters_per_cpu_to_total(counters); 458 counters = counters_per_cpu_to_total(counters);
449 on_each_cpu(reset_counters, (void *)(long)counters, 1); 459 on_each_cpu(reset_counters, (void *)(long)counters, 1);
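
mipsxx_init() now asks the platform for the performance counter interrupt via get_c0_perfcount_int(), testing the symbol's address first; that only works because the declaration is weak, so a platform that provides no override leaves the address NULL. A minimal sketch of the weak-symbol pattern, assuming a GCC/Clang toolchain on an ELF target (the surrounding program is invented):

#include <stdio.h>

/* Weak declaration: if nothing in the link defines this symbol, its
 * address evaluates to NULL and the caller takes the fallback path. */
extern int get_c0_perfcount_int(void) __attribute__((weak));

static int pick_perfcount_irq(void)
{
	if (get_c0_perfcount_int)	/* platform override present? */
		return get_c0_perfcount_int();
	return -1;			/* no dedicated perf interrupt */
}

/* Comment this definition out to exercise the fallback path. */
int get_c0_perfcount_int(void) { return 42; }

int main(void)
{
	printf("perfcount irq: %d\n", pick_perfcount_irq());
	return 0;
}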
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index 6523d558ff5a..300591c6278d 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_BCM47XX) += pci-bcm47xx.o
19obj-$(CONFIG_BCM63XX) += pci-bcm63xx.o fixup-bcm63xx.o \ 19obj-$(CONFIG_BCM63XX) += pci-bcm63xx.o fixup-bcm63xx.o \
20 ops-bcm63xx.o 20 ops-bcm63xx.o
21obj-$(CONFIG_MIPS_ALCHEMY) += pci-alchemy.o 21obj-$(CONFIG_MIPS_ALCHEMY) += pci-alchemy.o
22obj-$(CONFIG_PCI_AR2315) += pci-ar2315.o
22obj-$(CONFIG_SOC_AR71XX) += pci-ar71xx.o 23obj-$(CONFIG_SOC_AR71XX) += pci-ar71xx.o
23obj-$(CONFIG_PCI_AR724X) += pci-ar724x.o 24obj-$(CONFIG_PCI_AR724X) += pci-ar724x.o
24obj-$(CONFIG_MIPS_PCI_VIRTIO) += pci-virtio-guest.o 25obj-$(CONFIG_MIPS_PCI_VIRTIO) += pci-virtio-guest.o
@@ -42,6 +43,7 @@ obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1480.o pci-bcm1480ht.o
42obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o 43obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o
43obj-$(CONFIG_LANTIQ) += fixup-lantiq.o 44obj-$(CONFIG_LANTIQ) += fixup-lantiq.o
44obj-$(CONFIG_PCI_LANTIQ) += pci-lantiq.o ops-lantiq.o 45obj-$(CONFIG_PCI_LANTIQ) += pci-lantiq.o ops-lantiq.o
46obj-$(CONFIG_SOC_RT2880) += pci-rt2880.o
45obj-$(CONFIG_SOC_RT3883) += pci-rt3883.o 47obj-$(CONFIG_SOC_RT3883) += pci-rt3883.o
46obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o 48obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o
47obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o 49obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o
diff --git a/arch/mips/pci/ops-bcm63xx.c b/arch/mips/pci/ops-bcm63xx.c
index 13eea696bbe7..d02eb9d16b55 100644
--- a/arch/mips/pci/ops-bcm63xx.c
+++ b/arch/mips/pci/ops-bcm63xx.c
@@ -469,7 +469,7 @@ static int bcm63xx_pcie_can_access(struct pci_bus *bus, int devfn)
469{ 469{
470 switch (bus->number) { 470 switch (bus->number) {
471 case PCIE_BUS_BRIDGE: 471 case PCIE_BUS_BRIDGE:
472 return (PCI_SLOT(devfn) == 0); 472 return PCI_SLOT(devfn) == 0;
473 case PCIE_BUS_DEVICE: 473 case PCIE_BUS_DEVICE:
474 if (PCI_SLOT(devfn) == 0) 474 if (PCI_SLOT(devfn) == 0)
475 return bcm_pcie_readl(PCIE_DLSTATUS_REG) 475 return bcm_pcie_readl(PCIE_DLSTATUS_REG)
diff --git a/arch/mips/pci/ops-nile4.c b/arch/mips/pci/ops-nile4.c
index a1a7c9f4096e..b9d1fd0ff7e2 100644
--- a/arch/mips/pci/ops-nile4.c
+++ b/arch/mips/pci/ops-nile4.c
@@ -13,8 +13,6 @@
13 13
14volatile unsigned long *const vrc_pciregs = (void *) Vrc5074_BASE; 14volatile unsigned long *const vrc_pciregs = (void *) Vrc5074_BASE;
15 15
16static DEFINE_SPINLOCK(nile4_pci_lock);
17
18static int nile4_pcibios_config_access(unsigned char access_type, 16static int nile4_pcibios_config_access(unsigned char access_type,
19 struct pci_bus *bus, unsigned int devfn, int where, u32 *val) 17 struct pci_bus *bus, unsigned int devfn, int where, u32 *val)
20{ 18{
@@ -76,7 +74,6 @@ static int nile4_pcibios_config_access(unsigned char access_type,
76static int nile4_pcibios_read(struct pci_bus *bus, unsigned int devfn, 74static int nile4_pcibios_read(struct pci_bus *bus, unsigned int devfn,
77 int where, int size, u32 *val) 75 int where, int size, u32 *val)
78{ 76{
79 unsigned long flags;
80 u32 data = 0; 77 u32 data = 0;
81 int err; 78 int err;
82 79
@@ -85,11 +82,8 @@ static int nile4_pcibios_read(struct pci_bus *bus, unsigned int devfn,
85 else if ((size == 4) && (where & 3)) 82 else if ((size == 4) && (where & 3))
86 return PCIBIOS_BAD_REGISTER_NUMBER; 83 return PCIBIOS_BAD_REGISTER_NUMBER;
87 84
88 spin_lock_irqsave(&nile4_pci_lock, flags);
89 err = nile4_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where, 85 err = nile4_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where,
90 &data); 86 &data);
91 spin_unlock_irqrestore(&nile4_pci_lock, flags);
92
93 if (err) 87 if (err)
94 return err; 88 return err;
95 89
@@ -106,7 +100,6 @@ static int nile4_pcibios_read(struct pci_bus *bus, unsigned int devfn,
106static int nile4_pcibios_write(struct pci_bus *bus, unsigned int devfn, 100static int nile4_pcibios_write(struct pci_bus *bus, unsigned int devfn,
107 int where, int size, u32 val) 101 int where, int size, u32 val)
108{ 102{
109 unsigned long flags;
110 u32 data = 0; 103 u32 data = 0;
111 int err; 104 int err;
112 105
@@ -115,11 +108,8 @@ static int nile4_pcibios_write(struct pci_bus *bus, unsigned int devfn,
115 else if ((size == 4) && (where & 3)) 108 else if ((size == 4) && (where & 3))
116 return PCIBIOS_BAD_REGISTER_NUMBER; 109 return PCIBIOS_BAD_REGISTER_NUMBER;
117 110
118 spin_lock_irqsave(&nile4_pci_lock, flags);
119 err = nile4_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where, 111 err = nile4_pcibios_config_access(PCI_ACCESS_READ, bus, devfn, where,
120 &data); 112 &data);
121 spin_unlock_irqrestore(&nile4_pci_lock, flags);
122
123 if (err) 113 if (err)
124 return err; 114 return err;
125 115
diff --git a/arch/mips/pci/ops-pmcmsp.c b/arch/mips/pci/ops-pmcmsp.c
index 50034f985be1..dd2d9f7e9412 100644
--- a/arch/mips/pci/ops-pmcmsp.c
+++ b/arch/mips/pci/ops-pmcmsp.c
@@ -193,8 +193,6 @@ static void pci_proc_init(void)
193} 193}
194#endif /* CONFIG_PROC_FS && PCI_COUNTERS */ 194#endif /* CONFIG_PROC_FS && PCI_COUNTERS */
195 195
196static DEFINE_SPINLOCK(bpci_lock);
197
198/***************************************************************************** 196/*****************************************************************************
199 * 197 *
200 * STRUCT: pci_io_resource 198 * STRUCT: pci_io_resource
@@ -368,7 +366,6 @@ int msp_pcibios_config_access(unsigned char access_type,
368 struct msp_pci_regs *preg = (void *)PCI_BASE_REG; 366 struct msp_pci_regs *preg = (void *)PCI_BASE_REG;
369 unsigned char bus_num = bus->number; 367 unsigned char bus_num = bus->number;
370 unsigned char dev_fn = (unsigned char)devfn; 368 unsigned char dev_fn = (unsigned char)devfn;
371 unsigned long flags;
372 unsigned long intr; 369 unsigned long intr;
373 unsigned long value; 370 unsigned long value;
374 static char pciirqflag; 371 static char pciirqflag;
@@ -401,10 +398,7 @@ int msp_pcibios_config_access(unsigned char access_type,
401 } 398 }
402 399
403#if defined(CONFIG_PMC_MSP7120_GW) || defined(CONFIG_PMC_MSP7120_EVAL) 400#if defined(CONFIG_PMC_MSP7120_GW) || defined(CONFIG_PMC_MSP7120_EVAL)
404 local_irq_save(flags);
405 vpe_status = dvpe(); 401 vpe_status = dvpe();
406#else
407 spin_lock_irqsave(&bpci_lock, flags);
408#endif 402#endif
409 403
410 /* 404 /*
@@ -457,9 +451,6 @@ int msp_pcibios_config_access(unsigned char access_type,
457 451
458#if defined(CONFIG_PMC_MSP7120_GW) || defined(CONFIG_PMC_MSP7120_EVAL) 452#if defined(CONFIG_PMC_MSP7120_GW) || defined(CONFIG_PMC_MSP7120_EVAL)
459 evpe(vpe_status); 453 evpe(vpe_status);
460 local_irq_restore(flags);
461#else
462 spin_unlock_irqrestore(&bpci_lock, flags);
463#endif 454#endif
464 455
465 return -1; 456 return -1;
@@ -467,9 +458,6 @@ int msp_pcibios_config_access(unsigned char access_type,
467 458
468#if defined(CONFIG_PMC_MSP7120_GW) || defined(CONFIG_PMC_MSP7120_EVAL) 459#if defined(CONFIG_PMC_MSP7120_GW) || defined(CONFIG_PMC_MSP7120_EVAL)
469 evpe(vpe_status); 460 evpe(vpe_status);
470 local_irq_restore(flags);
471#else
472 spin_unlock_irqrestore(&bpci_lock, flags);
473#endif 461#endif
474 462
475 return PCIBIOS_SUCCESSFUL; 463 return PCIBIOS_SUCCESSFUL;
diff --git a/arch/mips/pci/pci-ar2315.c b/arch/mips/pci/pci-ar2315.c
new file mode 100644
index 000000000000..bd2b3b60da83
--- /dev/null
+++ b/arch/mips/pci/pci-ar2315.c
@@ -0,0 +1,511 @@
1/*
2 * This program is free software; you can redistribute it and/or
3 * modify it under the terms of the GNU General Public License
4 * as published by the Free Software Foundation; either version 2
5 * of the License, or (at your option) any later version.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, see <http://www.gnu.org/licenses/>.
14 */
15
16/*
17 * Both the AR2315 and AR2316 chips have a PCI interface unit which supports
18 * DMA and interrupts. The PCI interface supports MMIO access, but does not
19 * seem to support I/O ports.
20 *
21 * A read/write operation in the region 0x80000000-0xBFFFFFFF causes
22 * a memory read/write command on the PCI bus. The 30 LSBs of the address on
23 * the bus are taken from the memory read/write request and the 2 MSBs are
24 * determined by the PCI unit configuration.
25 *
26 * To work with the configuration space instead of memory, it is necessary
27 * to set the CFG_SEL bit in the PCI_MISC_CONFIG register.
28 *
29 * Devices on the bus can perform DMA requests via chip BAR1. The PCI host
30 * controller's BARs are programmed as if an external device were being
31 * configured, which means that during configuration the IDSEL pin of the
32 * chip should be asserted.
33 *
34 * We know of (and support) only one board that uses the PCI interface -
35 * Fonera 2.0g (FON2202). It has a USB EHCI controller connected to the
36 * AR2315 PCI bus. The IDSEL pin of the USB controller is connected to the
37 * AD[13] line; the IDSEL pin of the AR2315 is connected to the AD[16] line.
38 */
39
40#include <linux/types.h>
41#include <linux/pci.h>
42#include <linux/platform_device.h>
43#include <linux/kernel.h>
44#include <linux/init.h>
45#include <linux/mm.h>
46#include <linux/delay.h>
47#include <linux/bitops.h>
48#include <linux/irq.h>
49#include <linux/irqdomain.h>
50#include <linux/io.h>
51#include <asm/paccess.h>
52
53/*
54 * PCI Bus Interface Registers
55 */
56#define AR2315_PCI_1MS_REG 0x0008
57
58#define AR2315_PCI_1MS_MASK 0x3FFFF /* # of AHB clk cycles in 1ms */
59
60#define AR2315_PCI_MISC_CONFIG 0x000c
61
62#define AR2315_PCIMISC_TXD_EN 0x00000001 /* Enable TXD for fragments */
63#define AR2315_PCIMISC_CFG_SEL 0x00000002 /* Mem or Config cycles */
64#define AR2315_PCIMISC_GIG_MASK 0x0000000C /* bits 31-30 for pci req */
65#define AR2315_PCIMISC_RST_MODE 0x00000030
66#define AR2315_PCIRST_INPUT 0x00000000 /* 4:5=0 rst is input */
67#define AR2315_PCIRST_LOW 0x00000010 /* 4:5=1 rst to GND */
68#define AR2315_PCIRST_HIGH 0x00000020 /* 4:5=2 rst to VDD */
69#define AR2315_PCIGRANT_EN 0x00000000 /* 6:7=0 early grant en */
70#define AR2315_PCIGRANT_FRAME 0x00000040 /* 6:7=1 grant waits 4 frame */
71#define AR2315_PCIGRANT_IDLE 0x00000080 /* 6:7=2 grant waits 4 idle */
72#define AR2315_PCIGRANT_GAP 0x00000000 /* 6:7=2 grant waits 4 idle */
73#define AR2315_PCICACHE_DIS 0x00001000 /* PCI external access cache
74 * disable */
75
76#define AR2315_PCI_OUT_TSTAMP 0x0010
77
78#define AR2315_PCI_UNCACHE_CFG 0x0014
79
80#define AR2315_PCI_IN_EN 0x0100
81
82#define AR2315_PCI_IN_EN0 0x01 /* Enable chain 0 */
83#define AR2315_PCI_IN_EN1 0x02 /* Enable chain 1 */
84#define AR2315_PCI_IN_EN2 0x04 /* Enable chain 2 */
85#define AR2315_PCI_IN_EN3 0x08 /* Enable chain 3 */
86
87#define AR2315_PCI_IN_DIS 0x0104
88
89#define AR2315_PCI_IN_DIS0 0x01 /* Disable chain 0 */
90#define AR2315_PCI_IN_DIS1 0x02 /* Disable chain 1 */
91#define AR2315_PCI_IN_DIS2 0x04 /* Disable chain 2 */
92#define AR2315_PCI_IN_DIS3 0x08 /* Disable chain 3 */
93
94#define AR2315_PCI_IN_PTR 0x0200
95
96#define AR2315_PCI_OUT_EN 0x0400
97
98#define AR2315_PCI_OUT_EN0 0x01 /* Enable chain 0 */
99
100#define AR2315_PCI_OUT_DIS 0x0404
101
102#define AR2315_PCI_OUT_DIS0 0x01 /* Disable chain 0 */
103
104#define AR2315_PCI_OUT_PTR 0x0408
105
106/* PCI interrupt status (write one to clear) */
107#define AR2315_PCI_ISR 0x0500
108
109#define AR2315_PCI_INT_TX 0x00000001 /* Desc In Completed */
110#define AR2315_PCI_INT_TXOK 0x00000002 /* Desc In OK */
111#define AR2315_PCI_INT_TXERR 0x00000004 /* Desc In ERR */
112#define AR2315_PCI_INT_TXEOL 0x00000008 /* Desc In End-of-List */
113#define AR2315_PCI_INT_RX 0x00000010 /* Desc Out Completed */
114#define AR2315_PCI_INT_RXOK 0x00000020 /* Desc Out OK */
115#define AR2315_PCI_INT_RXERR 0x00000040 /* Desc Out ERR */
116#define AR2315_PCI_INT_RXEOL 0x00000080 /* Desc Out EOL */
117#define AR2315_PCI_INT_TXOOD 0x00000200 /* Desc In Out-of-Desc */
118#define AR2315_PCI_INT_DESCMASK 0x0000FFFF /* Desc Mask */
119#define AR2315_PCI_INT_EXT 0x02000000 /* Extern PCI INTA */
120#define AR2315_PCI_INT_ABORT 0x04000000 /* PCI bus abort event */
121
122/* PCI interrupt mask */
123#define AR2315_PCI_IMR 0x0504
124
125/* Global PCI interrupt enable */
126#define AR2315_PCI_IER 0x0508
127
128#define AR2315_PCI_IER_DISABLE 0x00 /* disable pci interrupts */
129#define AR2315_PCI_IER_ENABLE 0x01 /* enable pci interrupts */
130
131#define AR2315_PCI_HOST_IN_EN 0x0800
132#define AR2315_PCI_HOST_IN_DIS 0x0804
133#define AR2315_PCI_HOST_IN_PTR 0x0810
134#define AR2315_PCI_HOST_OUT_EN 0x0900
135#define AR2315_PCI_HOST_OUT_DIS 0x0904
136#define AR2315_PCI_HOST_OUT_PTR 0x0908
137
138/*
139 * PCI interrupts, which share IP5
140 * Keep ordered according to AR2315_PCI_INT_XXX bits
141 */
142#define AR2315_PCI_IRQ_EXT 25
143#define AR2315_PCI_IRQ_ABORT 26
144#define AR2315_PCI_IRQ_COUNT 27
145
146/* Arbitrary size of memory region to access the configuration space */
147#define AR2315_PCI_CFG_SIZE 0x00100000
148
149#define AR2315_PCI_HOST_SLOT 3
150#define AR2315_PCI_HOST_DEVID ((0xff18 << 16) | PCI_VENDOR_ID_ATHEROS)
151
152/* ??? access BAR */
153#define AR2315_PCI_HOST_MBAR0 0x10000000
154/* RAM access BAR */
155#define AR2315_PCI_HOST_MBAR1 AR2315_PCI_HOST_SDRAM_BASEADDR
156/* ??? access BAR */
157#define AR2315_PCI_HOST_MBAR2 0x30000000
158
159struct ar2315_pci_ctrl {
160 void __iomem *cfg_mem;
161 void __iomem *mmr_mem;
162 unsigned irq;
163 unsigned irq_ext;
164 struct irq_domain *domain;
165 struct pci_controller pci_ctrl;
166 struct resource mem_res;
167 struct resource io_res;
168};
169
170static inline struct ar2315_pci_ctrl *ar2315_pci_bus_to_apc(struct pci_bus *bus)
171{
172 struct pci_controller *hose = bus->sysdata;
173
174 return container_of(hose, struct ar2315_pci_ctrl, pci_ctrl);
175}
176
177static inline u32 ar2315_pci_reg_read(struct ar2315_pci_ctrl *apc, u32 reg)
178{
179 return __raw_readl(apc->mmr_mem + reg);
180}
181
182static inline void ar2315_pci_reg_write(struct ar2315_pci_ctrl *apc, u32 reg,
183 u32 val)
184{
185 __raw_writel(val, apc->mmr_mem + reg);
186}
187
188static inline void ar2315_pci_reg_mask(struct ar2315_pci_ctrl *apc, u32 reg,
189 u32 mask, u32 val)
190{
191 u32 ret = ar2315_pci_reg_read(apc, reg);
192
193 ret &= ~mask;
194 ret |= val;
195 ar2315_pci_reg_write(apc, reg, ret);
196}
197
198static int ar2315_pci_cfg_access(struct ar2315_pci_ctrl *apc, unsigned devfn,
199 int where, int size, u32 *ptr, bool write)
200{
201 int func = PCI_FUNC(devfn);
202 int dev = PCI_SLOT(devfn);
203 u32 addr = (1 << (13 + dev)) | (func << 8) | (where & ~3);
204 u32 mask = 0xffffffff >> 8 * (4 - size);
205 u32 sh = (where & 3) * 8;
206 u32 value, isr;
207
208 /* Prevent access past the remapped area */
209 if (addr >= AR2315_PCI_CFG_SIZE || dev > 18)
210 return PCIBIOS_DEVICE_NOT_FOUND;
211
212 /* Clear pending errors */
213 ar2315_pci_reg_write(apc, AR2315_PCI_ISR, AR2315_PCI_INT_ABORT);
214 /* Select Configuration access */
215 ar2315_pci_reg_mask(apc, AR2315_PCI_MISC_CONFIG, 0,
216 AR2315_PCIMISC_CFG_SEL);
217
218 mb(); /* PCI must see space change before we begin */
219
220 value = __raw_readl(apc->cfg_mem + addr);
221
222 isr = ar2315_pci_reg_read(apc, AR2315_PCI_ISR);
223
224 if (isr & AR2315_PCI_INT_ABORT)
225 goto exit_err;
226
227 if (write) {
228 value = (value & ~(mask << sh)) | *ptr << sh;
229 __raw_writel(value, apc->cfg_mem + addr);
230 isr = ar2315_pci_reg_read(apc, AR2315_PCI_ISR);
231 if (isr & AR2315_PCI_INT_ABORT)
232 goto exit_err;
233 } else {
234 *ptr = (value >> sh) & mask;
235 }
236
237 goto exit;
238
239exit_err:
240 ar2315_pci_reg_write(apc, AR2315_PCI_ISR, AR2315_PCI_INT_ABORT);
241 if (!write)
242 *ptr = 0xffffffff;
243
244exit:
245 /* Select Memory access */
246 ar2315_pci_reg_mask(apc, AR2315_PCI_MISC_CONFIG, AR2315_PCIMISC_CFG_SEL,
247 0);
248
249 return isr & AR2315_PCI_INT_ABORT ? PCIBIOS_DEVICE_NOT_FOUND :
250 PCIBIOS_SUCCESSFUL;
251}
252
253static inline int ar2315_pci_local_cfg_rd(struct ar2315_pci_ctrl *apc,
254 unsigned devfn, int where, u32 *val)
255{
256 return ar2315_pci_cfg_access(apc, devfn, where, sizeof(u32), val,
257 false);
258}
259
260static inline int ar2315_pci_local_cfg_wr(struct ar2315_pci_ctrl *apc,
261 unsigned devfn, int where, u32 val)
262{
263 return ar2315_pci_cfg_access(apc, devfn, where, sizeof(u32), &val,
264 true);
265}
266
267static int ar2315_pci_cfg_read(struct pci_bus *bus, unsigned devfn, int where,
268 int size, u32 *value)
269{
270 struct ar2315_pci_ctrl *apc = ar2315_pci_bus_to_apc(bus);
271
272 if (PCI_SLOT(devfn) == AR2315_PCI_HOST_SLOT)
273 return PCIBIOS_DEVICE_NOT_FOUND;
274
275 return ar2315_pci_cfg_access(apc, devfn, where, size, value, false);
276}
277
278static int ar2315_pci_cfg_write(struct pci_bus *bus, unsigned devfn, int where,
279 int size, u32 value)
280{
281 struct ar2315_pci_ctrl *apc = ar2315_pci_bus_to_apc(bus);
282
283 if (PCI_SLOT(devfn) == AR2315_PCI_HOST_SLOT)
284 return PCIBIOS_DEVICE_NOT_FOUND;
285
286 return ar2315_pci_cfg_access(apc, devfn, where, size, &value, true);
287}
288
289static struct pci_ops ar2315_pci_ops = {
290 .read = ar2315_pci_cfg_read,
291 .write = ar2315_pci_cfg_write,
292};
293
294static int ar2315_pci_host_setup(struct ar2315_pci_ctrl *apc)
295{
296 unsigned devfn = PCI_DEVFN(AR2315_PCI_HOST_SLOT, 0);
297 int res;
298 u32 id;
299
300 res = ar2315_pci_local_cfg_rd(apc, devfn, PCI_VENDOR_ID, &id);
301 if (res != PCIBIOS_SUCCESSFUL || id != AR2315_PCI_HOST_DEVID)
302 return -ENODEV;
303
304 /* Program MBARs */
305 ar2315_pci_local_cfg_wr(apc, devfn, PCI_BASE_ADDRESS_0,
306 AR2315_PCI_HOST_MBAR0);
307 ar2315_pci_local_cfg_wr(apc, devfn, PCI_BASE_ADDRESS_1,
308 AR2315_PCI_HOST_MBAR1);
309 ar2315_pci_local_cfg_wr(apc, devfn, PCI_BASE_ADDRESS_2,
310 AR2315_PCI_HOST_MBAR2);
311
312 /* Run */
313 ar2315_pci_local_cfg_wr(apc, devfn, PCI_COMMAND, PCI_COMMAND_MEMORY |
314 PCI_COMMAND_MASTER | PCI_COMMAND_SPECIAL |
315 PCI_COMMAND_INVALIDATE | PCI_COMMAND_PARITY |
316 PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK);
317
318 return 0;
319}
320
321static void ar2315_pci_irq_handler(unsigned irq, struct irq_desc *desc)
322{
323 struct ar2315_pci_ctrl *apc = irq_get_handler_data(irq);
324 u32 pending = ar2315_pci_reg_read(apc, AR2315_PCI_ISR) &
325 ar2315_pci_reg_read(apc, AR2315_PCI_IMR);
326 unsigned pci_irq = 0;
327
328 if (pending)
329 pci_irq = irq_find_mapping(apc->domain, __ffs(pending));
330
331 if (pci_irq)
332 generic_handle_irq(pci_irq);
333 else
334 spurious_interrupt();
335}
336
337static void ar2315_pci_irq_mask(struct irq_data *d)
338{
339 struct ar2315_pci_ctrl *apc = irq_data_get_irq_chip_data(d);
340
341 ar2315_pci_reg_mask(apc, AR2315_PCI_IMR, BIT(d->hwirq), 0);
342}
343
344static void ar2315_pci_irq_mask_ack(struct irq_data *d)
345{
346 struct ar2315_pci_ctrl *apc = irq_data_get_irq_chip_data(d);
347 u32 m = BIT(d->hwirq);
348
349 ar2315_pci_reg_mask(apc, AR2315_PCI_IMR, m, 0);
350 ar2315_pci_reg_write(apc, AR2315_PCI_ISR, m);
351}
352
353static void ar2315_pci_irq_unmask(struct irq_data *d)
354{
355 struct ar2315_pci_ctrl *apc = irq_data_get_irq_chip_data(d);
356
357 ar2315_pci_reg_mask(apc, AR2315_PCI_IMR, 0, BIT(d->hwirq));
358}
359
360static struct irq_chip ar2315_pci_irq_chip = {
361 .name = "AR2315-PCI",
362 .irq_mask = ar2315_pci_irq_mask,
363 .irq_mask_ack = ar2315_pci_irq_mask_ack,
364 .irq_unmask = ar2315_pci_irq_unmask,
365};
366
367static int ar2315_pci_irq_map(struct irq_domain *d, unsigned irq,
368 irq_hw_number_t hw)
369{
370 irq_set_chip_and_handler(irq, &ar2315_pci_irq_chip, handle_level_irq);
371 irq_set_chip_data(irq, d->host_data);
372 return 0;
373}
374
375static struct irq_domain_ops ar2315_pci_irq_domain_ops = {
376 .map = ar2315_pci_irq_map,
377};
378
379static void ar2315_pci_irq_init(struct ar2315_pci_ctrl *apc)
380{
381 ar2315_pci_reg_mask(apc, AR2315_PCI_IER, AR2315_PCI_IER_ENABLE, 0);
382 ar2315_pci_reg_mask(apc, AR2315_PCI_IMR, (AR2315_PCI_INT_ABORT |
383 AR2315_PCI_INT_EXT), 0);
384
385 apc->irq_ext = irq_create_mapping(apc->domain, AR2315_PCI_IRQ_EXT);
386
387 irq_set_chained_handler(apc->irq, ar2315_pci_irq_handler);
388 irq_set_handler_data(apc->irq, apc);
389
390 /* Clear any pending Abort or external Interrupts
391 * and enable interrupt processing */
392 ar2315_pci_reg_write(apc, AR2315_PCI_ISR, AR2315_PCI_INT_ABORT |
393 AR2315_PCI_INT_EXT);
394 ar2315_pci_reg_mask(apc, AR2315_PCI_IER, 0, AR2315_PCI_IER_ENABLE);
395}
396
397static int ar2315_pci_probe(struct platform_device *pdev)
398{
399 struct ar2315_pci_ctrl *apc;
400 struct device *dev = &pdev->dev;
401 struct resource *res;
402 int irq, err;
403
404 apc = devm_kzalloc(dev, sizeof(*apc), GFP_KERNEL);
405 if (!apc)
406 return -ENOMEM;
407
408 irq = platform_get_irq(pdev, 0);
409 if (irq < 0)
410 return -EINVAL;
411 apc->irq = irq;
412
413 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
414 "ar2315-pci-ctrl");
415 apc->mmr_mem = devm_ioremap_resource(dev, res);
416 if (IS_ERR(apc->mmr_mem))
417 return PTR_ERR(apc->mmr_mem);
418
419 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
420 "ar2315-pci-ext");
421 if (!res)
422 return -EINVAL;
423
424 apc->mem_res.name = "AR2315 PCI mem space";
425 apc->mem_res.parent = res;
426 apc->mem_res.start = res->start;
427 apc->mem_res.end = res->end;
428 apc->mem_res.flags = IORESOURCE_MEM;
429
430 /* Remap PCI config space */
431 apc->cfg_mem = devm_ioremap_nocache(dev, res->start,
432 AR2315_PCI_CFG_SIZE);
433 if (!apc->cfg_mem) {
434 dev_err(dev, "failed to remap PCI config space\n");
435 return -ENOMEM;
436 }
437
438 /* Reset the PCI bus by setting bits 5-4 in PCI_MCFG */
439 ar2315_pci_reg_mask(apc, AR2315_PCI_MISC_CONFIG,
440 AR2315_PCIMISC_RST_MODE,
441 AR2315_PCIRST_LOW);
442 msleep(100);
443
444 /* Bring the PCI out of reset */
445 ar2315_pci_reg_mask(apc, AR2315_PCI_MISC_CONFIG,
446 AR2315_PCIMISC_RST_MODE,
447 AR2315_PCIRST_HIGH | AR2315_PCICACHE_DIS | 0x8);
448
449 ar2315_pci_reg_write(apc, AR2315_PCI_UNCACHE_CFG,
450 0x1E | /* 1GB uncached */
451 (1 << 5) | /* Enable uncached */
452 (0x2 << 30) /* Base: 0x80000000 */);
453 ar2315_pci_reg_read(apc, AR2315_PCI_UNCACHE_CFG);
454
455 msleep(500);
456
457 err = ar2315_pci_host_setup(apc);
458 if (err)
459 return err;
460
461 apc->domain = irq_domain_add_linear(NULL, AR2315_PCI_IRQ_COUNT,
462 &ar2315_pci_irq_domain_ops, apc);
463 if (!apc->domain) {
464 dev_err(dev, "failed to add IRQ domain\n");
465 return -ENOMEM;
466 }
467
468 ar2315_pci_irq_init(apc);
469
470 /* PCI controller does not support I/O ports */
471 apc->io_res.name = "AR2315 IO space";
472 apc->io_res.start = 0;
473 apc->io_res.end = 0;
474 apc->io_res.flags = IORESOURCE_IO;
475
476 apc->pci_ctrl.pci_ops = &ar2315_pci_ops;
477 apc->pci_ctrl.mem_resource = &apc->mem_res;
478 apc->pci_ctrl.io_resource = &apc->io_res;
479
480 register_pci_controller(&apc->pci_ctrl);
481
482 dev_info(dev, "registered PCI controller\n");
483
484 return 0;
485}
486
487static struct platform_driver ar2315_pci_driver = {
488 .probe = ar2315_pci_probe,
489 .driver = {
490 .name = "ar2315-pci",
491 .owner = THIS_MODULE,
492 },
493};
494
495static int __init ar2315_pci_init(void)
496{
497 return platform_driver_register(&ar2315_pci_driver);
498}
499arch_initcall(ar2315_pci_init);
500
501int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
502{
503 struct ar2315_pci_ctrl *apc = ar2315_pci_bus_to_apc(dev->bus);
504
505 return slot ? 0 : apc->irq_ext;
506}
507
508int pcibios_plat_dev_init(struct pci_dev *dev)
509{
510 return 0;
511}
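
ar2315_pci_cfg_access() above builds the configuration address by one-hot encoding the device number, so (1 << (13 + dev)) asserts that device's IDSEL line (AD[13] for the board's USB controller at dev 0), and it emulates sub-word accesses in software with a mask and byte shift. A standalone check of that math, using illustrative register values:

#include <stdio.h>
#include <stdint.h>

/* Mirror of the address/mask computation in ar2315_pci_cfg_access() */
static uint32_t cfg_addr(int dev, int func, int where)
{
	return (1u << (13 + dev)) | ((uint32_t)func << 8) | (where & ~3);
}

int main(void)
{
	int dev = 0, func = 0, where = 0x06;	/* a 16-bit register */
	int size = 2;

	uint32_t addr = cfg_addr(dev, func, where);
	uint32_t mask = 0xffffffffu >> 8 * (4 - size);	/* 0x0000ffff */
	unsigned sh = (where & 3) * 8;			/* bit offset: 16 */

	printf("addr=%#x mask=%#x shift=%u\n", addr, mask, sh);

	/* read: pull the 16-bit field out of the 32-bit word */
	uint32_t word = 0x02900006;	/* made-up config word */
	printf("read   %#06x\n", (word >> sh) & mask);

	/* write: merge a new value in without touching the neighbours */
	uint32_t newval = 0xffff;
	word = (word & ~(mask << sh)) | (newval << sh);
	printf("merged %#010x\n", word);
	return 0;
}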
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c
index d471a26dd5f8..2b534aea20e4 100644
--- a/arch/mips/pci/pci-ar71xx.c
+++ b/arch/mips/pci/pci-ar71xx.c
@@ -50,7 +50,6 @@
50 50
51struct ar71xx_pci_controller { 51struct ar71xx_pci_controller {
52 void __iomem *cfg_base; 52 void __iomem *cfg_base;
53 spinlock_t lock;
54 int irq; 53 int irq;
55 int irq_base; 54 int irq_base;
56 struct pci_controller pci_ctrl; 55 struct pci_controller pci_ctrl;
@@ -182,7 +181,6 @@ static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
182{ 181{
183 struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus); 182 struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus);
184 void __iomem *base = apc->cfg_base; 183 void __iomem *base = apc->cfg_base;
185 unsigned long flags;
186 u32 data; 184 u32 data;
187 int err; 185 int err;
188 int ret; 186 int ret;
@@ -190,8 +188,6 @@ static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
190 ret = PCIBIOS_SUCCESSFUL; 188 ret = PCIBIOS_SUCCESSFUL;
191 data = ~0; 189 data = ~0;
192 190
193 spin_lock_irqsave(&apc->lock, flags);
194
195 err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size, 191 err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size,
196 AR71XX_PCI_CFG_CMD_READ); 192 AR71XX_PCI_CFG_CMD_READ);
197 if (err) 193 if (err)
@@ -199,8 +195,6 @@ static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
199 else 195 else
200 data = __raw_readl(base + AR71XX_PCI_REG_CFG_RDDATA); 196 data = __raw_readl(base + AR71XX_PCI_REG_CFG_RDDATA);
201 197
202 spin_unlock_irqrestore(&apc->lock, flags);
203
204 *value = (data >> (8 * (where & 3))) & ar71xx_pci_read_mask[size & 7]; 198 *value = (data >> (8 * (where & 3))) & ar71xx_pci_read_mask[size & 7];
205 199
206 return ret; 200 return ret;
@@ -211,15 +205,12 @@ static int ar71xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
211{ 205{
212 struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus); 206 struct ar71xx_pci_controller *apc = pci_bus_to_ar71xx_controller(bus);
213 void __iomem *base = apc->cfg_base; 207 void __iomem *base = apc->cfg_base;
214 unsigned long flags;
215 int err; 208 int err;
216 int ret; 209 int ret;
217 210
218 value = value << (8 * (where & 3)); 211 value = value << (8 * (where & 3));
219 ret = PCIBIOS_SUCCESSFUL; 212 ret = PCIBIOS_SUCCESSFUL;
220 213
221 spin_lock_irqsave(&apc->lock, flags);
222
223 err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size, 214 err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size,
224 AR71XX_PCI_CFG_CMD_WRITE); 215 AR71XX_PCI_CFG_CMD_WRITE);
225 if (err) 216 if (err)
@@ -227,8 +218,6 @@ static int ar71xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
227 else 218 else
228 __raw_writel(value, base + AR71XX_PCI_REG_CFG_WRDATA); 219 __raw_writel(value, base + AR71XX_PCI_REG_CFG_WRDATA);
229 220
230 spin_unlock_irqrestore(&apc->lock, flags);
231
232 return ret; 221 return ret;
233} 222}
234 223
@@ -360,8 +349,6 @@ static int ar71xx_pci_probe(struct platform_device *pdev)
360 if (!apc) 349 if (!apc)
361 return -ENOMEM; 350 return -ENOMEM;
362 351
363 spin_lock_init(&apc->lock);
364
365 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base"); 352 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cfg_base");
366 apc->cfg_base = devm_ioremap_resource(&pdev->dev, res); 353 apc->cfg_base = devm_ioremap_resource(&pdev->dev, res);
367 if (IS_ERR(apc->cfg_base)) 354 if (IS_ERR(apc->cfg_base))
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
index 785b2659b519..b7a6fcbb8852 100644
--- a/arch/mips/pci/pci-ar724x.c
+++ b/arch/mips/pci/pci-ar724x.c
@@ -9,7 +9,6 @@
9 * by the Free Software Foundation. 9 * by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/spinlock.h>
13#include <linux/irq.h> 12#include <linux/irq.h>
14#include <linux/pci.h> 13#include <linux/pci.h>
15#include <linux/module.h> 14#include <linux/module.h>
@@ -48,8 +47,6 @@ struct ar724x_pci_controller {
48 bool bar0_is_cached; 47 bool bar0_is_cached;
49 u32 bar0_value; 48 u32 bar0_value;
50 49
51 spinlock_t lock;
52
53 struct pci_controller pci_controller; 50 struct pci_controller pci_controller;
54 struct resource io_res; 51 struct resource io_res;
55 struct resource mem_res; 52 struct resource mem_res;
@@ -75,7 +72,6 @@ pci_bus_to_ar724x_controller(struct pci_bus *bus)
75static int ar724x_pci_local_write(struct ar724x_pci_controller *apc, 72static int ar724x_pci_local_write(struct ar724x_pci_controller *apc,
76 int where, int size, u32 value) 73 int where, int size, u32 value)
77{ 74{
78 unsigned long flags;
79 void __iomem *base; 75 void __iomem *base;
80 u32 data; 76 u32 data;
81 int s; 77 int s;
@@ -86,8 +82,6 @@ static int ar724x_pci_local_write(struct ar724x_pci_controller *apc,
86 return PCIBIOS_DEVICE_NOT_FOUND; 82 return PCIBIOS_DEVICE_NOT_FOUND;
87 83
88 base = apc->crp_base; 84 base = apc->crp_base;
89
90 spin_lock_irqsave(&apc->lock, flags);
91 data = __raw_readl(base + (where & ~3)); 85 data = __raw_readl(base + (where & ~3));
92 86
93 switch (size) { 87 switch (size) {
@@ -105,14 +99,12 @@ static int ar724x_pci_local_write(struct ar724x_pci_controller *apc,
105 data = value; 99 data = value;
106 break; 100 break;
107 default: 101 default:
108 spin_unlock_irqrestore(&apc->lock, flags);
109 return PCIBIOS_BAD_REGISTER_NUMBER; 102 return PCIBIOS_BAD_REGISTER_NUMBER;
110 } 103 }
111 104
112 __raw_writel(data, base + (where & ~3)); 105 __raw_writel(data, base + (where & ~3));
113 /* flush write */ 106 /* flush write */
114 __raw_readl(base + (where & ~3)); 107 __raw_readl(base + (where & ~3));
115 spin_unlock_irqrestore(&apc->lock, flags);
116 108
117 return PCIBIOS_SUCCESSFUL; 109 return PCIBIOS_SUCCESSFUL;
118} 110}
@@ -121,7 +113,6 @@ static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
121 int size, uint32_t *value) 113 int size, uint32_t *value)
122{ 114{
123 struct ar724x_pci_controller *apc; 115 struct ar724x_pci_controller *apc;
124 unsigned long flags;
125 void __iomem *base; 116 void __iomem *base;
126 u32 data; 117 u32 data;
127 118
@@ -133,8 +124,6 @@ static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
133 return PCIBIOS_DEVICE_NOT_FOUND; 124 return PCIBIOS_DEVICE_NOT_FOUND;
134 125
135 base = apc->devcfg_base; 126 base = apc->devcfg_base;
136
137 spin_lock_irqsave(&apc->lock, flags);
138 data = __raw_readl(base + (where & ~3)); 127 data = __raw_readl(base + (where & ~3));
139 128
140 switch (size) { 129 switch (size) {
@@ -153,13 +142,9 @@ static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
153 case 4: 142 case 4:
154 break; 143 break;
155 default: 144 default:
156 spin_unlock_irqrestore(&apc->lock, flags);
157
158 return PCIBIOS_BAD_REGISTER_NUMBER; 145 return PCIBIOS_BAD_REGISTER_NUMBER;
159 } 146 }
160 147
161 spin_unlock_irqrestore(&apc->lock, flags);
162
163 if (where == PCI_BASE_ADDRESS_0 && size == 4 && 148 if (where == PCI_BASE_ADDRESS_0 && size == 4 &&
164 apc->bar0_is_cached) { 149 apc->bar0_is_cached) {
165 /* use the cached value */ 150 /* use the cached value */
@@ -175,7 +160,6 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
175 int size, uint32_t value) 160 int size, uint32_t value)
176{ 161{
177 struct ar724x_pci_controller *apc; 162 struct ar724x_pci_controller *apc;
178 unsigned long flags;
179 void __iomem *base; 163 void __iomem *base;
180 u32 data; 164 u32 data;
181 int s; 165 int s;
@@ -209,8 +193,6 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
209 } 193 }
210 194
211 base = apc->devcfg_base; 195 base = apc->devcfg_base;
212
213 spin_lock_irqsave(&apc->lock, flags);
214 data = __raw_readl(base + (where & ~3)); 196 data = __raw_readl(base + (where & ~3));
215 197
216 switch (size) { 198 switch (size) {
@@ -228,15 +210,12 @@ static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
228 data = value; 210 data = value;
229 break; 211 break;
230 default: 212 default:
231 spin_unlock_irqrestore(&apc->lock, flags);
232
233 return PCIBIOS_BAD_REGISTER_NUMBER; 213 return PCIBIOS_BAD_REGISTER_NUMBER;
234 } 214 }
235 215
236 __raw_writel(data, base + (where & ~3)); 216 __raw_writel(data, base + (where & ~3));
237 /* flush write */ 217 /* flush write */
238 __raw_readl(base + (where & ~3)); 218 __raw_readl(base + (where & ~3));
239 spin_unlock_irqrestore(&apc->lock, flags);
240 219
241 return PCIBIOS_SUCCESSFUL; 220 return PCIBIOS_SUCCESSFUL;
242} 221}
@@ -380,8 +359,6 @@ static int ar724x_pci_probe(struct platform_device *pdev)
380 if (apc->irq < 0) 359 if (apc->irq < 0)
381 return -EINVAL; 360 return -EINVAL;
382 361
383 spin_lock_init(&apc->lock);
384
385 res = platform_get_resource_byname(pdev, IORESOURCE_IO, "io_base"); 362 res = platform_get_resource_byname(pdev, IORESOURCE_IO, "io_base");
386 if (!res) 363 if (!res)
387 return -EINVAL; 364 return -EINVAL;
diff --git a/arch/mips/pci/pci-octeon.c b/arch/mips/pci/pci-octeon.c
index 59cccd95688b..d07e04121cc6 100644
--- a/arch/mips/pci/pci-octeon.c
+++ b/arch/mips/pci/pci-octeon.c
@@ -708,7 +708,7 @@ static int __init octeon_pci_setup(void)
708 708
709 if (IS_ERR(platform_device_register_simple("octeon_pci_edac", 709 if (IS_ERR(platform_device_register_simple("octeon_pci_edac",
710 -1, NULL, 0))) 710 -1, NULL, 0)))
711 pr_err("Registation of co_pci_edac failed!\n"); 711 pr_err("Registration of co_pci_edac failed!\n");
712 712
713 octeon_pci_dma_init(); 713 octeon_pci_dma_init();
714 714
diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c
new file mode 100644
index 000000000000..a4574947e698
--- /dev/null
+++ b/arch/mips/pci/pci-rt2880.c
@@ -0,0 +1,285 @@
1/*
2 * Ralink RT288x SoC PCI register definitions
3 *
4 * Copyright (C) 2009 John Crispin <blogic@openwrt.org>
5 * Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
6 *
7 * Parts of this file are based on Ralink's 2.6.21 BSP
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License version 2 as published
11 * by the Free Software Foundation.
12 */
13
14#include <linux/types.h>
15#include <linux/pci.h>
16#include <linux/io.h>
17#include <linux/init.h>
18#include <linux/module.h>
19#include <linux/of_platform.h>
20#include <linux/of_irq.h>
21#include <linux/of_pci.h>
22
23#include <asm/mach-ralink/rt288x.h>
24
25#define RT2880_PCI_BASE 0x00440000
26#define RT288X_CPU_IRQ_PCI 4
27
28#define RT2880_PCI_MEM_BASE 0x20000000
29#define RT2880_PCI_MEM_SIZE 0x10000000
30#define RT2880_PCI_IO_BASE 0x00460000
31#define RT2880_PCI_IO_SIZE 0x00010000
32
33#define RT2880_PCI_REG_PCICFG_ADDR 0x00
34#define RT2880_PCI_REG_PCIMSK_ADDR 0x0c
35#define RT2880_PCI_REG_BAR0SETUP_ADDR 0x10
36#define RT2880_PCI_REG_IMBASEBAR0_ADDR 0x18
37#define RT2880_PCI_REG_CONFIG_ADDR 0x20
38#define RT2880_PCI_REG_CONFIG_DATA 0x24
39#define RT2880_PCI_REG_MEMBASE 0x28
40#define RT2880_PCI_REG_IOBASE 0x2c
41#define RT2880_PCI_REG_ID 0x30
42#define RT2880_PCI_REG_CLASS 0x34
43#define RT2880_PCI_REG_SUBID 0x38
44#define RT2880_PCI_REG_ARBCTL 0x80
45
46static void __iomem *rt2880_pci_base;
47static DEFINE_SPINLOCK(rt2880_pci_lock);
48
49static u32 rt2880_pci_reg_read(u32 reg)
50{
51 return readl(rt2880_pci_base + reg);
52}
53
54static void rt2880_pci_reg_write(u32 val, u32 reg)
55{
56 writel(val, rt2880_pci_base + reg);
57}
58
59static inline u32 rt2880_pci_get_cfgaddr(unsigned int bus, unsigned int slot,
60 unsigned int func, unsigned int where)
61{
62 return ((bus << 16) | (slot << 11) | (func << 8) | (where & 0xfc) |
63 0x80000000);
64}
65
66static int rt2880_pci_config_read(struct pci_bus *bus, unsigned int devfn,
67 int where, int size, u32 *val)
68{
69 unsigned long flags;
70 u32 address;
71 u32 data;
72
73 address = rt2880_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
74 PCI_FUNC(devfn), where);
75
76 spin_lock_irqsave(&rt2880_pci_lock, flags);
77 rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR);
78 data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA);
79 spin_unlock_irqrestore(&rt2880_pci_lock, flags);
80
81 switch (size) {
82 case 1:
83 *val = (data >> ((where & 3) << 3)) & 0xff;
84 break;
85 case 2:
86 *val = (data >> ((where & 3) << 3)) & 0xffff;
87 break;
88 case 4:
89 *val = data;
90 break;
91 }
92
93 return PCIBIOS_SUCCESSFUL;
94}
95
96static int rt2880_pci_config_write(struct pci_bus *bus, unsigned int devfn,
97 int where, int size, u32 val)
98{
99 unsigned long flags;
100 u32 address;
101 u32 data;
102
103 address = rt2880_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
104 PCI_FUNC(devfn), where);
105
106 spin_lock_irqsave(&rt2880_pci_lock, flags);
107 rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR);
108 data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA);
109
110 switch (size) {
111 case 1:
112 data = (data & ~(0xff << ((where & 3) << 3))) |
113 (val << ((where & 3) << 3));
114 break;
115 case 2:
116 data = (data & ~(0xffff << ((where & 3) << 3))) |
117 (val << ((where & 3) << 3));
118 break;
119 case 4:
120 data = val;
121 break;
122 }
123
124 rt2880_pci_reg_write(data, RT2880_PCI_REG_CONFIG_DATA);
125 spin_unlock_irqrestore(&rt2880_pci_lock, flags);
126
127 return PCIBIOS_SUCCESSFUL;
128}
129
130static struct pci_ops rt2880_pci_ops = {
131 .read = rt2880_pci_config_read,
132 .write = rt2880_pci_config_write,
133};
134
135static struct resource rt2880_pci_mem_resource = {
136 .name = "PCI MEM space",
137 .start = RT2880_PCI_MEM_BASE,
138 .end = RT2880_PCI_MEM_BASE + RT2880_PCI_MEM_SIZE - 1,
139 .flags = IORESOURCE_MEM,
140};
141
142static struct resource rt2880_pci_io_resource = {
143 .name = "PCI IO space",
144 .start = RT2880_PCI_IO_BASE,
145 .end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1,
146 .flags = IORESOURCE_IO,
147};
148
149static struct pci_controller rt2880_pci_controller = {
150 .pci_ops = &rt2880_pci_ops,
151 .mem_resource = &rt2880_pci_mem_resource,
152 .io_resource = &rt2880_pci_io_resource,
153};
154
155static inline u32 rt2880_pci_read_u32(unsigned long reg)
156{
157 unsigned long flags;
158 u32 address;
159 u32 ret;
160
161 address = rt2880_pci_get_cfgaddr(0, 0, 0, reg);
162
163 spin_lock_irqsave(&rt2880_pci_lock, flags);
164 rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR);
165 ret = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA);
166 spin_unlock_irqrestore(&rt2880_pci_lock, flags);
167
168 return ret;
169}
170
171static inline void rt2880_pci_write_u32(unsigned long reg, u32 val)
172{
173 unsigned long flags;
174 u32 address;
175
176 address = rt2880_pci_get_cfgaddr(0, 0, 0, reg);
177
178 spin_lock_irqsave(&rt2880_pci_lock, flags);
179 rt2880_pci_reg_write(address, RT2880_PCI_REG_CONFIG_ADDR);
180 rt2880_pci_reg_write(val, RT2880_PCI_REG_CONFIG_DATA);
181 spin_unlock_irqrestore(&rt2880_pci_lock, flags);
182}
183
184int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
185{
186 u16 cmd;
187 int irq = -1;
188
189 if (dev->bus->number != 0)
190 return irq;
191
192 switch (PCI_SLOT(dev->devfn)) {
193 case 0x00:
194 rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000);
195 (void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0);
196 break;
197 case 0x11:
198 irq = RT288X_CPU_IRQ_PCI;
199 break;
200 default:
201 pr_err("%s:%s[%d] trying to alloc unknown pci irq\n",
202 __FILE__, __func__, __LINE__);
203 BUG();
204 break;
205 }
206
207 pci_write_config_byte((struct pci_dev *) dev,
208 PCI_CACHE_LINE_SIZE, 0x14);
209 pci_write_config_byte((struct pci_dev *) dev, PCI_LATENCY_TIMER, 0xFF);
210 pci_read_config_word((struct pci_dev *) dev, PCI_COMMAND, &cmd);
211 cmd |= PCI_COMMAND_MASTER | PCI_COMMAND_IO | PCI_COMMAND_MEMORY |
212 PCI_COMMAND_INVALIDATE | PCI_COMMAND_FAST_BACK |
213 PCI_COMMAND_SERR | PCI_COMMAND_WAIT | PCI_COMMAND_PARITY;
214 pci_write_config_word((struct pci_dev *) dev, PCI_COMMAND, cmd);
215 pci_write_config_byte((struct pci_dev *) dev, PCI_INTERRUPT_LINE,
216 dev->irq);
217 return irq;
218}
219
220static int rt288x_pci_probe(struct platform_device *pdev)
221{
222 void __iomem *io_map_base;
223 int i;
224
225 rt2880_pci_base = ioremap_nocache(RT2880_PCI_BASE, PAGE_SIZE);
226
227 io_map_base = ioremap(RT2880_PCI_IO_BASE, RT2880_PCI_IO_SIZE);
228 rt2880_pci_controller.io_map_base = (unsigned long) io_map_base;
229 set_io_port_base((unsigned long) io_map_base);
230
231 ioport_resource.start = RT2880_PCI_IO_BASE;
232 ioport_resource.end = RT2880_PCI_IO_BASE + RT2880_PCI_IO_SIZE - 1;
233
234 rt2880_pci_reg_write(0, RT2880_PCI_REG_PCICFG_ADDR);
235 for (i = 0; i < 0xfffff; i++)
236 ;
237
238 rt2880_pci_reg_write(0x79, RT2880_PCI_REG_ARBCTL);
239 rt2880_pci_reg_write(0x07FF0001, RT2880_PCI_REG_BAR0SETUP_ADDR);
240 rt2880_pci_reg_write(RT2880_PCI_MEM_BASE, RT2880_PCI_REG_MEMBASE);
241 rt2880_pci_reg_write(RT2880_PCI_IO_BASE, RT2880_PCI_REG_IOBASE);
242 rt2880_pci_reg_write(0x08000000, RT2880_PCI_REG_IMBASEBAR0_ADDR);
243 rt2880_pci_reg_write(0x08021814, RT2880_PCI_REG_ID);
244 rt2880_pci_reg_write(0x00800001, RT2880_PCI_REG_CLASS);
245 rt2880_pci_reg_write(0x28801814, RT2880_PCI_REG_SUBID);
246 rt2880_pci_reg_write(0x000c0000, RT2880_PCI_REG_PCIMSK_ADDR);
247
248 rt2880_pci_write_u32(PCI_BASE_ADDRESS_0, 0x08000000);
249 (void) rt2880_pci_read_u32(PCI_BASE_ADDRESS_0);
250
251 register_pci_controller(&rt2880_pci_controller);
252 return 0;
253}
254
255int pcibios_plat_dev_init(struct pci_dev *dev)
256{
257 return 0;
258}
259
260static const struct of_device_id rt288x_pci_match[] = {
261 { .compatible = "ralink,rt288x-pci" },
262 {},
263};
264MODULE_DEVICE_TABLE(of, rt288x_pci_match);
265
266static struct platform_driver rt288x_pci_driver = {
267 .probe = rt288x_pci_probe,
268 .driver = {
269 .name = "rt288x-pci",
270 .owner = THIS_MODULE,
271 .of_match_table = rt288x_pci_match,
272 },
273};
274
275int __init pcibios_init(void)
276{
277 int ret = platform_driver_register(&rt288x_pci_driver);
278
279 if (ret)
280 pr_info("rt288x-pci: Error registering platform driver!");
281
282 return ret;
283}
284
285arch_initcall(pcibios_init);
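
The new pci-rt2880.c above is a textbook address/data window: rt2880_pci_get_cfgaddr() packs (bus, slot, func, register) into one word, the driver writes it to RT2880_PCI_REG_CONFIG_ADDR, then moves the data through RT2880_PCI_REG_CONFIG_DATA, all under rt2880_pci_lock so the two-step sequence cannot interleave. A worked example of the encoding, using the driver's own helpers (the chosen slot and register are just a sample):

/* worked example: bus 0, slot 0x11, func 0, register PCI_INTERRUPT_LINE (0x3c)
 *   (0 << 16) | (0x11 << 11) | (0 << 8) | (0x3c & 0xfc) | 0x80000000
 * = 0x8000883c
 */
u32 addr = rt2880_pci_get_cfgaddr(0, 0x11, 0, PCI_INTERRUPT_LINE);
u32 data;

rt2880_pci_reg_write(addr, RT2880_PCI_REG_CONFIG_ADDR);
data = rt2880_pci_reg_read(RT2880_PCI_REG_CONFIG_DATA);	/* whole dword */
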
diff --git a/arch/mips/pci/pci-rt3883.c b/arch/mips/pci/pci-rt3883.c
index 72919aeef42b..0bcc0b1cfddc 100644
--- a/arch/mips/pci/pci-rt3883.c
+++ b/arch/mips/pci/pci-rt3883.c
@@ -61,7 +61,6 @@
61 61
62struct rt3883_pci_controller { 62struct rt3883_pci_controller {
63 void __iomem *base; 63 void __iomem *base;
64 spinlock_t lock;
65 64
66 struct device_node *intc_of_node; 65 struct device_node *intc_of_node;
67 struct irq_domain *irq_domain; 66 struct irq_domain *irq_domain;
@@ -111,10 +110,8 @@ static u32 rt3883_pci_read_cfg32(struct rt3883_pci_controller *rpc,
111 110
112 address = rt3883_pci_get_cfgaddr(bus, slot, func, reg); 111 address = rt3883_pci_get_cfgaddr(bus, slot, func, reg);
113 112
114 spin_lock_irqsave(&rpc->lock, flags);
115 rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR); 113 rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR);
116 ret = rt3883_pci_r32(rpc, RT3883_PCI_REG_CFGDATA); 114 ret = rt3883_pci_r32(rpc, RT3883_PCI_REG_CFGDATA);
117 spin_unlock_irqrestore(&rpc->lock, flags);
118 115
119 return ret; 116 return ret;
120} 117}
@@ -128,10 +125,8 @@ static void rt3883_pci_write_cfg32(struct rt3883_pci_controller *rpc,
128 125
129 address = rt3883_pci_get_cfgaddr(bus, slot, func, reg); 126 address = rt3883_pci_get_cfgaddr(bus, slot, func, reg);
130 127
131 spin_lock_irqsave(&rpc->lock, flags);
132 rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR); 128 rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR);
133 rt3883_pci_w32(rpc, val, RT3883_PCI_REG_CFGDATA); 129 rt3883_pci_w32(rpc, val, RT3883_PCI_REG_CFGDATA);
134 spin_unlock_irqrestore(&rpc->lock, flags);
135} 130}
136 131
137static void rt3883_pci_irq_handler(unsigned int irq, struct irq_desc *desc) 132static void rt3883_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
@@ -252,10 +247,8 @@ static int rt3883_pci_config_read(struct pci_bus *bus, unsigned int devfn,
252 address = rt3883_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn), 247 address = rt3883_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
253 PCI_FUNC(devfn), where); 248 PCI_FUNC(devfn), where);
254 249
255 spin_lock_irqsave(&rpc->lock, flags);
256 rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR); 250 rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR);
257 data = rt3883_pci_r32(rpc, RT3883_PCI_REG_CFGDATA); 251 data = rt3883_pci_r32(rpc, RT3883_PCI_REG_CFGDATA);
258 spin_unlock_irqrestore(&rpc->lock, flags);
259 252
260 switch (size) { 253 switch (size) {
261 case 1: 254 case 1:
@@ -288,7 +281,6 @@ static int rt3883_pci_config_write(struct pci_bus *bus, unsigned int devfn,
288 address = rt3883_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn), 281 address = rt3883_pci_get_cfgaddr(bus->number, PCI_SLOT(devfn),
289 PCI_FUNC(devfn), where); 282 PCI_FUNC(devfn), where);
290 283
291 spin_lock_irqsave(&rpc->lock, flags);
292 rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR); 284 rt3883_pci_w32(rpc, address, RT3883_PCI_REG_CFGADDR);
293 data = rt3883_pci_r32(rpc, RT3883_PCI_REG_CFGDATA); 285 data = rt3883_pci_r32(rpc, RT3883_PCI_REG_CFGDATA);
294 286
@@ -307,7 +299,6 @@ static int rt3883_pci_config_write(struct pci_bus *bus, unsigned int devfn,
307 } 299 }
308 300
309 rt3883_pci_w32(rpc, data, RT3883_PCI_REG_CFGDATA); 301 rt3883_pci_w32(rpc, data, RT3883_PCI_REG_CFGDATA);
310 spin_unlock_irqrestore(&rpc->lock, flags);
311 302
312 return PCIBIOS_SUCCESSFUL; 303 return PCIBIOS_SUCCESSFUL;
313} 304}
diff --git a/arch/mips/pci/pci-tx4939.c b/arch/mips/pci/pci-tx4939.c
index c10fbf2a19dc..cd8ed09c4f53 100644
--- a/arch/mips/pci/pci-tx4939.c
+++ b/arch/mips/pci/pci-tx4939.c
@@ -103,5 +103,5 @@ void __init tx4939_setup_pcierr_irq(void)
103 tx4927_pcierr_interrupt, 103 tx4927_pcierr_interrupt,
104 0, "PCI error", 104 0, "PCI error",
105 (void *)TX4939_PCIC_REG)) 105 (void *)TX4939_PCIC_REG))
106 pr_warning("Failed to request irq for PCIERR\n"); 106 pr_warn("Failed to request irq for PCIERR\n");
107} 107}
diff --git a/arch/mips/pmcs-msp71xx/msp_prom.c b/arch/mips/pmcs-msp71xx/msp_prom.c
index 1c9897531660..ef620a4c82a5 100644
--- a/arch/mips/pmcs-msp71xx/msp_prom.c
+++ b/arch/mips/pmcs-msp71xx/msp_prom.c
@@ -295,7 +295,7 @@ char *prom_getenv(char *env_name)
295 295
296 while (*var) { 296 while (*var) {
297 if (strncmp(env_name, *var, i) == 0) { 297 if (strncmp(env_name, *var, i) == 0) {
298 return (*var + strlen(env_name) + 1); 298 return *var + strlen(env_name) + 1;
299 } 299 }
300 var++; 300 var++;
301 } 301 }
diff --git a/arch/mips/ralink/Kconfig b/arch/mips/ralink/Kconfig
index 77e8a9620e18..b1c52ca580f9 100644
--- a/arch/mips/ralink/Kconfig
+++ b/arch/mips/ralink/Kconfig
@@ -16,6 +16,7 @@ choice
16 config SOC_RT288X 16 config SOC_RT288X
17 bool "RT288x" 17 bool "RT288x"
18 select MIPS_L1_CACHE_SHIFT_4 18 select MIPS_L1_CACHE_SHIFT_4
19 select HW_HAS_PCI
19 20
20 config SOC_RT305X 21 config SOC_RT305X
21 bool "RT305x" 22 bool "RT305x"
@@ -26,7 +27,7 @@ choice
26 select HW_HAS_PCI 27 select HW_HAS_PCI
27 28
28 config SOC_MT7620 29 config SOC_MT7620
29 bool "MT7620" 30 bool "MT7620/8"
30 31
31endchoice 32endchoice
32 33
diff --git a/arch/mips/ralink/Makefile b/arch/mips/ralink/Makefile
index 2c09c8aa0ae2..a6c9d0061326 100644
--- a/arch/mips/ralink/Makefile
+++ b/arch/mips/ralink/Makefile
@@ -10,9 +10,13 @@ obj-y := prom.o of.o reset.o clk.o irq.o timer.o
10 10
11obj-$(CONFIG_CLKEVT_RT3352) += cevt-rt3352.o 11obj-$(CONFIG_CLKEVT_RT3352) += cevt-rt3352.o
12 12
13obj-$(CONFIG_RALINK_ILL_ACC) += ill_acc.o
14
13obj-$(CONFIG_SOC_RT288X) += rt288x.o 15obj-$(CONFIG_SOC_RT288X) += rt288x.o
14obj-$(CONFIG_SOC_RT305X) += rt305x.o 16obj-$(CONFIG_SOC_RT305X) += rt305x.o
15obj-$(CONFIG_SOC_RT3883) += rt3883.o 17obj-$(CONFIG_SOC_RT3883) += rt3883.o
16obj-$(CONFIG_SOC_MT7620) += mt7620.o 18obj-$(CONFIG_SOC_MT7620) += mt7620.o
17 19
18obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 20obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
21
22obj-$(CONFIG_DEBUG_FS) += bootrom.o
diff --git a/arch/mips/ralink/bootrom.c b/arch/mips/ralink/bootrom.c
new file mode 100644
index 000000000000..5403468394fb
--- /dev/null
+++ b/arch/mips/ralink/bootrom.c
@@ -0,0 +1,48 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/debugfs.h>
10#include <linux/seq_file.h>
11
12#define BOOTROM_OFFSET 0x10118000
13#define BOOTROM_SIZE 0x8000
14
15static void __iomem *membase = (void __iomem *) KSEG1ADDR(BOOTROM_OFFSET);
16
17static int bootrom_show(struct seq_file *s, void *unused)
18{
19 seq_write(s, membase, BOOTROM_SIZE);
20
21 return 0;
22}
23
24static int bootrom_open(struct inode *inode, struct file *file)
25{
26 return single_open(file, bootrom_show, NULL);
27}
28
29static const struct file_operations bootrom_file_ops = {
30 .open = bootrom_open,
31 .read = seq_read,
32 .llseek = seq_lseek,
33 .release = single_release,
34};
35
36static int bootrom_setup(void)
37{
38 if (!debugfs_create_file("bootrom", 0444,
39 NULL, NULL, &bootrom_file_ops)) {
40 pr_err("Failed to create bootrom debugfs file\n");
41
42 return -EINVAL;
43 }
44
45 return 0;
46}
47
48postcore_initcall(bootrom_setup);
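
bootrom.c above is the stock single_open() idiom: the only payload is the show callback, everything else is seq_file plumbing, and the 32 KiB ROM dumps with a plain read of /sys/kernel/debug/bootrom. A minimal sketch of reusing the same skeleton for another blob (my_blob is hypothetical):

/* sketch: the same skeleton exposes any memory blob; only the show
 * callback and the debugfs file name change (my_blob is hypothetical) */
static const u8 my_blob[64];

static int my_blob_show(struct seq_file *s, void *unused)
{
	seq_write(s, my_blob, sizeof(my_blob));	/* raw bytes into the seq buffer */
	return 0;
}

static int my_blob_open(struct inode *inode, struct file *file)
{
	return single_open(file, my_blob_show, NULL);
}

static const struct file_operations my_blob_fops = {
	.open		= my_blob_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
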
diff --git a/arch/mips/ralink/clk.c b/arch/mips/ralink/clk.c
index 5d0983d47161..feb5a9bf98b4 100644
--- a/arch/mips/ralink/clk.c
+++ b/arch/mips/ralink/clk.c
@@ -56,6 +56,12 @@ unsigned long clk_get_rate(struct clk *clk)
56} 56}
57EXPORT_SYMBOL_GPL(clk_get_rate); 57EXPORT_SYMBOL_GPL(clk_get_rate);
58 58
59int clk_set_rate(struct clk *clk, unsigned long rate)
60{
61 return -1;
62}
63EXPORT_SYMBOL_GPL(clk_set_rate);
64
59void __init plat_time_init(void) 65void __init plat_time_init(void)
60{ 66{
61 struct clk *clk; 67 struct clk *clk;
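
The clk_set_rate() stub added above exists so consumers that unconditionally try to rescale a clock still link and run on ralink, where every rate is fixed at boot; it always fails, so callers have to treat the request as advisory. A minimal sketch of a tolerant caller (pdev is a hypothetical consumer device):

/* sketch: coping with the fixed-clock stub above (pdev is hypothetical) */
struct clk *clk = clk_get(&pdev->dev, NULL);

if (!IS_ERR(clk) && clk_set_rate(clk, 40000000))
	dev_info(&pdev->dev, "rate is fixed, running at %lu Hz\n",
		 clk_get_rate(clk));
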
diff --git a/arch/mips/ralink/common.h b/arch/mips/ralink/common.h
index 42dfd6100a2d..8e7d8e618fb9 100644
--- a/arch/mips/ralink/common.h
+++ b/arch/mips/ralink/common.h
@@ -11,25 +11,6 @@
11 11
12#define RAMIPS_SYS_TYPE_LEN 32 12#define RAMIPS_SYS_TYPE_LEN 32
13 13
14struct ralink_pinmux_grp {
15 const char *name;
16 u32 mask;
17 int gpio_first;
18 int gpio_last;
19};
20
21struct ralink_pinmux {
22 struct ralink_pinmux_grp *mode;
23 struct ralink_pinmux_grp *uart;
24 int uart_shift;
25 u32 uart_mask;
26 void (*wdt_reset)(void);
27 struct ralink_pinmux_grp *pci;
28 int pci_shift;
29 u32 pci_mask;
30};
31extern struct ralink_pinmux rt_gpio_pinmux;
32
33struct ralink_soc_info { 14struct ralink_soc_info {
34 unsigned char sys_type[RAMIPS_SYS_TYPE_LEN]; 15 unsigned char sys_type[RAMIPS_SYS_TYPE_LEN];
35 unsigned char *compatible; 16 unsigned char *compatible;
diff --git a/arch/mips/ralink/early_printk.c b/arch/mips/ralink/early_printk.c
index b46d0419d09b..255d695ec8c6 100644
--- a/arch/mips/ralink/early_printk.c
+++ b/arch/mips/ralink/early_printk.c
@@ -12,21 +12,24 @@
12#include <asm/addrspace.h> 12#include <asm/addrspace.h>
13 13
14#ifdef CONFIG_SOC_RT288X 14#ifdef CONFIG_SOC_RT288X
15#define EARLY_UART_BASE 0x300c00 15#define EARLY_UART_BASE 0x300c00
16#define CHIPID_BASE 0x300004
17#elif defined(CONFIG_SOC_MT7621)
18#define EARLY_UART_BASE 0x1E000c00
19#define CHIPID_BASE 0x1E000004
16#else 20#else
17#define EARLY_UART_BASE 0x10000c00 21#define EARLY_UART_BASE 0x10000c00
22#define CHIPID_BASE 0x10000004
18#endif 23#endif
19 24
20#define UART_REG_RX 0x00 25#define MT7628_CHIP_NAME1 0x20203832
21#define UART_REG_TX 0x04 26
22#define UART_REG_IER 0x08 27#define UART_REG_TX 0x04
23#define UART_REG_IIR 0x0c 28#define UART_REG_LSR 0x14
24#define UART_REG_FCR 0x10 29#define UART_REG_LSR_RT2880 0x1c
25#define UART_REG_LCR 0x14
26#define UART_REG_MCR 0x18
27#define UART_REG_LSR 0x1c
28 30
29static __iomem void *uart_membase = (__iomem void *) KSEG1ADDR(EARLY_UART_BASE); 31static __iomem void *uart_membase = (__iomem void *) KSEG1ADDR(EARLY_UART_BASE);
32static __iomem void *chipid_membase = (__iomem void *) KSEG1ADDR(CHIPID_BASE);
30 33
31static inline void uart_w32(u32 val, unsigned reg) 34static inline void uart_w32(u32 val, unsigned reg)
32{ 35{
@@ -38,11 +41,23 @@ static inline u32 uart_r32(unsigned reg)
38 return __raw_readl(uart_membase + reg); 41 return __raw_readl(uart_membase + reg);
39} 42}
40 43
44static inline int soc_is_mt7628(void)
45{
46 return IS_ENABLED(CONFIG_SOC_MT7620) &&
47 (__raw_readl(chipid_membase) == MT7628_CHIP_NAME1);
48}
49
41void prom_putchar(unsigned char ch) 50void prom_putchar(unsigned char ch)
42{ 51{
43 while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0) 52 if (IS_ENABLED(CONFIG_SOC_MT7621) || soc_is_mt7628()) {
44 ; 53 uart_w32(ch, UART_TX);
45 uart_w32(ch, UART_REG_TX); 54 while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0)
46 while ((uart_r32(UART_REG_LSR) & UART_LSR_THRE) == 0) 55 ;
47 ; 56 } else {
57 while ((uart_r32(UART_REG_LSR_RT2880) & UART_LSR_THRE) == 0)
58 ;
59 uart_w32(ch, UART_REG_TX);
60 while ((uart_r32(UART_REG_LSR_RT2880) & UART_LSR_THRE) == 0)
61 ;
62 }
48} 63}
diff --git a/arch/mips/ralink/ill_acc.c b/arch/mips/ralink/ill_acc.c
new file mode 100644
index 000000000000..e20b02e3ae28
--- /dev/null
+++ b/arch/mips/ralink/ill_acc.c
@@ -0,0 +1,87 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/interrupt.h>
10#include <linux/of_platform.h>
11#include <linux/of_irq.h>
12
13#include <asm/mach-ralink/ralink_regs.h>
14
15#define REG_ILL_ACC_ADDR 0x10
16#define REG_ILL_ACC_TYPE 0x14
17
18#define ILL_INT_STATUS BIT(31)
19#define ILL_ACC_WRITE BIT(30)
20#define ILL_ACC_LEN_M 0xff
21#define ILL_ACC_OFF_M 0xf
22#define ILL_ACC_OFF_S 16
23#define ILL_ACC_ID_M 0x7
24#define ILL_ACC_ID_S 8
25
26#define DRV_NAME "ill_acc"
27
28static const char * const ill_acc_ids[] = {
29 "cpu", "dma", "ppe", "pdma rx", "pdma tx", "pci/e", "wmac", "usb",
30};
31
32static irqreturn_t ill_acc_irq_handler(int irq, void *_priv)
33{
34 struct device *dev = (struct device *) _priv;
35 u32 addr = rt_memc_r32(REG_ILL_ACC_ADDR);
36 u32 type = rt_memc_r32(REG_ILL_ACC_TYPE);
37
38 dev_err(dev, "illegal %s access from %s - addr:0x%08x offset:%d len:%d\n",
39 (type & ILL_ACC_WRITE) ? ("write") : ("read"),
40 ill_acc_ids[(type >> ILL_ACC_ID_S) & ILL_ACC_ID_M],
41 addr, (type >> ILL_ACC_OFF_S) & ILL_ACC_OFF_M,
42 type & ILL_ACC_LEN_M);
43
44 rt_memc_w32(REG_ILL_ACC_TYPE, REG_ILL_ACC_TYPE);
45
46 return IRQ_HANDLED;
47}
48
49static int __init ill_acc_of_setup(void)
50{
51 struct platform_device *pdev;
52 struct device_node *np;
53 int irq;
54
55 /* somehow this driver breaks on RT5350 */
56 if (of_machine_is_compatible("ralink,rt5350-soc"))
57 return -EINVAL;
58
59 np = of_find_compatible_node(NULL, NULL, "ralink,rt3050-memc");
60 if (!np)
61 return -EINVAL;
62
63 pdev = of_find_device_by_node(np);
64 if (!pdev) {
65 pr_err("%s: failed to lookup pdev\n", np->name);
66 return -EINVAL;
67 }
68
69 irq = irq_of_parse_and_map(np, 0);
70 if (!irq) {
71 dev_err(&pdev->dev, "failed to get irq\n");
72 return -EINVAL;
73 }
74
75 if (request_irq(irq, ill_acc_irq_handler, 0, "ill_acc", &pdev->dev)) {
76 dev_err(&pdev->dev, "failed to request irq\n");
77 return -EINVAL;
78 }
79
80 rt_memc_w32(ILL_INT_STATUS, REG_ILL_ACC_TYPE);
81
82 dev_info(&pdev->dev, "irq registered\n");
83
84 return 0;
85}
86
87arch_initcall(ill_acc_of_setup);
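
The ill_acc handler above decodes a single status word into who, where, and how much, using the masks at the top of the file. A worked example against a hypothetical raw value:

u32 type = 0x40020304;					/* hypothetical sample  */

bool is_write = type & ILL_ACC_WRITE;			/* bit 30 set: a write  */
int id  = (type >> ILL_ACC_ID_S) & ILL_ACC_ID_M;	/* 3 -> "pdma rx"       */
int off = (type >> ILL_ACC_OFF_S) & ILL_ACC_OFF_M;	/* offset 2             */
int len = type & ILL_ACC_LEN_M;				/* 4 byte access        */
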
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index 781b3d14a489..7cf91b92e9d1 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -20,14 +20,6 @@
20 20
21#include "common.h" 21#include "common.h"
22 22
23/* INTC register offsets */
24#define INTC_REG_STATUS0 0x00
25#define INTC_REG_STATUS1 0x04
26#define INTC_REG_TYPE 0x20
27#define INTC_REG_RAW_STATUS 0x30
28#define INTC_REG_ENABLE 0x34
29#define INTC_REG_DISABLE 0x38
30
31#define INTC_INT_GLOBAL BIT(31) 23#define INTC_INT_GLOBAL BIT(31)
32 24
33#define RALINK_CPU_IRQ_INTC (MIPS_CPU_IRQ_BASE + 2) 25#define RALINK_CPU_IRQ_INTC (MIPS_CPU_IRQ_BASE + 2)
@@ -44,16 +36,36 @@
44 36
45#define RALINK_INTC_IRQ_PERFC (RALINK_INTC_IRQ_BASE + 9) 37#define RALINK_INTC_IRQ_PERFC (RALINK_INTC_IRQ_BASE + 9)
46 38
39enum rt_intc_regs_enum {
40 INTC_REG_STATUS0 = 0,
41 INTC_REG_STATUS1,
42 INTC_REG_TYPE,
43 INTC_REG_RAW_STATUS,
44 INTC_REG_ENABLE,
45 INTC_REG_DISABLE,
46};
47
48static u32 rt_intc_regs[] = {
49 [INTC_REG_STATUS0] = 0x00,
50 [INTC_REG_STATUS1] = 0x04,
51 [INTC_REG_TYPE] = 0x20,
52 [INTC_REG_RAW_STATUS] = 0x30,
53 [INTC_REG_ENABLE] = 0x34,
54 [INTC_REG_DISABLE] = 0x38,
55};
56
47static void __iomem *rt_intc_membase; 57static void __iomem *rt_intc_membase;
48 58
59static int rt_perfcount_irq;
60
49static inline void rt_intc_w32(u32 val, unsigned reg) 61static inline void rt_intc_w32(u32 val, unsigned reg)
50{ 62{
51 __raw_writel(val, rt_intc_membase + reg); 63 __raw_writel(val, rt_intc_membase + rt_intc_regs[reg]);
52} 64}
53 65
54static inline u32 rt_intc_r32(unsigned reg) 66static inline u32 rt_intc_r32(unsigned reg)
55{ 67{
56 return __raw_readl(rt_intc_membase + reg); 68 return __raw_readl(rt_intc_membase + rt_intc_regs[reg]);
57} 69}
58 70
59static void ralink_intc_irq_unmask(struct irq_data *d) 71static void ralink_intc_irq_unmask(struct irq_data *d)
@@ -73,6 +85,11 @@ static struct irq_chip ralink_intc_irq_chip = {
73 .irq_mask_ack = ralink_intc_irq_mask, 85 .irq_mask_ack = ralink_intc_irq_mask,
74}; 86};
75 87
88int get_c0_perfcount_int(void)
89{
90 return rt_perfcount_irq;
91}
92
76unsigned int get_c0_compare_int(void) 93unsigned int get_c0_compare_int(void)
77{ 94{
78 return CP0_LEGACY_COMPARE_IRQ; 95 return CP0_LEGACY_COMPARE_IRQ;
@@ -134,6 +151,10 @@ static int __init intc_of_init(struct device_node *node,
134 struct irq_domain *domain; 151 struct irq_domain *domain;
135 int irq; 152 int irq;
136 153
154 if (!of_property_read_u32_array(node, "ralink,intc-registers",
155 rt_intc_regs, 6))
156 pr_info("intc: using register map from devicetree\n");
157
137 irq = irq_of_parse_and_map(node, 0); 158 irq = irq_of_parse_and_map(node, 0);
138 if (!irq) 159 if (!irq)
139 panic("Failed to get INTC IRQ"); 160 panic("Failed to get INTC IRQ");
@@ -167,13 +188,13 @@ static int __init intc_of_init(struct device_node *node,
167 irq_set_handler_data(irq, domain); 188 irq_set_handler_data(irq, domain);
168 189
169 /* tell the kernel which irq is used for performance monitoring */ 190 /* tell the kernel which irq is used for performance monitoring */
170 cp0_perfcount_irq = irq_create_mapping(domain, 9); 191 rt_perfcount_irq = irq_create_mapping(domain, 9);
171 192
172 return 0; 193 return 0;
173} 194}
174 195
175static struct of_device_id __initdata of_irq_ids[] = { 196static struct of_device_id __initdata of_irq_ids[] = {
176 { .compatible = "mti,cpu-interrupt-controller", .data = mips_cpu_intc_init }, 197 { .compatible = "mti,cpu-interrupt-controller", .data = mips_cpu_irq_of_init },
177 { .compatible = "ralink,rt2880-intc", .data = intc_of_init }, 198 { .compatible = "ralink,rt2880-intc", .data = intc_of_init },
178 {}, 199 {},
179}; 200};
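
The irq.c hunk above stops poking cp0_perfcount_irq directly and instead answers the generic get_c0_perfcount_int() hook with the virq minted by irq_create_mapping(), so the perf IRQ stays valid under irq_domain remapping. A sketch of the consumer side, with the handler name assumed rather than quoted from the generic code:

/* sketch: generic code picking up the perf IRQ (handler is hypothetical) */
int irq = get_c0_perfcount_int();

if (irq >= 0 && request_irq(irq, perf_overflow_handler, 0,
			    "perfcount", NULL))
	pr_warn("perfcount: irq %d unavailable\n", irq);
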
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index a3ad56c2372d..2ea5ff6dc22e 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -17,124 +17,214 @@
17#include <asm/mipsregs.h> 17#include <asm/mipsregs.h>
18#include <asm/mach-ralink/ralink_regs.h> 18#include <asm/mach-ralink/ralink_regs.h>
19#include <asm/mach-ralink/mt7620.h> 19#include <asm/mach-ralink/mt7620.h>
20#include <asm/mach-ralink/pinmux.h>
20 21
21#include "common.h" 22#include "common.h"
22 23
24/* analog */
25#define PMU0_CFG 0x88
26#define PMU_SW_SET BIT(28)
27#define A_DCDC_EN BIT(24)
28#define A_SSC_PERI BIT(19)
29#define A_SSC_GEN BIT(18)
30#define A_SSC_M 0x3
31#define A_SSC_S 16
32#define A_DLY_M 0x7
33#define A_DLY_S 8
34#define A_VTUNE_M 0xff
35
36/* digital */
37#define PMU1_CFG 0x8C
38#define DIG_SW_SEL BIT(25)
39
40/* is this a MT7620 or a MT7628 */
41enum mt762x_soc_type mt762x_soc;
42
23/* does the board have sdram or ddram */ 43/* does the board have sdram or ddram */
24static int dram_type; 44static int dram_type;
25 45
26static struct ralink_pinmux_grp mode_mux[] = { 46static struct rt2880_pmx_func i2c_grp[] = { FUNC("i2c", 0, 1, 2) };
27 { 47static struct rt2880_pmx_func spi_grp[] = { FUNC("spi", 0, 3, 4) };
28 .name = "i2c", 48static struct rt2880_pmx_func uartlite_grp[] = { FUNC("uartlite", 0, 15, 2) };
29 .mask = MT7620_GPIO_MODE_I2C, 49static struct rt2880_pmx_func mdio_grp[] = { FUNC("mdio", 0, 22, 2) };
30 .gpio_first = 1, 50static struct rt2880_pmx_func rgmii1_grp[] = { FUNC("rgmii1", 0, 24, 12) };
31 .gpio_last = 2, 51static struct rt2880_pmx_func refclk_grp[] = { FUNC("spi refclk", 0, 37, 3) };
32 }, { 52static struct rt2880_pmx_func ephy_grp[] = { FUNC("ephy", 0, 40, 5) };
33 .name = "spi", 53static struct rt2880_pmx_func rgmii2_grp[] = { FUNC("rgmii2", 0, 60, 12) };
34 .mask = MT7620_GPIO_MODE_SPI, 54static struct rt2880_pmx_func wled_grp[] = { FUNC("wled", 0, 72, 1) };
35 .gpio_first = 3, 55static struct rt2880_pmx_func pa_grp[] = { FUNC("pa", 0, 18, 4) };
36 .gpio_last = 6, 56static struct rt2880_pmx_func uartf_grp[] = {
37 }, { 57 FUNC("uartf", MT7620_GPIO_MODE_UARTF, 7, 8),
38 .name = "uartlite", 58 FUNC("pcm uartf", MT7620_GPIO_MODE_PCM_UARTF, 7, 8),
39 .mask = MT7620_GPIO_MODE_UART1, 59 FUNC("pcm i2s", MT7620_GPIO_MODE_PCM_I2S, 7, 8),
40 .gpio_first = 15, 60 FUNC("i2s uartf", MT7620_GPIO_MODE_I2S_UARTF, 7, 8),
41 .gpio_last = 16, 61 FUNC("pcm gpio", MT7620_GPIO_MODE_PCM_GPIO, 11, 4),
42 }, { 62 FUNC("gpio uartf", MT7620_GPIO_MODE_GPIO_UARTF, 7, 4),
43 .name = "wdt", 63 FUNC("gpio i2s", MT7620_GPIO_MODE_GPIO_I2S, 7, 4),
44 .mask = MT7620_GPIO_MODE_WDT, 64};
45 .gpio_first = 17, 65static struct rt2880_pmx_func wdt_grp[] = {
46 .gpio_last = 17, 66 FUNC("wdt rst", 0, 17, 1),
47 }, { 67 FUNC("wdt refclk", 0, 17, 1),
48 .name = "mdio", 68 };
49 .mask = MT7620_GPIO_MODE_MDIO, 69static struct rt2880_pmx_func pcie_rst_grp[] = {
50 .gpio_first = 22, 70 FUNC("pcie rst", MT7620_GPIO_MODE_PCIE_RST, 36, 1),
51 .gpio_last = 23, 71 FUNC("pcie refclk", MT7620_GPIO_MODE_PCIE_REF, 36, 1)
52 }, { 72};
53 .name = "rgmii1", 73static struct rt2880_pmx_func nd_sd_grp[] = {
54 .mask = MT7620_GPIO_MODE_RGMII1, 74 FUNC("nand", MT7620_GPIO_MODE_NAND, 45, 15),
55 .gpio_first = 24, 75 FUNC("sd", MT7620_GPIO_MODE_SD, 45, 15)
56 .gpio_last = 35, 76};
57 }, { 77
58 .name = "spi refclk", 78static struct rt2880_pmx_group mt7620a_pinmux_data[] = {
59 .mask = MT7620_GPIO_MODE_SPI_REF_CLK, 79 GRP("i2c", i2c_grp, 1, MT7620_GPIO_MODE_I2C),
60 .gpio_first = 37, 80 GRP("uartf", uartf_grp, MT7620_GPIO_MODE_UART0_MASK,
61 .gpio_last = 39, 81 MT7620_GPIO_MODE_UART0_SHIFT),
62 }, { 82 GRP("spi", spi_grp, 1, MT7620_GPIO_MODE_SPI),
63 .name = "jtag", 83 GRP("uartlite", uartlite_grp, 1, MT7620_GPIO_MODE_UART1),
64 .mask = MT7620_GPIO_MODE_JTAG, 84 GRP_G("wdt", wdt_grp, MT7620_GPIO_MODE_WDT_MASK,
65 .gpio_first = 40, 85 MT7620_GPIO_MODE_WDT_GPIO, MT7620_GPIO_MODE_WDT_SHIFT),
66 .gpio_last = 44, 86 GRP("mdio", mdio_grp, 1, MT7620_GPIO_MODE_MDIO),
67 }, { 87 GRP("rgmii1", rgmii1_grp, 1, MT7620_GPIO_MODE_RGMII1),
68 /* shared lines with jtag */ 88 GRP("spi refclk", refclk_grp, 1, MT7620_GPIO_MODE_SPI_REF_CLK),
69 .name = "ephy", 89 GRP_G("pcie", pcie_rst_grp, MT7620_GPIO_MODE_PCIE_MASK,
70 .mask = MT7620_GPIO_MODE_EPHY, 90 MT7620_GPIO_MODE_PCIE_GPIO, MT7620_GPIO_MODE_PCIE_SHIFT),
71 .gpio_first = 40, 91 GRP_G("nd_sd", nd_sd_grp, MT7620_GPIO_MODE_ND_SD_MASK,
72 .gpio_last = 44, 92 MT7620_GPIO_MODE_ND_SD_GPIO, MT7620_GPIO_MODE_ND_SD_SHIFT),
73 }, { 93 GRP("rgmii2", rgmii2_grp, 1, MT7620_GPIO_MODE_RGMII2),
74 .name = "nand", 94 GRP("wled", wled_grp, 1, MT7620_GPIO_MODE_WLED),
75 .mask = MT7620_GPIO_MODE_JTAG, 95 GRP("ephy", ephy_grp, 1, MT7620_GPIO_MODE_EPHY),
76 .gpio_first = 45, 96 GRP("pa", pa_grp, 1, MT7620_GPIO_MODE_PA),
77 .gpio_last = 59, 97 { 0 }
78 }, { 98};
79 .name = "rgmii2", 99
80 .mask = MT7620_GPIO_MODE_RGMII2, 100static struct rt2880_pmx_func pwm1_grp_mt7628[] = {
81 .gpio_first = 60, 101 FUNC("sdcx", 3, 19, 1),
82 .gpio_last = 71, 102 FUNC("utif", 2, 19, 1),
83 }, { 103 FUNC("gpio", 1, 19, 1),
84 .name = "wled", 104 FUNC("pwm", 0, 19, 1),
85 .mask = MT7620_GPIO_MODE_WLED, 105};
86 .gpio_first = 72, 106
87 .gpio_last = 72, 107static struct rt2880_pmx_func pwm0_grp_mt7628[] = {
88 }, {0} 108 FUNC("sdcx", 3, 18, 1),
109 FUNC("utif", 2, 18, 1),
110 FUNC("gpio", 1, 18, 1),
111 FUNC("pwm", 0, 18, 1),
112};
113
114static struct rt2880_pmx_func uart2_grp_mt7628[] = {
115 FUNC("sdcx", 3, 20, 2),
116 FUNC("pwm", 2, 20, 2),
117 FUNC("gpio", 1, 20, 2),
118 FUNC("uart", 0, 20, 2),
119};
120
121static struct rt2880_pmx_func uart1_grp_mt7628[] = {
122 FUNC("sdcx", 3, 45, 2),
123 FUNC("pwm", 2, 45, 2),
124 FUNC("gpio", 1, 45, 2),
125 FUNC("uart", 0, 45, 2),
126};
127
128static struct rt2880_pmx_func i2c_grp_mt7628[] = {
129 FUNC("-", 3, 4, 2),
130 FUNC("debug", 2, 4, 2),
131 FUNC("gpio", 1, 4, 2),
132 FUNC("i2c", 0, 4, 2),
133};
134
135static struct rt2880_pmx_func refclk_grp_mt7628[] = { FUNC("reclk", 0, 36, 1) };
136static struct rt2880_pmx_func perst_grp_mt7628[] = { FUNC("perst", 0, 37, 1) };
137static struct rt2880_pmx_func wdt_grp_mt7628[] = { FUNC("wdt", 0, 15, 38) };
138static struct rt2880_pmx_func spi_grp_mt7628[] = { FUNC("spi", 0, 7, 4) };
139
140static struct rt2880_pmx_func sd_mode_grp_mt7628[] = {
141 FUNC("jtag", 3, 22, 8),
142 FUNC("utif", 2, 22, 8),
143 FUNC("gpio", 1, 22, 8),
144 FUNC("sdcx", 0, 22, 8),
145};
146
147static struct rt2880_pmx_func uart0_grp_mt7628[] = {
148 FUNC("-", 3, 12, 2),
149 FUNC("-", 2, 12, 2),
150 FUNC("gpio", 1, 12, 2),
151 FUNC("uart", 0, 12, 2),
152};
153
154static struct rt2880_pmx_func i2s_grp_mt7628[] = {
155 FUNC("antenna", 3, 0, 4),
156 FUNC("pcm", 2, 0, 4),
157 FUNC("gpio", 1, 0, 4),
158 FUNC("i2s", 0, 0, 4),
159};
160
161static struct rt2880_pmx_func spi_cs1_grp_mt7628[] = {
162 FUNC("-", 3, 6, 1),
163 FUNC("refclk", 2, 6, 1),
164 FUNC("gpio", 1, 6, 1),
165 FUNC("spi", 0, 6, 1),
166};
167
168static struct rt2880_pmx_func spis_grp_mt7628[] = {
169 FUNC("pwm", 3, 14, 4),
170 FUNC("util", 2, 14, 4),
171 FUNC("gpio", 1, 14, 4),
172 FUNC("spis", 0, 14, 4),
89}; 173};
90 174
91static struct ralink_pinmux_grp uart_mux[] = { 175static struct rt2880_pmx_func gpio_grp_mt7628[] = {
92 { 176 FUNC("pcie", 3, 11, 1),
93 .name = "uartf", 177 FUNC("refclk", 2, 11, 1),
94 .mask = MT7620_GPIO_MODE_UARTF, 178 FUNC("gpio", 1, 11, 1),
95 .gpio_first = 7, 179 FUNC("gpio", 0, 11, 1),
96 .gpio_last = 14,
97 }, {
98 .name = "pcm uartf",
99 .mask = MT7620_GPIO_MODE_PCM_UARTF,
100 .gpio_first = 7,
101 .gpio_last = 14,
102 }, {
103 .name = "pcm i2s",
104 .mask = MT7620_GPIO_MODE_PCM_I2S,
105 .gpio_first = 7,
106 .gpio_last = 14,
107 }, {
108 .name = "i2s uartf",
109 .mask = MT7620_GPIO_MODE_I2S_UARTF,
110 .gpio_first = 7,
111 .gpio_last = 14,
112 }, {
113 .name = "pcm gpio",
114 .mask = MT7620_GPIO_MODE_PCM_GPIO,
115 .gpio_first = 11,
116 .gpio_last = 14,
117 }, {
118 .name = "gpio uartf",
119 .mask = MT7620_GPIO_MODE_GPIO_UARTF,
120 .gpio_first = 7,
121 .gpio_last = 10,
122 }, {
123 .name = "gpio i2s",
124 .mask = MT7620_GPIO_MODE_GPIO_I2S,
125 .gpio_first = 7,
126 .gpio_last = 10,
127 }, {
128 .name = "gpio",
129 .mask = MT7620_GPIO_MODE_GPIO,
130 }, {0}
131}; 180};
132 181
133struct ralink_pinmux rt_gpio_pinmux = { 182#define MT7628_GPIO_MODE_MASK 0x3
134 .mode = mode_mux, 183
135 .uart = uart_mux, 184#define MT7628_GPIO_MODE_PWM1 30
136 .uart_shift = MT7620_GPIO_MODE_UART0_SHIFT, 185#define MT7628_GPIO_MODE_PWM0 28
137 .uart_mask = MT7620_GPIO_MODE_UART0_MASK, 186#define MT7628_GPIO_MODE_UART2 26
187#define MT7628_GPIO_MODE_UART1 24
188#define MT7628_GPIO_MODE_I2C 20
189#define MT7628_GPIO_MODE_REFCLK 18
190#define MT7628_GPIO_MODE_PERST 16
191#define MT7628_GPIO_MODE_WDT 14
192#define MT7628_GPIO_MODE_SPI 12
193#define MT7628_GPIO_MODE_SDMODE 10
194#define MT7628_GPIO_MODE_UART0 8
195#define MT7628_GPIO_MODE_I2S 6
196#define MT7628_GPIO_MODE_CS1 4
197#define MT7628_GPIO_MODE_SPIS 2
198#define MT7628_GPIO_MODE_GPIO 0
199
200static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
201 GRP_G("pmw1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
202 1, MT7628_GPIO_MODE_PWM1),
203 GRP_G("pmw1", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
204 1, MT7628_GPIO_MODE_PWM0),
205 GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK,
206 1, MT7628_GPIO_MODE_UART2),
207 GRP_G("uart1", uart1_grp_mt7628, MT7628_GPIO_MODE_MASK,
208 1, MT7628_GPIO_MODE_UART1),
209 GRP_G("i2c", i2c_grp_mt7628, MT7628_GPIO_MODE_MASK,
210 1, MT7628_GPIO_MODE_I2C),
211 GRP("refclk", refclk_grp_mt7628, 1, MT7628_GPIO_MODE_REFCLK),
212 GRP("perst", perst_grp_mt7628, 1, MT7628_GPIO_MODE_PERST),
213 GRP("wdt", wdt_grp_mt7628, 1, MT7628_GPIO_MODE_WDT),
214 GRP("spi", spi_grp_mt7628, 1, MT7628_GPIO_MODE_SPI),
215 GRP_G("sdmode", sd_mode_grp_mt7628, MT7628_GPIO_MODE_MASK,
216 1, MT7628_GPIO_MODE_SDMODE),
217 GRP_G("uart0", uart0_grp_mt7628, MT7628_GPIO_MODE_MASK,
218 1, MT7628_GPIO_MODE_UART0),
219 GRP_G("i2s", i2s_grp_mt7628, MT7628_GPIO_MODE_MASK,
220 1, MT7628_GPIO_MODE_I2S),
221 GRP_G("spi cs1", spi_cs1_grp_mt7628, MT7628_GPIO_MODE_MASK,
222 1, MT7628_GPIO_MODE_CS1),
223 GRP_G("spis", spis_grp_mt7628, MT7628_GPIO_MODE_MASK,
224 1, MT7628_GPIO_MODE_SPIS),
225 GRP_G("gpio", gpio_grp_mt7628, MT7628_GPIO_MODE_MASK,
226 1, MT7628_GPIO_MODE_GPIO),
227 { 0 }
138}; 228};
139 229
140static __init u32 230static __init u32
@@ -287,29 +377,42 @@ void __init ralink_clk_init(void)
287 377
288 xtal_rate = mt7620_get_xtal_rate(); 378 xtal_rate = mt7620_get_xtal_rate();
289 379
290 cpu_pll_rate = mt7620_get_cpu_pll_rate(xtal_rate);
291 pll_rate = mt7620_get_pll_rate(xtal_rate, cpu_pll_rate);
292
293 cpu_rate = mt7620_get_cpu_rate(pll_rate);
294 dram_rate = mt7620_get_dram_rate(pll_rate);
295 sys_rate = mt7620_get_sys_rate(cpu_rate);
296 periph_rate = mt7620_get_periph_rate(xtal_rate);
297
298#define RFMT(label) label ":%lu.%03luMHz " 380#define RFMT(label) label ":%lu.%03luMHz "
299#define RINT(x) ((x) / 1000000) 381#define RINT(x) ((x) / 1000000)
300#define RFRAC(x) (((x) / 1000) % 1000) 382#define RFRAC(x) (((x) / 1000) % 1000)
301 383
302 pr_debug(RFMT("XTAL") RFMT("CPU_PLL") RFMT("PLL"), 384 if (mt762x_soc == MT762X_SOC_MT7628AN) {
303 RINT(xtal_rate), RFRAC(xtal_rate), 385 if (xtal_rate == MHZ(40))
304 RINT(cpu_pll_rate), RFRAC(cpu_pll_rate), 386 cpu_rate = MHZ(580);
305 RINT(pll_rate), RFRAC(pll_rate)); 387 else
388 cpu_rate = MHZ(575);
389 dram_rate = sys_rate = cpu_rate / 3;
390 periph_rate = MHZ(40);
391
392 ralink_clk_add("10000d00.uartlite", periph_rate);
393 ralink_clk_add("10000e00.uartlite", periph_rate);
394 } else {
395 cpu_pll_rate = mt7620_get_cpu_pll_rate(xtal_rate);
396 pll_rate = mt7620_get_pll_rate(xtal_rate, cpu_pll_rate);
397
398 cpu_rate = mt7620_get_cpu_rate(pll_rate);
399 dram_rate = mt7620_get_dram_rate(pll_rate);
400 sys_rate = mt7620_get_sys_rate(cpu_rate);
401 periph_rate = mt7620_get_periph_rate(xtal_rate);
402
403 pr_debug(RFMT("XTAL") RFMT("CPU_PLL") RFMT("PLL"),
404 RINT(xtal_rate), RFRAC(xtal_rate),
405 RINT(cpu_pll_rate), RFRAC(cpu_pll_rate),
406 RINT(pll_rate), RFRAC(pll_rate));
407
408 ralink_clk_add("10000500.uart", periph_rate);
409 }
306 410
307 pr_debug(RFMT("CPU") RFMT("DRAM") RFMT("SYS") RFMT("PERIPH"), 411 pr_debug(RFMT("CPU") RFMT("DRAM") RFMT("SYS") RFMT("PERIPH"),
308 RINT(cpu_rate), RFRAC(cpu_rate), 412 RINT(cpu_rate), RFRAC(cpu_rate),
309 RINT(dram_rate), RFRAC(dram_rate), 413 RINT(dram_rate), RFRAC(dram_rate),
310 RINT(sys_rate), RFRAC(sys_rate), 414 RINT(sys_rate), RFRAC(sys_rate),
311 RINT(periph_rate), RFRAC(periph_rate)); 415 RINT(periph_rate), RFRAC(periph_rate));
312
313#undef RFRAC 416#undef RFRAC
314#undef RINT 417#undef RINT
315#undef RFMT 418#undef RFMT
@@ -317,9 +420,9 @@ void __init ralink_clk_init(void)
317 ralink_clk_add("cpu", cpu_rate); 420 ralink_clk_add("cpu", cpu_rate);
318 ralink_clk_add("10000100.timer", periph_rate); 421 ralink_clk_add("10000100.timer", periph_rate);
319 ralink_clk_add("10000120.watchdog", periph_rate); 422 ralink_clk_add("10000120.watchdog", periph_rate);
320 ralink_clk_add("10000500.uart", periph_rate);
321 ralink_clk_add("10000b00.spi", sys_rate); 423 ralink_clk_add("10000b00.spi", sys_rate);
322 ralink_clk_add("10000c00.uartlite", periph_rate); 424 ralink_clk_add("10000c00.uartlite", periph_rate);
425 ralink_clk_add("10180000.wmac", xtal_rate);
323} 426}
324 427
325void __init ralink_of_remap(void) 428void __init ralink_of_remap(void)
@@ -331,6 +434,52 @@ void __init ralink_of_remap(void)
331 panic("Failed to remap core resources"); 434 panic("Failed to remap core resources");
332} 435}
333 436
437static __init void
438mt7620_dram_init(struct ralink_soc_info *soc_info)
439{
440 switch (dram_type) {
441 case SYSCFG0_DRAM_TYPE_SDRAM:
442 pr_info("Board has SDRAM\n");
443 soc_info->mem_size_min = MT7620_SDRAM_SIZE_MIN;
444 soc_info->mem_size_max = MT7620_SDRAM_SIZE_MAX;
445 break;
446
447 case SYSCFG0_DRAM_TYPE_DDR1:
448 pr_info("Board has DDR1\n");
449 soc_info->mem_size_min = MT7620_DDR1_SIZE_MIN;
450 soc_info->mem_size_max = MT7620_DDR1_SIZE_MAX;
451 break;
452
453 case SYSCFG0_DRAM_TYPE_DDR2:
454 pr_info("Board has DDR2\n");
455 soc_info->mem_size_min = MT7620_DDR2_SIZE_MIN;
456 soc_info->mem_size_max = MT7620_DDR2_SIZE_MAX;
457 break;
458 default:
459 BUG();
460 }
461}
462
463static __init void
464mt7628_dram_init(struct ralink_soc_info *soc_info)
465{
466 switch (dram_type) {
467 case SYSCFG0_DRAM_TYPE_DDR1_MT7628:
468 pr_info("Board has DDR1\n");
469 soc_info->mem_size_min = MT7620_DDR1_SIZE_MIN;
470 soc_info->mem_size_max = MT7620_DDR1_SIZE_MAX;
471 break;
472
473 case SYSCFG0_DRAM_TYPE_DDR2_MT7628:
474 pr_info("Board has DDR2\n");
475 soc_info->mem_size_min = MT7620_DDR2_SIZE_MIN;
476 soc_info->mem_size_max = MT7620_DDR2_SIZE_MAX;
477 break;
478 default:
479 BUG();
480 }
481}
482
334void prom_soc_init(struct ralink_soc_info *soc_info) 483void prom_soc_init(struct ralink_soc_info *soc_info)
335{ 484{
336 void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7620_SYSC_BASE); 485 void __iomem *sysc = (void __iomem *) KSEG1ADDR(MT7620_SYSC_BASE);
@@ -339,22 +488,36 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
339 u32 n1; 488 u32 n1;
340 u32 rev; 489 u32 rev;
341 u32 cfg0; 490 u32 cfg0;
491 u32 pmu0;
492 u32 pmu1;
493 u32 bga;
342 494
343 n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0); 495 n0 = __raw_readl(sysc + SYSC_REG_CHIP_NAME0);
344 n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1); 496 n1 = __raw_readl(sysc + SYSC_REG_CHIP_NAME1);
345 497 rev = __raw_readl(sysc + SYSC_REG_CHIP_REV);
346 if (n0 == MT7620N_CHIP_NAME0 && n1 == MT7620N_CHIP_NAME1) { 498 bga = (rev >> CHIP_REV_PKG_SHIFT) & CHIP_REV_PKG_MASK;
347 name = "MT7620N"; 499
348 soc_info->compatible = "ralink,mt7620n-soc"; 500 if (n0 == MT7620_CHIP_NAME0 && n1 == MT7620_CHIP_NAME1) {
349 } else if (n0 == MT7620A_CHIP_NAME0 && n1 == MT7620A_CHIP_NAME1) { 501 if (bga) {
350 name = "MT7620A"; 502 mt762x_soc = MT762X_SOC_MT7620A;
351 soc_info->compatible = "ralink,mt7620a-soc"; 503 name = "MT7620A";
504 soc_info->compatible = "ralink,mt7620a-soc";
505 } else {
506 mt762x_soc = MT762X_SOC_MT7620N;
507 name = "MT7620N";
508 soc_info->compatible = "ralink,mt7620n-soc";
509#ifdef CONFIG_PCI
510 panic("mt7620n is only supported for non pci kernels");
511#endif
512 }
513 } else if (n0 == MT7620_CHIP_NAME0 && n1 == MT7628_CHIP_NAME1) {
514 mt762x_soc = MT762X_SOC_MT7628AN;
515 name = "MT7628AN";
516 soc_info->compatible = "ralink,mt7628an-soc";
352 } else { 517 } else {
353 panic("mt7620: unknown SoC, n0:%08x n1:%08x", n0, n1); 518 panic("mt762x: unknown SoC, n0:%08x n1:%08x\n", n0, n1);
354 } 519 }
355 520
356 rev = __raw_readl(sysc + SYSC_REG_CHIP_REV);
357
358 snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN, 521 snprintf(soc_info->sys_type, RAMIPS_SYS_TYPE_LEN,
359 "Ralink %s ver:%u eco:%u", 522 "Ralink %s ver:%u eco:%u",
360 name, 523 name,
@@ -364,26 +527,22 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
364 cfg0 = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG0); 527 cfg0 = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG0);
365 dram_type = (cfg0 >> SYSCFG0_DRAM_TYPE_SHIFT) & SYSCFG0_DRAM_TYPE_MASK; 528 dram_type = (cfg0 >> SYSCFG0_DRAM_TYPE_SHIFT) & SYSCFG0_DRAM_TYPE_MASK;
366 529
367 switch (dram_type) {
368 case SYSCFG0_DRAM_TYPE_SDRAM:
369 pr_info("Board has SDRAM\n");
370 soc_info->mem_size_min = MT7620_SDRAM_SIZE_MIN;
371 soc_info->mem_size_max = MT7620_SDRAM_SIZE_MAX;
372 break;
373
374 case SYSCFG0_DRAM_TYPE_DDR1:
375 pr_info("Board has DDR1\n");
376 soc_info->mem_size_min = MT7620_DDR1_SIZE_MIN;
377 soc_info->mem_size_max = MT7620_DDR1_SIZE_MAX;
378 break;
379
380 case SYSCFG0_DRAM_TYPE_DDR2:
381 pr_info("Board has DDR2\n");
382 soc_info->mem_size_min = MT7620_DDR2_SIZE_MIN;
383 soc_info->mem_size_max = MT7620_DDR2_SIZE_MAX;
384 break;
385 default:
386 BUG();
387 }
388 soc_info->mem_base = MT7620_DRAM_BASE; 530 soc_info->mem_base = MT7620_DRAM_BASE;
531 if (mt762x_soc == MT762X_SOC_MT7628AN)
532 mt7628_dram_init(soc_info);
533 else
534 mt7620_dram_init(soc_info);
535
536 pmu0 = __raw_readl(sysc + PMU0_CFG);
537 pmu1 = __raw_readl(sysc + PMU1_CFG);
538
539 pr_info("Analog PMU set to %s control\n",
540 (pmu0 & PMU_SW_SET) ? ("sw") : ("hw"));
541 pr_info("Digital PMU set to %s control\n",
542 (pmu1 & DIG_SW_SEL) ? ("sw") : ("hw"));
543
544 if (mt762x_soc == MT762X_SOC_MT7628AN)
545 rt2880_pinmux_data = mt7628an_pinmux_data;
546 else
547 rt2880_pinmux_data = mt7620a_pinmux_data;
389} 548}
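
The mt7620.c rewrite above collapses the old flat ralink_pinmux_grp tables into FUNC()/GRP()/GRP_G() initializers consumed by the new rt2880 pinmux driver. Reading FUNC("uartlite", 0, 15, 2) as (name, mux value, first pin, pin count) reproduces the old gpio_first = 15 / gpio_last = 16 pair; GRP_G() additionally carries a mask, a "gpio" value, and a shift for multi-bit mode fields. A hedged sketch of what one initializer plausibly records (field names assumed; the real layout lives in asm/mach-ralink/pinmux.h):

/* sketch: assumed shape of the descriptor behind FUNC("uartlite", 0, 15, 2);
 * field names are illustrative, not copied from pinmux.h */
struct pmx_func_sketch {
	const char	*name;		/* "uartlite"                        */
	int		value;		/* value programmed into mode field  */
	int		pin_first;	/* 15 ...                            */
	int		pin_count;	/* ... for 2 pins: 15 and 16         */
};

static struct pmx_func_sketch uartlite_sketch = { "uartlite", 0, 15, 2 };
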
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
index 7c4598cb6de8..0d30dcd63246 100644
--- a/arch/mips/ralink/of.c
+++ b/arch/mips/ralink/of.c
@@ -53,6 +53,17 @@ void __init device_tree_init(void)
53 unflatten_and_copy_device_tree(); 53 unflatten_and_copy_device_tree();
54} 54}
55 55
56static int memory_dtb;
57
58static int __init early_init_dt_find_memory(unsigned long node,
59 const char *uname, int depth, void *data)
60{
61 if (depth == 1 && !strcmp(uname, "memory@0"))
62 memory_dtb = 1;
63
64 return 0;
65}
66
56void __init plat_mem_setup(void) 67void __init plat_mem_setup(void)
57{ 68{
58 set_io_port_base(KSEG1); 69 set_io_port_base(KSEG1);
@@ -63,7 +74,12 @@ void __init plat_mem_setup(void)
63 */ 74 */
64 __dt_setup_arch(__dtb_start); 75 __dt_setup_arch(__dtb_start);
65 76
66 if (soc_info.mem_size) 77 strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
78
79 of_scan_flat_dt(early_init_dt_find_memory, NULL);
80 if (memory_dtb)
81 of_scan_flat_dt(early_init_dt_scan_memory, NULL);
82 else if (soc_info.mem_size)
67 add_memory_region(soc_info.mem_base, soc_info.mem_size * SZ_1M, 83 add_memory_region(soc_info.mem_base, soc_info.mem_size * SZ_1M,
68 BOOT_MEM_RAM); 84 BOOT_MEM_RAM);
69 else 85 else
@@ -74,19 +90,9 @@ void __init plat_mem_setup(void)
74 90
75static int __init plat_of_setup(void) 91static int __init plat_of_setup(void)
76{ 92{
77 static struct of_device_id of_ids[3]; 93 __dt_register_buses(soc_info.compatible, "palmbus");
78 int len = sizeof(of_ids[0].compatible);
79
80 if (!of_have_populated_dt())
81 panic("device tree not present");
82
83 strlcpy(of_ids[0].compatible, soc_info.compatible, len);
84 strlcpy(of_ids[1].compatible, "palmbus", len);
85
86 if (of_platform_populate(NULL, of_ids, NULL, NULL))
87 panic("failed to populate DT");
88 94
89 /* make sure ithat the reset controller is setup early */ 95 /* make sure that the reset controller is setup early */
90 ralink_rst_init(); 96 ralink_rst_init();
91 97
92 return 0; 98 return 0;
diff --git a/arch/mips/ralink/prom.c b/arch/mips/ralink/prom.c
index 9c64f029d047..09419f67da39 100644
--- a/arch/mips/ralink/prom.c
+++ b/arch/mips/ralink/prom.c
@@ -18,6 +18,7 @@
18#include "common.h" 18#include "common.h"
19 19
20struct ralink_soc_info soc_info; 20struct ralink_soc_info soc_info;
21struct rt2880_pmx_group *rt2880_pinmux_data = NULL;
21 22
22const char *get_system_type(void) 23const char *get_system_type(void)
23{ 24{
diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c
index f87de1ab2198..738cec865f41 100644
--- a/arch/mips/ralink/rt288x.c
+++ b/arch/mips/ralink/rt288x.c
@@ -17,46 +17,27 @@
17#include <asm/mipsregs.h> 17#include <asm/mipsregs.h>
18#include <asm/mach-ralink/ralink_regs.h> 18#include <asm/mach-ralink/ralink_regs.h>
19#include <asm/mach-ralink/rt288x.h> 19#include <asm/mach-ralink/rt288x.h>
20#include <asm/mach-ralink/pinmux.h>
20 21
21#include "common.h" 22#include "common.h"
22 23
23static struct ralink_pinmux_grp mode_mux[] = { 24static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
24 { 25static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
25 .name = "i2c", 26static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 7, 8) };
26 .mask = RT2880_GPIO_MODE_I2C, 27static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
27 .gpio_first = 1, 28static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
28 .gpio_last = 2, 29static struct rt2880_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) };
29 }, { 30static struct rt2880_pmx_func pci_func[] = { FUNC("pci", 0, 40, 32) };
30 .name = "spi", 31
31 .mask = RT2880_GPIO_MODE_SPI, 32static struct rt2880_pmx_group rt2880_pinmux_data_act[] = {
32 .gpio_first = 3, 33 GRP("i2c", i2c_func, 1, RT2880_GPIO_MODE_I2C),
33 .gpio_last = 6, 34 GRP("spi", spi_func, 1, RT2880_GPIO_MODE_SPI),
34 }, { 35 GRP("uartlite", uartlite_func, 1, RT2880_GPIO_MODE_UART0),
35 .name = "uartlite", 36 GRP("jtag", jtag_func, 1, RT2880_GPIO_MODE_JTAG),
36 .mask = RT2880_GPIO_MODE_UART0, 37 GRP("mdio", mdio_func, 1, RT2880_GPIO_MODE_MDIO),
37 .gpio_first = 7, 38 GRP("sdram", sdram_func, 1, RT2880_GPIO_MODE_SDRAM),
38 .gpio_last = 14, 39 GRP("pci", pci_func, 1, RT2880_GPIO_MODE_PCI),
39 }, { 40 { 0 }
40 .name = "jtag",
41 .mask = RT2880_GPIO_MODE_JTAG,
42 .gpio_first = 17,
43 .gpio_last = 21,
44 }, {
45 .name = "mdio",
46 .mask = RT2880_GPIO_MODE_MDIO,
47 .gpio_first = 22,
48 .gpio_last = 23,
49 }, {
50 .name = "sdram",
51 .mask = RT2880_GPIO_MODE_SDRAM,
52 .gpio_first = 24,
53 .gpio_last = 39,
54 }, {
55 .name = "pci",
56 .mask = RT2880_GPIO_MODE_PCI,
57 .gpio_first = 40,
58 .gpio_last = 71,
59 }, {0}
60}; 41};
61 42
62static void rt288x_wdt_reset(void) 43static void rt288x_wdt_reset(void)
@@ -69,14 +50,9 @@ static void rt288x_wdt_reset(void)
69 rt_sysc_w32(t, SYSC_REG_CLKCFG); 50 rt_sysc_w32(t, SYSC_REG_CLKCFG);
70} 51}
71 52
72struct ralink_pinmux rt_gpio_pinmux = {
73 .mode = mode_mux,
74 .wdt_reset = rt288x_wdt_reset,
75};
76
77void __init ralink_clk_init(void) 53void __init ralink_clk_init(void)
78{ 54{
79 unsigned long cpu_rate; 55 unsigned long cpu_rate, wmac_rate = 40000000;
80 u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG); 56 u32 t = rt_sysc_r32(SYSC_REG_SYSTEM_CONFIG);
81 t = ((t >> SYSTEM_CONFIG_CPUCLK_SHIFT) & SYSTEM_CONFIG_CPUCLK_MASK); 57 t = ((t >> SYSTEM_CONFIG_CPUCLK_SHIFT) & SYSTEM_CONFIG_CPUCLK_MASK);
82 58
@@ -101,6 +77,7 @@ void __init ralink_clk_init(void)
101 ralink_clk_add("300500.uart", cpu_rate / 2); 77 ralink_clk_add("300500.uart", cpu_rate / 2);
102 ralink_clk_add("300c00.uartlite", cpu_rate / 2); 78 ralink_clk_add("300c00.uartlite", cpu_rate / 2);
103 ralink_clk_add("400000.ethernet", cpu_rate / 2); 79 ralink_clk_add("400000.ethernet", cpu_rate / 2);
80 ralink_clk_add("480000.wmac", wmac_rate);
104} 81}
105 82
106void __init ralink_of_remap(void) 83void __init ralink_of_remap(void)
@@ -140,4 +117,6 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
140 soc_info->mem_base = RT2880_SDRAM_BASE; 117 soc_info->mem_base = RT2880_SDRAM_BASE;
141 soc_info->mem_size_min = RT2880_MEM_SIZE_MIN; 118 soc_info->mem_size_min = RT2880_MEM_SIZE_MIN;
142 soc_info->mem_size_max = RT2880_MEM_SIZE_MAX; 119 soc_info->mem_size_max = RT2880_MEM_SIZE_MAX;
120
121 rt2880_pinmux_data = rt2880_pinmux_data_act;
143} 122}
diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c
index bb82a82da9e7..c40776ab67db 100644
--- a/arch/mips/ralink/rt305x.c
+++ b/arch/mips/ralink/rt305x.c
@@ -17,90 +17,78 @@
17#include <asm/mipsregs.h> 17#include <asm/mipsregs.h>
18#include <asm/mach-ralink/ralink_regs.h> 18#include <asm/mach-ralink/ralink_regs.h>
19#include <asm/mach-ralink/rt305x.h> 19#include <asm/mach-ralink/rt305x.h>
20#include <asm/mach-ralink/pinmux.h>
20 21
21#include "common.h" 22#include "common.h"
22 23
23enum rt305x_soc_type rt305x_soc; 24enum rt305x_soc_type rt305x_soc;
24 25
25static struct ralink_pinmux_grp mode_mux[] = {
26	{
27		.name = "i2c",
28		.mask = RT305X_GPIO_MODE_I2C,
29		.gpio_first = RT305X_GPIO_I2C_SD,
30		.gpio_last = RT305X_GPIO_I2C_SCLK,
31	}, {
32		.name = "spi",
33		.mask = RT305X_GPIO_MODE_SPI,
34		.gpio_first = RT305X_GPIO_SPI_EN,
35		.gpio_last = RT305X_GPIO_SPI_CLK,
36	}, {
37		.name = "uartlite",
38		.mask = RT305X_GPIO_MODE_UART1,
39		.gpio_first = RT305X_GPIO_UART1_TXD,
40		.gpio_last = RT305X_GPIO_UART1_RXD,
41	}, {
42		.name = "jtag",
43		.mask = RT305X_GPIO_MODE_JTAG,
44		.gpio_first = RT305X_GPIO_JTAG_TDO,
45		.gpio_last = RT305X_GPIO_JTAG_TDI,
46	}, {
47		.name = "mdio",
48		.mask = RT305X_GPIO_MODE_MDIO,
49		.gpio_first = RT305X_GPIO_MDIO_MDC,
50		.gpio_last = RT305X_GPIO_MDIO_MDIO,
51	}, {
52		.name = "sdram",
53		.mask = RT305X_GPIO_MODE_SDRAM,
54		.gpio_first = RT305X_GPIO_SDRAM_MD16,
55		.gpio_last = RT305X_GPIO_SDRAM_MD31,
56	}, {
57		.name = "rgmii",
58		.mask = RT305X_GPIO_MODE_RGMII,
59		.gpio_first = RT305X_GPIO_GE0_TXD0,
60		.gpio_last = RT305X_GPIO_GE0_RXCLK,
61	}, {0}
 26static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
 27static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
 28static struct rt2880_pmx_func uartf_func[] = {
 29	FUNC("uartf", RT305X_GPIO_MODE_UARTF, 7, 8),
 30	FUNC("pcm uartf", RT305X_GPIO_MODE_PCM_UARTF, 7, 8),
 31	FUNC("pcm i2s", RT305X_GPIO_MODE_PCM_I2S, 7, 8),
 32	FUNC("i2s uartf", RT305X_GPIO_MODE_I2S_UARTF, 7, 8),
 33	FUNC("pcm gpio", RT305X_GPIO_MODE_PCM_GPIO, 11, 4),
 34	FUNC("gpio uartf", RT305X_GPIO_MODE_GPIO_UARTF, 7, 4),
 35	FUNC("gpio i2s", RT305X_GPIO_MODE_GPIO_I2S, 7, 4),
 36};
 37static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
 38static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
 39static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
 40static struct rt2880_pmx_func rt5350_led_func[] = { FUNC("led", 0, 22, 5) };
 41static struct rt2880_pmx_func rt5350_cs1_func[] = {
 42	FUNC("spi_cs1", 0, 27, 1),
 43	FUNC("wdg_cs1", 1, 27, 1),
 44};
 45static struct rt2880_pmx_func sdram_func[] = { FUNC("sdram", 0, 24, 16) };
 46static struct rt2880_pmx_func rt3352_rgmii_func[] = {
 47	FUNC("rgmii", 0, 24, 12)
 48};
 49static struct rt2880_pmx_func rgmii_func[] = { FUNC("rgmii", 0, 40, 12) };
 50static struct rt2880_pmx_func rt3352_lna_func[] = { FUNC("lna", 0, 36, 2) };
 51static struct rt2880_pmx_func rt3352_pa_func[] = { FUNC("pa", 0, 38, 2) };
 52static struct rt2880_pmx_func rt3352_led_func[] = { FUNC("led", 0, 40, 5) };
 53
 54static struct rt2880_pmx_group rt3050_pinmux_data[] = {
 55	GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
 56	GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI),
 57	GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK,
 58	    RT305X_GPIO_MODE_UART0_SHIFT),
 59	GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1),
 60	GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG),
 61	GRP("mdio", mdio_func, 1, RT305X_GPIO_MODE_MDIO),
 62	GRP("rgmii", rgmii_func, 1, RT305X_GPIO_MODE_RGMII),
 63	GRP("sdram", sdram_func, 1, RT305X_GPIO_MODE_SDRAM),
 64	{ 0 }
62}; 65};
63 66
64static struct ralink_pinmux_grp uart_mux[] = {
65	{
66		.name = "uartf",
67		.mask = RT305X_GPIO_MODE_UARTF,
68		.gpio_first = RT305X_GPIO_7,
69		.gpio_last = RT305X_GPIO_14,
70	}, {
71		.name = "pcm uartf",
72		.mask = RT305X_GPIO_MODE_PCM_UARTF,
73		.gpio_first = RT305X_GPIO_7,
74		.gpio_last = RT305X_GPIO_14,
75	}, {
76		.name = "pcm i2s",
77		.mask = RT305X_GPIO_MODE_PCM_I2S,
78		.gpio_first = RT305X_GPIO_7,
79		.gpio_last = RT305X_GPIO_14,
80	}, {
81		.name = "i2s uartf",
82		.mask = RT305X_GPIO_MODE_I2S_UARTF,
83		.gpio_first = RT305X_GPIO_7,
84		.gpio_last = RT305X_GPIO_14,
85	}, {
86		.name = "pcm gpio",
87		.mask = RT305X_GPIO_MODE_PCM_GPIO,
88		.gpio_first = RT305X_GPIO_10,
89		.gpio_last = RT305X_GPIO_14,
90	}, {
91		.name = "gpio uartf",
92		.mask = RT305X_GPIO_MODE_GPIO_UARTF,
93		.gpio_first = RT305X_GPIO_7,
94		.gpio_last = RT305X_GPIO_10,
95	}, {
96		.name = "gpio i2s",
97		.mask = RT305X_GPIO_MODE_GPIO_I2S,
98		.gpio_first = RT305X_GPIO_7,
99		.gpio_last = RT305X_GPIO_10,
100	}, {
101		.name = "gpio",
102		.mask = RT305X_GPIO_MODE_GPIO,
103	}, {0}
 67static struct rt2880_pmx_group rt3352_pinmux_data[] = {
 68	GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
 69	GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI),
 70	GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK,
 71	    RT305X_GPIO_MODE_UART0_SHIFT),
 72	GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1),
 73	GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG),
 74	GRP("mdio", mdio_func, 1, RT305X_GPIO_MODE_MDIO),
 75	GRP("rgmii", rt3352_rgmii_func, 1, RT305X_GPIO_MODE_RGMII),
 76	GRP("lna", rt3352_lna_func, 1, RT3352_GPIO_MODE_LNA),
 77	GRP("pa", rt3352_pa_func, 1, RT3352_GPIO_MODE_PA),
 78	GRP("led", rt3352_led_func, 1, RT5350_GPIO_MODE_PHY_LED),
 79	{ 0 }
 80};
 81
 82static struct rt2880_pmx_group rt5350_pinmux_data[] = {
 83	GRP("i2c", i2c_func, 1, RT305X_GPIO_MODE_I2C),
 84	GRP("spi", spi_func, 1, RT305X_GPIO_MODE_SPI),
 85	GRP("uartf", uartf_func, RT305X_GPIO_MODE_UART0_MASK,
 86	    RT305X_GPIO_MODE_UART0_SHIFT),
 87	GRP("uartlite", uartlite_func, 1, RT305X_GPIO_MODE_UART1),
 88	GRP("jtag", jtag_func, 1, RT305X_GPIO_MODE_JTAG),
 89	GRP("led", rt5350_led_func, 1, RT5350_GPIO_MODE_PHY_LED),
 90	GRP("spi_cs1", rt5350_cs1_func, 2, RT5350_GPIO_MODE_SPI_CS1),
 91	{ 0 }
104}; 92};
105 93
106static void rt305x_wdt_reset(void) 94static void rt305x_wdt_reset(void)
@@ -114,14 +102,6 @@ static void rt305x_wdt_reset(void)
114 rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG); 102 rt_sysc_w32(t, SYSC_REG_SYSTEM_CONFIG);
115} 103}
116 104
117struct ralink_pinmux rt_gpio_pinmux = {
118 .mode = mode_mux,
119 .uart = uart_mux,
120 .uart_shift = RT305X_GPIO_MODE_UART0_SHIFT,
121 .uart_mask = RT305X_GPIO_MODE_UART0_MASK,
122 .wdt_reset = rt305x_wdt_reset,
123};
124
125static unsigned long rt5350_get_mem_size(void) 105static unsigned long rt5350_get_mem_size(void)
126{ 106{
127 void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE); 107 void __iomem *sysc = (void __iomem *) KSEG1ADDR(RT305X_SYSC_BASE);
@@ -290,11 +270,14 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
290 soc_info->mem_base = RT305X_SDRAM_BASE; 270 soc_info->mem_base = RT305X_SDRAM_BASE;
291 if (soc_is_rt5350()) { 271 if (soc_is_rt5350()) {
292 soc_info->mem_size = rt5350_get_mem_size(); 272 soc_info->mem_size = rt5350_get_mem_size();
273 rt2880_pinmux_data = rt5350_pinmux_data;
293 } else if (soc_is_rt305x() || soc_is_rt3350()) { 274 } else if (soc_is_rt305x() || soc_is_rt3350()) {
294 soc_info->mem_size_min = RT305X_MEM_SIZE_MIN; 275 soc_info->mem_size_min = RT305X_MEM_SIZE_MIN;
295 soc_info->mem_size_max = RT305X_MEM_SIZE_MAX; 276 soc_info->mem_size_max = RT305X_MEM_SIZE_MAX;
277 rt2880_pinmux_data = rt3050_pinmux_data;
296 } else if (soc_is_rt3352()) { 278 } else if (soc_is_rt3352()) {
297 soc_info->mem_size_min = RT3352_MEM_SIZE_MIN; 279 soc_info->mem_size_min = RT3352_MEM_SIZE_MIN;
298 soc_info->mem_size_max = RT3352_MEM_SIZE_MAX; 280 soc_info->mem_size_max = RT3352_MEM_SIZE_MAX;
281 rt2880_pinmux_data = rt3352_pinmux_data;
299 } 282 }
300} 283}
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
index b474ac284b83..86a535c770d8 100644
--- a/arch/mips/ralink/rt3883.c
+++ b/arch/mips/ralink/rt3883.c
@@ -17,132 +17,50 @@
17#include <asm/mipsregs.h> 17#include <asm/mipsregs.h>
18#include <asm/mach-ralink/ralink_regs.h> 18#include <asm/mach-ralink/ralink_regs.h>
19#include <asm/mach-ralink/rt3883.h> 19#include <asm/mach-ralink/rt3883.h>
20#include <asm/mach-ralink/pinmux.h>
20 21
21#include "common.h" 22#include "common.h"
22 23
23static struct ralink_pinmux_grp mode_mux[] = {
24	{
25		.name = "i2c",
26		.mask = RT3883_GPIO_MODE_I2C,
27		.gpio_first = RT3883_GPIO_I2C_SD,
28		.gpio_last = RT3883_GPIO_I2C_SCLK,
29	}, {
30		.name = "spi",
31		.mask = RT3883_GPIO_MODE_SPI,
32		.gpio_first = RT3883_GPIO_SPI_CS0,
33		.gpio_last = RT3883_GPIO_SPI_MISO,
34	}, {
35		.name = "uartlite",
36		.mask = RT3883_GPIO_MODE_UART1,
37		.gpio_first = RT3883_GPIO_UART1_TXD,
38		.gpio_last = RT3883_GPIO_UART1_RXD,
39	}, {
40		.name = "jtag",
41		.mask = RT3883_GPIO_MODE_JTAG,
42		.gpio_first = RT3883_GPIO_JTAG_TDO,
43		.gpio_last = RT3883_GPIO_JTAG_TCLK,
44	}, {
45		.name = "mdio",
46		.mask = RT3883_GPIO_MODE_MDIO,
47		.gpio_first = RT3883_GPIO_MDIO_MDC,
48		.gpio_last = RT3883_GPIO_MDIO_MDIO,
49	}, {
50		.name = "ge1",
51		.mask = RT3883_GPIO_MODE_GE1,
52		.gpio_first = RT3883_GPIO_GE1_TXD0,
53		.gpio_last = RT3883_GPIO_GE1_RXCLK,
54	}, {
55		.name = "ge2",
56		.mask = RT3883_GPIO_MODE_GE2,
57		.gpio_first = RT3883_GPIO_GE2_TXD0,
58		.gpio_last = RT3883_GPIO_GE2_RXCLK,
59	}, {
60		.name = "pci",
61		.mask = RT3883_GPIO_MODE_PCI,
62		.gpio_first = RT3883_GPIO_PCI_AD0,
63		.gpio_last = RT3883_GPIO_PCI_AD31,
64	}, {
65		.name = "lna a",
66		.mask = RT3883_GPIO_MODE_LNA_A,
67		.gpio_first = RT3883_GPIO_LNA_PE_A0,
68		.gpio_last = RT3883_GPIO_LNA_PE_A2,
69	}, {
70		.name = "lna g",
71		.mask = RT3883_GPIO_MODE_LNA_G,
72		.gpio_first = RT3883_GPIO_LNA_PE_G0,
73		.gpio_last = RT3883_GPIO_LNA_PE_G2,
74	}, {0}
 24static struct rt2880_pmx_func i2c_func[] = { FUNC("i2c", 0, 1, 2) };
 25static struct rt2880_pmx_func spi_func[] = { FUNC("spi", 0, 3, 4) };
 26static struct rt2880_pmx_func uartf_func[] = {
 27	FUNC("uartf", RT3883_GPIO_MODE_UARTF, 7, 8),
 28	FUNC("pcm uartf", RT3883_GPIO_MODE_PCM_UARTF, 7, 8),
 29	FUNC("pcm i2s", RT3883_GPIO_MODE_PCM_I2S, 7, 8),
 30	FUNC("i2s uartf", RT3883_GPIO_MODE_I2S_UARTF, 7, 8),
 31	FUNC("pcm gpio", RT3883_GPIO_MODE_PCM_GPIO, 11, 4),
 32	FUNC("gpio uartf", RT3883_GPIO_MODE_GPIO_UARTF, 7, 4),
 33	FUNC("gpio i2s", RT3883_GPIO_MODE_GPIO_I2S, 7, 4),
75}; 34};
76
77static struct ralink_pinmux_grp uart_mux[] = {
78	{
79		.name = "uartf",
80		.mask = RT3883_GPIO_MODE_UARTF,
81		.gpio_first = RT3883_GPIO_7,
82		.gpio_last = RT3883_GPIO_14,
83	}, {
84		.name = "pcm uartf",
85		.mask = RT3883_GPIO_MODE_PCM_UARTF,
86		.gpio_first = RT3883_GPIO_7,
87		.gpio_last = RT3883_GPIO_14,
88	}, {
89		.name = "pcm i2s",
90		.mask = RT3883_GPIO_MODE_PCM_I2S,
91		.gpio_first = RT3883_GPIO_7,
92		.gpio_last = RT3883_GPIO_14,
93	}, {
94		.name = "i2s uartf",
95		.mask = RT3883_GPIO_MODE_I2S_UARTF,
96		.gpio_first = RT3883_GPIO_7,
97		.gpio_last = RT3883_GPIO_14,
98	}, {
99		.name = "pcm gpio",
100		.mask = RT3883_GPIO_MODE_PCM_GPIO,
101		.gpio_first = RT3883_GPIO_11,
102		.gpio_last = RT3883_GPIO_14,
103	}, {
104		.name = "gpio uartf",
105		.mask = RT3883_GPIO_MODE_GPIO_UARTF,
106		.gpio_first = RT3883_GPIO_7,
107		.gpio_last = RT3883_GPIO_10,
108	}, {
109		.name = "gpio i2s",
110		.mask = RT3883_GPIO_MODE_GPIO_I2S,
111		.gpio_first = RT3883_GPIO_7,
112		.gpio_last = RT3883_GPIO_10,
113	}, {
114		.name = "gpio",
115		.mask = RT3883_GPIO_MODE_GPIO,
116	}, {0}
 35static struct rt2880_pmx_func uartlite_func[] = { FUNC("uartlite", 0, 15, 2) };
 36static struct rt2880_pmx_func jtag_func[] = { FUNC("jtag", 0, 17, 5) };
 37static struct rt2880_pmx_func mdio_func[] = { FUNC("mdio", 0, 22, 2) };
 38static struct rt2880_pmx_func lna_a_func[] = { FUNC("lna a", 0, 32, 3) };
 39static struct rt2880_pmx_func lna_g_func[] = { FUNC("lna g", 0, 35, 3) };
 40static struct rt2880_pmx_func pci_func[] = {
 41	FUNC("pci-dev", 0, 40, 32),
 42	FUNC("pci-host2", 1, 40, 32),
 43	FUNC("pci-host1", 2, 40, 32),
 44	FUNC("pci-fnc", 3, 40, 32)
117}; 45};
118
119static struct ralink_pinmux_grp pci_mux[] = {
120	{
121		.name = "pci-dev",
122		.mask = 0,
123		.gpio_first = RT3883_GPIO_PCI_AD0,
124		.gpio_last = RT3883_GPIO_PCI_AD31,
125	}, {
126		.name = "pci-host2",
127		.mask = 1,
128		.gpio_first = RT3883_GPIO_PCI_AD0,
129		.gpio_last = RT3883_GPIO_PCI_AD31,
130	}, {
131		.name = "pci-host1",
132		.mask = 2,
133		.gpio_first = RT3883_GPIO_PCI_AD0,
134		.gpio_last = RT3883_GPIO_PCI_AD31,
135	}, {
136		.name = "pci-fnc",
137		.mask = 3,
138		.gpio_first = RT3883_GPIO_PCI_AD0,
139		.gpio_last = RT3883_GPIO_PCI_AD31,
140	}, {
141		.name = "pci-gpio",
142		.mask = 7,
143		.gpio_first = RT3883_GPIO_PCI_AD0,
144		.gpio_last = RT3883_GPIO_PCI_AD31,
145	}, {0}
 46static struct rt2880_pmx_func ge1_func[] = { FUNC("ge1", 0, 72, 12) };
 47static struct rt2880_pmx_func ge2_func[] = { FUNC("ge2", 0, 84, 12) };
 48
 49static struct rt2880_pmx_group rt3883_pinmux_data[] = {
 50	GRP("i2c", i2c_func, 1, RT3883_GPIO_MODE_I2C),
 51	GRP("spi", spi_func, 1, RT3883_GPIO_MODE_SPI),
 52	GRP("uartf", uartf_func, RT3883_GPIO_MODE_UART0_MASK,
 53	    RT3883_GPIO_MODE_UART0_SHIFT),
 54	GRP("uartlite", uartlite_func, 1, RT3883_GPIO_MODE_UART1),
 55	GRP("jtag", jtag_func, 1, RT3883_GPIO_MODE_JTAG),
 56	GRP("mdio", mdio_func, 1, RT3883_GPIO_MODE_MDIO),
 57	GRP("lna a", lna_a_func, 1, RT3883_GPIO_MODE_LNA_A),
 58	GRP("lna g", lna_g_func, 1, RT3883_GPIO_MODE_LNA_G),
 59	GRP("pci", pci_func, RT3883_GPIO_MODE_PCI_MASK,
 60	    RT3883_GPIO_MODE_PCI_SHIFT),
 61	GRP("ge1", ge1_func, 1, RT3883_GPIO_MODE_GE1),
 62	GRP("ge2", ge2_func, 1, RT3883_GPIO_MODE_GE2),
 63	{ 0 }
146}; 64};
147 65
148static void rt3883_wdt_reset(void) 66static void rt3883_wdt_reset(void)
@@ -155,17 +73,6 @@ static void rt3883_wdt_reset(void)
155 rt_sysc_w32(t, RT3883_SYSC_REG_SYSCFG1); 73 rt_sysc_w32(t, RT3883_SYSC_REG_SYSCFG1);
156} 74}
157 75
158struct ralink_pinmux rt_gpio_pinmux = {
159 .mode = mode_mux,
160 .uart = uart_mux,
161 .uart_shift = RT3883_GPIO_MODE_UART0_SHIFT,
162 .uart_mask = RT3883_GPIO_MODE_UART0_MASK,
163 .wdt_reset = rt3883_wdt_reset,
164 .pci = pci_mux,
165 .pci_shift = RT3883_GPIO_MODE_PCI_SHIFT,
166 .pci_mask = RT3883_GPIO_MODE_PCI_MASK,
167};
168
169void __init ralink_clk_init(void) 76void __init ralink_clk_init(void)
170{ 77{
171 unsigned long cpu_rate, sys_rate; 78 unsigned long cpu_rate, sys_rate;
@@ -204,6 +111,7 @@ void __init ralink_clk_init(void)
204 ralink_clk_add("10000b00.spi", sys_rate); 111 ralink_clk_add("10000b00.spi", sys_rate);
205 ralink_clk_add("10000c00.uartlite", 40000000); 112 ralink_clk_add("10000c00.uartlite", 40000000);
206 ralink_clk_add("10100000.ethernet", sys_rate); 113 ralink_clk_add("10100000.ethernet", sys_rate);
114 ralink_clk_add("10180000.wmac", 40000000);
207} 115}
208 116
209void __init ralink_of_remap(void) 117void __init ralink_of_remap(void)
@@ -243,4 +151,6 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
243 soc_info->mem_base = RT3883_SDRAM_BASE; 151 soc_info->mem_base = RT3883_SDRAM_BASE;
244 soc_info->mem_size_min = RT3883_MEM_SIZE_MIN; 152 soc_info->mem_size_min = RT3883_MEM_SIZE_MIN;
245 soc_info->mem_size_max = RT3883_MEM_SIZE_MAX; 153 soc_info->mem_size_max = RT3883_MEM_SIZE_MAX;
154
155 rt2880_pinmux_data = rt3883_pinmux_data;
246} 156}
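
All three ralink SoC files above now build their tables from the FUNC() and GRP() helpers in <asm/mach-ralink/pinmux.h>, which is not part of this hunk. A sketch of the shape FUNC() plausibly expands to, with field names assumed for illustration only:

	/* Sketch only: plausible expansion of FUNC(); the real struct and
	 * macro live in <asm/mach-ralink/pinmux.h>. With these assumed
	 * fields, FUNC("uartf", RT305X_GPIO_MODE_UARTF, 7, 8) reads as
	 * "function uartf, mux value UARTF, pins 7..14". */
	struct rt2880_pmx_func_sketch {
		const char *name;	/* function name exposed to pinctrl */
		int value;		/* value programmed into the mode field */
		int pin_first;		/* first pin claimed */
		int pin_count;		/* number of consecutive pins */
	};

	#define FUNC_SKETCH(name, value, pin_first, pin_count) \
		{ (name), (value), (pin_first), (pin_count) }
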
diff --git a/arch/mips/rb532/gpio.c b/arch/mips/rb532/gpio.c
index a18007613c30..5aa3df853082 100644
--- a/arch/mips/rb532/gpio.c
+++ b/arch/mips/rb532/gpio.c
@@ -79,7 +79,7 @@ static inline void rb532_set_bit(unsigned bitval,
79 */ 79 */
80static inline int rb532_get_bit(unsigned offset, void __iomem *ioaddr) 80static inline int rb532_get_bit(unsigned offset, void __iomem *ioaddr)
81{ 81{
82 return (readl(ioaddr) & (1 << offset)); 82 return readl(ioaddr) & (1 << offset);
83} 83}
84 84
85/* 85/*
diff --git a/arch/mips/rb532/prom.c b/arch/mips/rb532/prom.c
index a757ded437cd..657210e767c2 100644
--- a/arch/mips/rb532/prom.c
+++ b/arch/mips/rb532/prom.c
@@ -122,8 +122,8 @@ void __init prom_setup_cmdline(void)
122void __init prom_init(void) 122void __init prom_init(void)
123{ 123{
124 struct ddr_ram __iomem *ddr; 124 struct ddr_ram __iomem *ddr;
125 phys_t memsize; 125 phys_addr_t memsize;
126 phys_t ddrbase; 126 phys_addr_t ddrbase;
127 127
128 ddr = ioremap_nocache(ddr_reg[0].start, 128 ddr = ioremap_nocache(ddr_reg[0].start,
129 ddr_reg[0].end - ddr_reg[0].start); 129 ddr_reg[0].end - ddr_reg[0].start);
@@ -133,8 +133,8 @@ void __init prom_init(void)
133 return; 133 return;
134 } 134 }
135 135
136 ddrbase = (phys_t)&ddr->ddrbase; 136 ddrbase = (phys_addr_t)&ddr->ddrbase;
137 memsize = (phys_t)&ddr->ddrmask; 137 memsize = (phys_addr_t)&ddr->ddrmask;
138 memsize = 0 - memsize; 138 memsize = 0 - memsize;
139 139
140 prom_setup_cmdline(); 140 prom_setup_cmdline();
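
The phys_t to phys_addr_t switch here (and in the sibyte files further down) is a mechanical rename: the MIPS-private type gives way to the generic kernel one, with no change of width. A sketch mirroring how <linux/types.h> sizes the generic type:

	/* Sketch: phys_addr_t is sized by CONFIG_PHYS_ADDR_T_64BIT, the
	 * option that likewise supersedes the old CONFIG_64BIT_PHYS_ADDR
	 * (see the sibyte/common/cfe.c hunk below), so the rename is
	 * one-for-one on every configuration. */
	#include <stdint.h>

	#ifdef CONFIG_PHYS_ADDR_T_64BIT
	typedef uint64_t example_phys_addr_t;
	#else
	typedef uint32_t example_phys_addr_t;
	#endif
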
diff --git a/arch/mips/sgi-ip22/ip22-mc.c b/arch/mips/sgi-ip22/ip22-mc.c
index 7cec0a4e527d..6b009c45abed 100644
--- a/arch/mips/sgi-ip22/ip22-mc.c
+++ b/arch/mips/sgi-ip22/ip22-mc.c
@@ -24,14 +24,12 @@ EXPORT_SYMBOL(sgimc);
24 24
25static inline unsigned long get_bank_addr(unsigned int memconfig) 25static inline unsigned long get_bank_addr(unsigned int memconfig)
26{ 26{
27	return ((memconfig & SGIMC_MCONFIG_BASEADDR) <<
28		((sgimc->systemid & SGIMC_SYSID_MASKREV) >= 5 ? 24 : 22));
 27	return (memconfig & SGIMC_MCONFIG_BASEADDR) << ((sgimc->systemid & SGIMC_SYSID_MASKREV) >= 5 ? 24 : 22);
29} 28}
30 29
31static inline unsigned long get_bank_size(unsigned int memconfig) 30static inline unsigned long get_bank_size(unsigned int memconfig)
32{ 31{
33	return ((memconfig & SGIMC_MCONFIG_RMASK) + 0x0100) <<
34		((sgimc->systemid & SGIMC_SYSID_MASKREV) >= 5 ? 16 : 14);
 32	return ((memconfig & SGIMC_MCONFIG_RMASK) + 0x0100) << ((sgimc->systemid & SGIMC_SYSID_MASKREV) >= 5 ? 16 : 14);
35} 33}
36 34
37static inline unsigned int get_bank_config(int bank) 35static inline unsigned int get_bank_config(int bank)
diff --git a/arch/mips/sgi-ip22/ip28-berr.c b/arch/mips/sgi-ip22/ip28-berr.c
index 3f47346608d7..712cc0f6a58d 100644
--- a/arch/mips/sgi-ip22/ip28-berr.c
+++ b/arch/mips/sgi-ip22/ip28-berr.c
@@ -338,7 +338,7 @@ static int check_microtlb(u32 hi, u32 lo, unsigned long vaddr)
338 PHYS_TO_XKSEG_UNCACHED(pte); 338 PHYS_TO_XKSEG_UNCACHED(pte);
339 a = (a & 0x3f) << 6; /* PFN */ 339 a = (a & 0x3f) << 6; /* PFN */
340 a += vaddr & ((1 << pgsz) - 1); 340 a += vaddr & ((1 << pgsz) - 1);
341 return (cpu_err_addr == a); 341 return cpu_err_addr == a;
342 } 342 }
343 } 343 }
344 } 344 }
@@ -351,7 +351,7 @@ static int check_vdma_memaddr(void)
351 u32 a = sgimc->maddronly; 351 u32 a = sgimc->maddronly;
352 352
353 if (!(sgimc->dma_ctrl & 0x100)) /* Xlate-bit clear ? */ 353 if (!(sgimc->dma_ctrl & 0x100)) /* Xlate-bit clear ? */
354 return (cpu_err_addr == a); 354 return cpu_err_addr == a;
355 355
356 if (check_microtlb(sgimc->dtlb_hi0, sgimc->dtlb_lo0, a) || 356 if (check_microtlb(sgimc->dtlb_hi0, sgimc->dtlb_lo0, a) ||
357 check_microtlb(sgimc->dtlb_hi1, sgimc->dtlb_lo1, a) || 357 check_microtlb(sgimc->dtlb_hi1, sgimc->dtlb_lo1, a) ||
@@ -367,7 +367,7 @@ static int check_vdma_gioaddr(void)
367 if (gio_err_stat & GIO_ERRMASK) { 367 if (gio_err_stat & GIO_ERRMASK) {
368 u32 a = sgimc->gio_dma_trans; 368 u32 a = sgimc->gio_dma_trans;
369 a = (sgimc->gmaddronly & ~a) | (sgimc->gio_dma_sbits & a); 369 a = (sgimc->gmaddronly & ~a) | (sgimc->gio_dma_sbits & a);
370 return (gio_err_addr == a); 370 return gio_err_addr == a;
371 } 371 }
372 return 0; 372 return 0;
373} 373}
diff --git a/arch/mips/sgi-ip27/ip27-klnuma.c b/arch/mips/sgi-ip27/ip27-klnuma.c
index 7a53b1e28a93..ecbb62f339c5 100644
--- a/arch/mips/sgi-ip27/ip27-klnuma.c
+++ b/arch/mips/sgi-ip27/ip27-klnuma.c
@@ -125,8 +125,7 @@ unsigned long node_getfirstfree(cnodeid_t cnode)
125#endif 125#endif
126 offset = PAGE_ALIGN((unsigned long)(&_end)) - loadbase; 126 offset = PAGE_ALIGN((unsigned long)(&_end)) - loadbase;
127 if ((cnode == 0) || (cpu_isset(cnode, ktext_repmask))) 127 if ((cnode == 0) || (cpu_isset(cnode, ktext_repmask)))
128 return (TO_NODE(nasid, offset) >> PAGE_SHIFT); 128 return TO_NODE(nasid, offset) >> PAGE_SHIFT;
129 else 129 else
130		return (KDM_TO_PHYS(PAGE_ALIGN(SYMMON_STK_ADDR(nasid, 0))) >>
131			PAGE_SHIFT);
 130		return KDM_TO_PHYS(PAGE_ALIGN(SYMMON_STK_ADDR(nasid, 0))) >> PAGE_SHIFT;
132} 131}
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index a304bcc37e4f..0b68469e063f 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -42,8 +42,7 @@ static int fine_mode;
42 42
43static int is_fine_dirmode(void) 43static int is_fine_dirmode(void)
44{ 44{
45	return (((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK)
46		>> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE);
 45	return ((LOCAL_HUB_L(NI_STATUS_REV_ID) & NSRI_REGIONSIZE_MASK) >> NSRI_REGIONSIZE_SHFT) & REGIONSIZE_FINE;
47} 46}
48 47
49static hubreg_t get_region(cnodeid_t cnode) 48static hubreg_t get_region(cnodeid_t cnode)
@@ -288,7 +287,7 @@ static unsigned long __init slot_psize_compute(cnodeid_t node, int slot)
288 if (size <= 128) { 287 if (size <= 128) {
289 if (slot % 4 == 0) { 288 if (slot % 4 == 0) {
290 size <<= 20; /* size in bytes */ 289 size <<= 20; /* size in bytes */
291 return(size >> PAGE_SHIFT); 290 return size >> PAGE_SHIFT;
292 } else 291 } else
293 return 0; 292 return 0;
294 } else { 293 } else {
diff --git a/arch/mips/sibyte/common/cfe.c b/arch/mips/sibyte/common/cfe.c
index 588e1806a1a3..c1a11a11db7f 100644
--- a/arch/mips/sibyte/common/cfe.c
+++ b/arch/mips/sibyte/common/cfe.c
@@ -38,7 +38,7 @@
38#define MAX_RAM_SIZE (~0ULL) 38#define MAX_RAM_SIZE (~0ULL)
39#else 39#else
40#ifdef CONFIG_HIGHMEM 40#ifdef CONFIG_HIGHMEM
41#ifdef CONFIG_64BIT_PHYS_ADDR 41#ifdef CONFIG_PHYS_ADDR_T_64BIT
42#define MAX_RAM_SIZE (~0ULL) 42#define MAX_RAM_SIZE (~0ULL)
43#else 43#else
44#define MAX_RAM_SIZE (0xffffffffULL) 44#define MAX_RAM_SIZE (0xffffffffULL)
@@ -49,8 +49,8 @@
49#endif 49#endif
50 50
51#define SIBYTE_MAX_MEM_REGIONS 8 51#define SIBYTE_MAX_MEM_REGIONS 8
52phys_t board_mem_region_addrs[SIBYTE_MAX_MEM_REGIONS]; 52phys_addr_t board_mem_region_addrs[SIBYTE_MAX_MEM_REGIONS];
53phys_t board_mem_region_sizes[SIBYTE_MAX_MEM_REGIONS]; 53phys_addr_t board_mem_region_sizes[SIBYTE_MAX_MEM_REGIONS];
54unsigned int board_mem_region_count; 54unsigned int board_mem_region_count;
55 55
56int cfe_cons_handle; 56int cfe_cons_handle;
@@ -96,7 +96,7 @@ static void __noreturn cfe_linux_halt(void)
96 96
97static __init void prom_meminit(void) 97static __init void prom_meminit(void)
98{ 98{
99 u64 addr, size, type; /* regardless of 64BIT_PHYS_ADDR */ 99 u64 addr, size, type; /* regardless of PHYS_ADDR_T_64BIT */
100 int mem_flags = 0; 100 int mem_flags = 0;
101 unsigned int idx; 101 unsigned int idx;
102 int rd_flag; 102 int rd_flag;
diff --git a/arch/mips/sibyte/swarm/platform.c b/arch/mips/sibyte/swarm/platform.c
index 9480c14ec66a..1cecdcf85cf1 100644
--- a/arch/mips/sibyte/swarm/platform.c
+++ b/arch/mips/sibyte/swarm/platform.c
@@ -50,7 +50,7 @@ static struct platform_device swarm_pata_device = {
50static int __init swarm_pata_init(void) 50static int __init swarm_pata_init(void)
51{ 51{
52 u8 __iomem *base; 52 u8 __iomem *base;
53 phys_t offset, size; 53 phys_addr_t offset, size;
54 struct resource *r; 54 struct resource *r;
55 55
56 if (!SIBYTE_HAVE_IDE) 56 if (!SIBYTE_HAVE_IDE)
diff --git a/arch/mips/sibyte/swarm/rtc_m41t81.c b/arch/mips/sibyte/swarm/rtc_m41t81.c
index b732600b47f5..e62466445f08 100644
--- a/arch/mips/sibyte/swarm/rtc_m41t81.c
+++ b/arch/mips/sibyte/swarm/rtc_m41t81.c
@@ -109,7 +109,7 @@ static int m41t81_read(uint8_t addr)
109 return -1; 109 return -1;
110 } 110 }
111 111
112 return (__raw_readq(SMB_CSR(R_SMB_DATA)) & 0xff); 112 return __raw_readq(SMB_CSR(R_SMB_DATA)) & 0xff;
113} 113}
114 114
115static int m41t81_write(uint8_t addr, int b) 115static int m41t81_write(uint8_t addr, int b)
@@ -229,5 +229,5 @@ int m41t81_probe(void)
229 tmp = m41t81_read(M41T81REG_SC); 229 tmp = m41t81_read(M41T81REG_SC);
230 m41t81_write(M41T81REG_SC, tmp & 0x7f); 230 m41t81_write(M41T81REG_SC, tmp & 0x7f);
231 231
232 return (m41t81_read(M41T81REG_SC) != -1); 232 return m41t81_read(M41T81REG_SC) != -1;
233} 233}
diff --git a/arch/mips/sibyte/swarm/rtc_xicor1241.c b/arch/mips/sibyte/swarm/rtc_xicor1241.c
index 178a824b28d4..50a82c495427 100644
--- a/arch/mips/sibyte/swarm/rtc_xicor1241.c
+++ b/arch/mips/sibyte/swarm/rtc_xicor1241.c
@@ -84,7 +84,7 @@ static int xicor_read(uint8_t addr)
84 return -1; 84 return -1;
85 } 85 }
86 86
87 return (__raw_readq(SMB_CSR(R_SMB_DATA)) & 0xff); 87 return __raw_readq(SMB_CSR(R_SMB_DATA)) & 0xff;
88} 88}
89 89
90static int xicor_write(uint8_t addr, int b) 90static int xicor_write(uint8_t addr, int b)
@@ -206,5 +206,5 @@ unsigned long xicor_get_time(void)
206 206
207int xicor_probe(void) 207int xicor_probe(void)
208{ 208{
209 return (xicor_read(X1241REG_SC) != -1); 209 return xicor_read(X1241REG_SC) != -1;
210} 210}
diff --git a/arch/mips/sibyte/swarm/setup.c b/arch/mips/sibyte/swarm/setup.c
index 3462c831d0ea..494fb0a475ac 100644
--- a/arch/mips/sibyte/swarm/setup.c
+++ b/arch/mips/sibyte/swarm/setup.c
@@ -76,7 +76,7 @@ int swarm_be_handler(struct pt_regs *regs, int is_fixup)
76 printk("DBE physical address: %010Lx\n", 76 printk("DBE physical address: %010Lx\n",
77 __read_64bit_c0_register($26, 1)); 77 __read_64bit_c0_register($26, 1));
78 } 78 }
79 return (is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL); 79 return is_fixup ? MIPS_BE_FIXUP : MIPS_BE_FATAL;
80} 80}
81 81
82enum swarm_rtc_type { 82enum swarm_rtc_type {
diff --git a/arch/mips/txx9/generic/setup_tx4927.c b/arch/mips/txx9/generic/setup_tx4927.c
index e714d6ce9a82..a4664cb6c1e1 100644
--- a/arch/mips/txx9/generic/setup_tx4927.c
+++ b/arch/mips/txx9/generic/setup_tx4927.c
@@ -29,8 +29,8 @@ static void __init tx4927_wdr_init(void)
29{ 29{
30 /* report watchdog reset status */ 30 /* report watchdog reset status */
31 if (____raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_WDRST) 31 if (____raw_readq(&tx4927_ccfgptr->ccfg) & TX4927_CCFG_WDRST)
32 pr_warning("Watchdog reset detected at 0x%lx\n", 32 pr_warn("Watchdog reset detected at 0x%lx\n",
33 read_c0_errorepc()); 33 read_c0_errorepc());
34 /* clear WatchDogReset (W1C) */ 34 /* clear WatchDogReset (W1C) */
35 tx4927_ccfg_set(TX4927_CCFG_WDRST); 35 tx4927_ccfg_set(TX4927_CCFG_WDRST);
36 /* do reset on watchdog */ 36 /* do reset on watchdog */
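
The "(W1C)" note above means write-one-to-clear: a latched status bit is acknowledged by writing a 1 back to that same bit position, which is why the code clears the flag via tx4927_ccfg_set(). A generic sketch of the idiom, with a hypothetical register and bit name:

	#include <stdint.h>

	#define STATUS_WDRST	(1u << 15)	/* hypothetical W1C status bit */

	/* Writing 1 to a W1C bit clears it; writing 0 leaves it alone,
	 * so storing only the bit of interest cannot disturb any
	 * neighbouring status flags. */
	static void ack_wdrst(volatile uint32_t *status)
	{
		if (*status & STATUS_WDRST)
			*status = STATUS_WDRST;
	}
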
diff --git a/arch/mips/txx9/generic/setup_tx4938.c b/arch/mips/txx9/generic/setup_tx4938.c
index 0a3bf2dfaba1..58cdb2aba5e1 100644
--- a/arch/mips/txx9/generic/setup_tx4938.c
+++ b/arch/mips/txx9/generic/setup_tx4938.c
@@ -31,8 +31,8 @@ static void __init tx4938_wdr_init(void)
31{ 31{
32 /* report watchdog reset status */ 32 /* report watchdog reset status */
33 if (____raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_WDRST) 33 if (____raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_WDRST)
34 pr_warning("Watchdog reset detected at 0x%lx\n", 34 pr_warn("Watchdog reset detected at 0x%lx\n",
35 read_c0_errorepc()); 35 read_c0_errorepc());
36 /* clear WatchDogReset (W1C) */ 36 /* clear WatchDogReset (W1C) */
37 tx4938_ccfg_set(TX4938_CCFG_WDRST); 37 tx4938_ccfg_set(TX4938_CCFG_WDRST);
38 /* do reset on watchdog */ 38 /* do reset on watchdog */
diff --git a/arch/mips/txx9/generic/setup_tx4939.c b/arch/mips/txx9/generic/setup_tx4939.c
index b7eccbd17bf7..e3733cde50d6 100644
--- a/arch/mips/txx9/generic/setup_tx4939.c
+++ b/arch/mips/txx9/generic/setup_tx4939.c
@@ -35,8 +35,8 @@ static void __init tx4939_wdr_init(void)
35{ 35{
36 /* report watchdog reset status */ 36 /* report watchdog reset status */
37 if (____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_WDRST) 37 if (____raw_readq(&tx4939_ccfgptr->ccfg) & TX4939_CCFG_WDRST)
38 pr_warning("Watchdog reset detected at 0x%lx\n", 38 pr_warn("Watchdog reset detected at 0x%lx\n",
39 read_c0_errorepc()); 39 read_c0_errorepc());
40 /* clear WatchDogReset (W1C) */ 40 /* clear WatchDogReset (W1C) */
41 tx4939_ccfg_set(TX4939_CCFG_WDRST); 41 tx4939_ccfg_set(TX4939_CCFG_WDRST);
42 /* do reset on watchdog */ 42 /* do reset on watchdog */
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 88eace4e28c3..af696874248b 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -88,6 +88,7 @@ config PPC
88 select ARCH_MIGHT_HAVE_PC_PARPORT 88 select ARCH_MIGHT_HAVE_PC_PARPORT
89 select ARCH_MIGHT_HAVE_PC_SERIO 89 select ARCH_MIGHT_HAVE_PC_SERIO
90 select BINFMT_ELF 90 select BINFMT_ELF
91 select ARCH_BINFMT_ELF_RANDOMIZE_PIE
91 select OF 92 select OF
92 select OF_EARLY_FLATTREE 93 select OF_EARLY_FLATTREE
93 select OF_RESERVED_MEM 94 select OF_RESERVED_MEM
@@ -148,6 +149,8 @@ config PPC
148 select HAVE_ARCH_AUDITSYSCALL 149 select HAVE_ARCH_AUDITSYSCALL
149 select ARCH_SUPPORTS_ATOMIC_RMW 150 select ARCH_SUPPORTS_ATOMIC_RMW
150 select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN 151 select DCACHE_WORD_ACCESS if PPC64 && CPU_LITTLE_ENDIAN
152 select NO_BOOTMEM
153 select HAVE_GENERIC_RCU_GUP
151 154
152config GENERIC_CSUM 155config GENERIC_CSUM
153 def_bool CPU_LITTLE_ENDIAN 156 def_bool CPU_LITTLE_ENDIAN
@@ -549,7 +552,7 @@ config PPC_4K_PAGES
549 bool "4k page size" 552 bool "4k page size"
550 553
551config PPC_16K_PAGES 554config PPC_16K_PAGES
552 bool "16k page size" if 44x 555 bool "16k page size" if 44x || PPC_8xx
553 556
554config PPC_64K_PAGES 557config PPC_64K_PAGES
555 bool "64k page size" if 44x || PPC_STD_MMU_64 || PPC_BOOK3E_64 558 bool "64k page size" if 44x || PPC_STD_MMU_64 || PPC_BOOK3E_64
diff --git a/arch/powerpc/boot/dts/b4860emu.dts b/arch/powerpc/boot/dts/b4860emu.dts
index 85646b4f96e1..2aa5cd318ce8 100644
--- a/arch/powerpc/boot/dts/b4860emu.dts
+++ b/arch/powerpc/boot/dts/b4860emu.dts
@@ -193,9 +193,9 @@
193 fsl,liodn-bits = <12>; 193 fsl,liodn-bits = <12>;
194 }; 194 };
195 195
196	clockgen: global-utilities@e1000 {
 196/include/ "fsl/qoriq-clockgen2.dtsi"
197 global-utilities@e1000 {
197 compatible = "fsl,b4-clockgen", "fsl,qoriq-clockgen-2.0"; 198 compatible = "fsl,b4-clockgen", "fsl,qoriq-clockgen-2.0";
198 reg = <0xe1000 0x1000>;
199 }; 199 };
200 200
201/include/ "fsl/qoriq-dma-0.dtsi" 201/include/ "fsl/qoriq-dma-0.dtsi"
diff --git a/arch/powerpc/boot/dts/b4qds.dtsi b/arch/powerpc/boot/dts/b4qds.dtsi
index 8b47edcfabf0..e5bde0b85135 100644
--- a/arch/powerpc/boot/dts/b4qds.dtsi
+++ b/arch/powerpc/boot/dts/b4qds.dtsi
@@ -152,6 +152,29 @@
152 reg = <0x68>; 152 reg = <0x68>;
153 }; 153 };
154 }; 154 };
155
156 i2c@2 {
157 #address-cells = <1>;
158 #size-cells = <0>;
159 reg = <0x2>;
160
161 ina220@40 {
162 compatible = "ti,ina220";
163 reg = <0x40>;
164 shunt-resistor = <1000>;
165 };
166 };
167
168 i2c@3 {
169 #address-cells = <1>;
170 #size-cells = <0>;
171 reg = <0x3>;
172
173 adt7461@4c {
174 compatible = "adi,adt7461";
175 reg = <0x4c>;
176 };
177 };
155 }; 178 };
156 }; 179 };
157 180
diff --git a/arch/powerpc/boot/dts/bsc9131rdb.dtsi b/arch/powerpc/boot/dts/bsc9131rdb.dtsi
index 9e6c01339ccc..45efcbadb23c 100644
--- a/arch/powerpc/boot/dts/bsc9131rdb.dtsi
+++ b/arch/powerpc/boot/dts/bsc9131rdb.dtsi
@@ -40,31 +40,6 @@
40 compatible = "fsl,ifc-nand"; 40 compatible = "fsl,ifc-nand";
41 reg = <0x0 0x0 0x4000>; 41 reg = <0x0 0x0 0x4000>;
42 42
43 partition@0 {
44 /* This location must not be altered */
45 /* 3MB for u-boot Bootloader Image */
46 reg = <0x0 0x00300000>;
47 label = "NAND U-Boot Image";
48 read-only;
49 };
50
51 partition@300000 {
52 /* 1MB for DTB Image */
53 reg = <0x00300000 0x00100000>;
54 label = "NAND DTB Image";
55 };
56
57 partition@400000 {
58 /* 8MB for Linux Kernel Image */
59 reg = <0x00400000 0x00800000>;
60 label = "NAND Linux Kernel Image";
61 };
62
63 partition@c00000 {
64 /* Rest space for Root file System Image */
65 reg = <0x00c00000 0x07400000>;
66 label = "NAND RFS Image";
67 };
68 }; 43 };
69}; 44};
70 45
@@ -82,31 +57,6 @@
82 reg = <0>; 57 reg = <0>;
83 spi-max-frequency = <50000000>; 58 spi-max-frequency = <50000000>;
84 59
85 /* 512KB for u-boot Bootloader Image */
86 partition@0 {
87 reg = <0x0 0x00080000>;
88 label = "SPI Flash U-Boot Image";
89 read-only;
90 };
91
92 /* 512KB for DTB Image */
93 partition@80000 {
94 reg = <0x00080000 0x00080000>;
95 label = "SPI Flash DTB Image";
96 };
97
98 /* 4MB for Linux Kernel Image */
99 partition@100000 {
100 reg = <0x00100000 0x00400000>;
101 label = "SPI Flash Kernel Image";
102 };
103
104 /*11MB for RFS Image */
105 partition@500000 {
106 reg = <0x00500000 0x00B00000>;
107 label = "SPI Flash RFS Image";
108 };
109
110 }; 60 };
111 }; 61 };
112 62
diff --git a/arch/powerpc/boot/dts/fsl/b4420si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4420si-post.dtsi
index d67894459ac8..86161ae6c966 100644
--- a/arch/powerpc/boot/dts/fsl/b4420si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4420si-post.dtsi
@@ -80,33 +80,9 @@
80 compatible = "fsl,b4420-device-config", "fsl,qoriq-device-config-2.0"; 80 compatible = "fsl,b4420-device-config", "fsl,qoriq-device-config-2.0";
81 }; 81 };
82 82
83	clockgen: global-utilities@e1000 {
 83/include/ "qoriq-clockgen2.dtsi"
84 global-utilities@e1000 {
84 compatible = "fsl,b4420-clockgen", "fsl,qoriq-clockgen-2.0"; 85 compatible = "fsl,b4420-clockgen", "fsl,qoriq-clockgen-2.0";
85 ranges = <0x0 0xe1000 0x1000>;
86 #address-cells = <1>;
87 #size-cells = <1>;
88
89 sysclk: sysclk {
90 #clock-cells = <0>;
91 compatible = "fsl,qoriq-sysclk-2.0";
92 clock-output-names = "sysclk";
93 };
94
95 pll0: pll0@800 {
96 #clock-cells = <1>;
97 reg = <0x800 0x4>;
98 compatible = "fsl,qoriq-core-pll-2.0";
99 clocks = <&sysclk>;
100 clock-output-names = "pll0", "pll0-div2", "pll0-div4";
101 };
102
103 pll1: pll1@820 {
104 #clock-cells = <1>;
105 reg = <0x820 0x4>;
106 compatible = "fsl,qoriq-core-pll-2.0";
107 clocks = <&sysclk>;
108 clock-output-names = "pll1", "pll1-div2", "pll1-div4";
109 };
110 86
111 mux0: mux0@0 { 87 mux0: mux0@0 {
112 #clock-cells = <0>; 88 #clock-cells = <0>;
diff --git a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
index 582381dba1d7..65100b9636b7 100644
--- a/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/b4860si-post.dtsi
@@ -124,33 +124,9 @@
124 compatible = "fsl,b4860-device-config", "fsl,qoriq-device-config-2.0"; 124 compatible = "fsl,b4860-device-config", "fsl,qoriq-device-config-2.0";
125 }; 125 };
126 126
127	clockgen: global-utilities@e1000 {
 127/include/ "qoriq-clockgen2.dtsi"
128 global-utilities@e1000 {
128 compatible = "fsl,b4860-clockgen", "fsl,qoriq-clockgen-2.0"; 129 compatible = "fsl,b4860-clockgen", "fsl,qoriq-clockgen-2.0";
129 ranges = <0x0 0xe1000 0x1000>;
130 #address-cells = <1>;
131 #size-cells = <1>;
132
133 sysclk: sysclk {
134 #clock-cells = <0>;
135 compatible = "fsl,qoriq-sysclk-2.0";
136 clock-output-names = "sysclk";
137 };
138
139 pll0: pll0@800 {
140 #clock-cells = <1>;
141 reg = <0x800 0x4>;
142 compatible = "fsl,qoriq-core-pll-2.0";
143 clocks = <&sysclk>;
144 clock-output-names = "pll0", "pll0-div2", "pll0-div4";
145 };
146
147 pll1: pll1@820 {
148 #clock-cells = <1>;
149 reg = <0x820 0x4>;
150 compatible = "fsl,qoriq-core-pll-2.0";
151 clocks = <&sysclk>;
152 clock-output-names = "pll1", "pll1-div2", "pll1-div4";
153 };
154 130
155 mux0: mux0@0 { 131 mux0: mux0@0 {
156 #clock-cells = <0>; 132 #clock-cells = <0>;
diff --git a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
index 69ce1026c948..efd74db4f9b0 100644
--- a/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p2041si-post.dtsi
@@ -305,53 +305,9 @@
305 #sleep-cells = <2>; 305 #sleep-cells = <2>;
306 }; 306 };
307 307
308	clockgen: global-utilities@e1000 {
 308/include/ "qoriq-clockgen1.dtsi"
309 global-utilities@e1000 {
309 compatible = "fsl,p2041-clockgen", "fsl,qoriq-clockgen-1.0"; 310 compatible = "fsl,p2041-clockgen", "fsl,qoriq-clockgen-1.0";
310 ranges = <0x0 0xe1000 0x1000>;
311 reg = <0xe1000 0x1000>;
312 clock-frequency = <0>;
313 #address-cells = <1>;
314 #size-cells = <1>;
315
316 sysclk: sysclk {
317 #clock-cells = <0>;
318 compatible = "fsl,qoriq-sysclk-1.0";
319 clock-output-names = "sysclk";
320 };
321
322 pll0: pll0@800 {
323 #clock-cells = <1>;
324 reg = <0x800 0x4>;
325 compatible = "fsl,qoriq-core-pll-1.0";
326 clocks = <&sysclk>;
327 clock-output-names = "pll0", "pll0-div2";
328 };
329
330 pll1: pll1@820 {
331 #clock-cells = <1>;
332 reg = <0x820 0x4>;
333 compatible = "fsl,qoriq-core-pll-1.0";
334 clocks = <&sysclk>;
335 clock-output-names = "pll1", "pll1-div2";
336 };
337
338 mux0: mux0@0 {
339 #clock-cells = <0>;
340 reg = <0x0 0x4>;
341 compatible = "fsl,qoriq-core-mux-1.0";
342 clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
343 clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
344 clock-output-names = "cmux0";
345 };
346
347 mux1: mux1@20 {
348 #clock-cells = <0>;
349 reg = <0x20 0x4>;
350 compatible = "fsl,qoriq-core-mux-1.0";
351 clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
352 clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
353 clock-output-names = "cmux1";
354 };
355 311
356 mux2: mux2@40 { 312 mux2: mux2@40 {
357 #clock-cells = <0>; 313 #clock-cells = <0>;
diff --git a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
index cd63cb1b1042..d7425ef1ae41 100644
--- a/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p3041si-post.dtsi
@@ -332,53 +332,9 @@
332 #sleep-cells = <2>; 332 #sleep-cells = <2>;
333 }; 333 };
334 334
335	clockgen: global-utilities@e1000 {
 335/include/ "qoriq-clockgen1.dtsi"
336 global-utilities@e1000 {
336 compatible = "fsl,p3041-clockgen", "fsl,qoriq-clockgen-1.0"; 337 compatible = "fsl,p3041-clockgen", "fsl,qoriq-clockgen-1.0";
337 ranges = <0x0 0xe1000 0x1000>;
338 reg = <0xe1000 0x1000>;
339 clock-frequency = <0>;
340 #address-cells = <1>;
341 #size-cells = <1>;
342
343 sysclk: sysclk {
344 #clock-cells = <0>;
345 compatible = "fsl,qoriq-sysclk-1.0";
346 clock-output-names = "sysclk";
347 };
348
349 pll0: pll0@800 {
350 #clock-cells = <1>;
351 reg = <0x800 0x4>;
352 compatible = "fsl,qoriq-core-pll-1.0";
353 clocks = <&sysclk>;
354 clock-output-names = "pll0", "pll0-div2";
355 };
356
357 pll1: pll1@820 {
358 #clock-cells = <1>;
359 reg = <0x820 0x4>;
360 compatible = "fsl,qoriq-core-pll-1.0";
361 clocks = <&sysclk>;
362 clock-output-names = "pll1", "pll1-div2";
363 };
364
365 mux0: mux0@0 {
366 #clock-cells = <0>;
367 reg = <0x0 0x4>;
368 compatible = "fsl,qoriq-core-mux-1.0";
369 clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
370 clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
371 clock-output-names = "cmux0";
372 };
373
374 mux1: mux1@20 {
375 #clock-cells = <0>;
376 reg = <0x20 0x4>;
377 compatible = "fsl,qoriq-core-mux-1.0";
378 clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
379 clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
380 clock-output-names = "cmux1";
381 };
382 338
383 mux2: mux2@40 { 339 mux2: mux2@40 {
384 #clock-cells = <0>; 340 #clock-cells = <0>;
diff --git a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
index 12947ccddf25..7005a4a4cef0 100644
--- a/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p4080si-post.dtsi
@@ -352,35 +352,9 @@
352 #sleep-cells = <2>; 352 #sleep-cells = <2>;
353 }; 353 };
354 354
355	clockgen: global-utilities@e1000 {
 355/include/ "qoriq-clockgen1.dtsi"
356 global-utilities@e1000 {
356 compatible = "fsl,p4080-clockgen", "fsl,qoriq-clockgen-1.0"; 357 compatible = "fsl,p4080-clockgen", "fsl,qoriq-clockgen-1.0";
357 ranges = <0x0 0xe1000 0x1000>;
358 reg = <0xe1000 0x1000>;
359 clock-frequency = <0>;
360 #address-cells = <1>;
361 #size-cells = <1>;
362
363 sysclk: sysclk {
364 #clock-cells = <0>;
365 compatible = "fsl,qoriq-sysclk-1.0";
366 clock-output-names = "sysclk";
367 };
368
369 pll0: pll0@800 {
370 #clock-cells = <1>;
371 reg = <0x800 0x4>;
372 compatible = "fsl,qoriq-core-pll-1.0";
373 clocks = <&sysclk>;
374 clock-output-names = "pll0", "pll0-div2";
375 };
376
377 pll1: pll1@820 {
378 #clock-cells = <1>;
379 reg = <0x820 0x4>;
380 compatible = "fsl,qoriq-core-pll-1.0";
381 clocks = <&sysclk>;
382 clock-output-names = "pll1", "pll1-div2";
383 };
384 358
385 pll2: pll2@840 { 359 pll2: pll2@840 {
386 #clock-cells = <1>; 360 #clock-cells = <1>;
@@ -398,24 +372,6 @@
398 clock-output-names = "pll3", "pll3-div2"; 372 clock-output-names = "pll3", "pll3-div2";
399 }; 373 };
400 374
401 mux0: mux0@0 {
402 #clock-cells = <0>;
403 reg = <0x0 0x4>;
404 compatible = "fsl,qoriq-core-mux-1.0";
405 clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
406 clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
407 clock-output-names = "cmux0";
408 };
409
410 mux1: mux1@20 {
411 #clock-cells = <0>;
412 reg = <0x20 0x4>;
413 compatible = "fsl,qoriq-core-mux-1.0";
414 clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
415 clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
416 clock-output-names = "cmux1";
417 };
418
419 mux2: mux2@40 { 375 mux2: mux2@40 {
420 #clock-cells = <0>; 376 #clock-cells = <0>;
421 reg = <0x40 0x4>; 377 reg = <0x40 0x4>;
diff --git a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
index 4c4a2b0436b2..55834211bd28 100644
--- a/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p5020si-post.dtsi
@@ -337,53 +337,9 @@
337 #sleep-cells = <2>; 337 #sleep-cells = <2>;
338 }; 338 };
339 339
340	clockgen: global-utilities@e1000 {
 340/include/ "qoriq-clockgen1.dtsi"
341 global-utilities@e1000 {
341 compatible = "fsl,p5020-clockgen", "fsl,qoriq-clockgen-1.0"; 342 compatible = "fsl,p5020-clockgen", "fsl,qoriq-clockgen-1.0";
342 ranges = <0x0 0xe1000 0x1000>;
343 reg = <0xe1000 0x1000>;
344 clock-frequency = <0>;
345 #address-cells = <1>;
346 #size-cells = <1>;
347
348 sysclk: sysclk {
349 #clock-cells = <0>;
350 compatible = "fsl,qoriq-sysclk-1.0";
351 clock-output-names = "sysclk";
352 };
353
354 pll0: pll0@800 {
355 #clock-cells = <1>;
356 reg = <0x800 0x4>;
357 compatible = "fsl,qoriq-core-pll-1.0";
358 clocks = <&sysclk>;
359 clock-output-names = "pll0", "pll0-div2";
360 };
361
362 pll1: pll1@820 {
363 #clock-cells = <1>;
364 reg = <0x820 0x4>;
365 compatible = "fsl,qoriq-core-pll-1.0";
366 clocks = <&sysclk>;
367 clock-output-names = "pll1", "pll1-div2";
368 };
369
370 mux0: mux0@0 {
371 #clock-cells = <0>;
372 reg = <0x0 0x4>;
373 compatible = "fsl,qoriq-core-mux-1.0";
374 clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
375 clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
376 clock-output-names = "cmux0";
377 };
378
379 mux1: mux1@20 {
380 #clock-cells = <0>;
381 reg = <0x20 0x4>;
382 compatible = "fsl,qoriq-core-mux-1.0";
383 clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
384 clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
385 clock-output-names = "cmux1";
386 };
387 }; 343 };
388 344
389 rcpm: global-utilities@e2000 { 345 rcpm: global-utilities@e2000 {
diff --git a/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi b/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
index 67296fdd9698..6e4cd6ce363c 100644
--- a/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/p5040si-post.dtsi
@@ -297,53 +297,9 @@
297 #sleep-cells = <2>; 297 #sleep-cells = <2>;
298 }; 298 };
299 299
300	clockgen: global-utilities@e1000 {
 300/include/ "qoriq-clockgen1.dtsi"
301 global-utilities@e1000 {
301 compatible = "fsl,p5040-clockgen", "fsl,qoriq-clockgen-1.0"; 302 compatible = "fsl,p5040-clockgen", "fsl,qoriq-clockgen-1.0";
302 ranges = <0x0 0xe1000 0x1000>;
303 reg = <0xe1000 0x1000>;
304 clock-frequency = <0>;
305 #address-cells = <1>;
306 #size-cells = <1>;
307
308 sysclk: sysclk {
309 #clock-cells = <0>;
310 compatible = "fsl,qoriq-sysclk-1.0";
311 clock-output-names = "sysclk";
312 };
313
314 pll0: pll0@800 {
315 #clock-cells = <1>;
316 reg = <0x800 0x4>;
317 compatible = "fsl,qoriq-core-pll-1.0";
318 clocks = <&sysclk>;
319 clock-output-names = "pll0", "pll0-div2";
320 };
321
322 pll1: pll1@820 {
323 #clock-cells = <1>;
324 reg = <0x820 0x4>;
325 compatible = "fsl,qoriq-core-pll-1.0";
326 clocks = <&sysclk>;
327 clock-output-names = "pll1", "pll1-div2";
328 };
329
330 mux0: mux0@0 {
331 #clock-cells = <0>;
332 reg = <0x0 0x4>;
333 compatible = "fsl,qoriq-core-mux-1.0";
334 clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
335 clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
336 clock-output-names = "cmux0";
337 };
338
339 mux1: mux1@20 {
340 #clock-cells = <0>;
341 reg = <0x20 0x4>;
342 compatible = "fsl,qoriq-core-mux-1.0";
343 clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
344 clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
345 clock-output-names = "cmux1";
346 };
347 303
348 mux2: mux2@40 { 304 mux2: mux2@40 {
349 #clock-cells = <0>; 305 #clock-cells = <0>;
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-clockgen1.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-clockgen1.dtsi
new file mode 100644
index 000000000000..4ece1edbff63
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-clockgen1.dtsi
@@ -0,0 +1,85 @@
1/*
2 * QorIQ clock control device tree stub [ controller @ offset 0xe1000 ]
3 *
4 * Copyright 2014 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35global-utilities@e1000 {
36 compatible = "fsl,qoriq-clockgen-1.0";
37 ranges = <0x0 0xe1000 0x1000>;
38 reg = <0xe1000 0x1000>;
39 clock-frequency = <0>;
40 #address-cells = <1>;
41 #size-cells = <1>;
42
43 sysclk: sysclk {
44 #clock-cells = <0>;
45 compatible = "fsl,qoriq-sysclk-1.0", "fixed-clock";
46 clock-output-names = "sysclk";
47 };
48 pll0: pll0@800 {
49 #clock-cells = <1>;
50 reg = <0x800 0x4>;
51 compatible = "fsl,qoriq-core-pll-1.0";
52 clocks = <&sysclk>;
53 clock-output-names = "pll0", "pll0-div2";
54 };
55 pll1: pll1@820 {
56 #clock-cells = <1>;
57 reg = <0x820 0x4>;
58 compatible = "fsl,qoriq-core-pll-1.0";
59 clocks = <&sysclk>;
60 clock-output-names = "pll1", "pll1-div2";
61 };
62 mux0: mux0@0 {
63 #clock-cells = <0>;
64 reg = <0x0 0x4>;
65 compatible = "fsl,qoriq-core-mux-1.0";
66 clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
67 clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
68 clock-output-names = "cmux0";
69 };
70 mux1: mux1@20 {
71 #clock-cells = <0>;
72 reg = <0x20 0x4>;
73 compatible = "fsl,qoriq-core-mux-1.0";
74 clocks = <&pll0 0>, <&pll0 1>, <&pll1 0>, <&pll1 1>;
75 clock-names = "pll0", "pll0-div2", "pll1", "pll1-div2";
76 clock-output-names = "cmux1";
77 };
78 platform_pll: platform-pll@c00 {
79 #clock-cells = <1>;
80 reg = <0xc00 0x4>;
81 compatible = "fsl,qoriq-platform-pll-1.0";
82 clocks = <&sysclk>;
83 clock-output-names = "platform-pll", "platform-pll-div2";
84 };
85};
diff --git a/arch/powerpc/boot/dts/fsl/qoriq-clockgen2.dtsi b/arch/powerpc/boot/dts/fsl/qoriq-clockgen2.dtsi
new file mode 100644
index 000000000000..48e0b6e4ce33
--- /dev/null
+++ b/arch/powerpc/boot/dts/fsl/qoriq-clockgen2.dtsi
@@ -0,0 +1,68 @@
1/*
2 * QorIQ clock control device tree stub [ controller @ offset 0xe1000 ]
3 *
4 * Copyright 2014 Freescale Semiconductor Inc.
5 *
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * * Neither the name of Freescale Semiconductor nor the
14 * names of its contributors may be used to endorse or promote products
15 * derived from this software without specific prior written permission.
16 *
17 *
18 * ALTERNATIVELY, this software may be distributed under the terms of the
19 * GNU General Public License ("GPL") as published by the Free Software
20 * Foundation, either version 2 of that License or (at your option) any
21 * later version.
22 *
23 * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY
24 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
25 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
26 * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY
27 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
28 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
29 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
30 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
31 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
32 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 */
34
35global-utilities@e1000 {
36 compatible = "fsl,qoriq-clockgen-2.0";
37 ranges = <0x0 0xe1000 0x1000>;
38 reg = <0xe1000 0x1000>;
39 #address-cells = <1>;
40 #size-cells = <1>;
41
42 sysclk: sysclk {
43 #clock-cells = <0>;
44 compatible = "fsl,qoriq-sysclk-2.0", "fixed-clock";
45 clock-output-names = "sysclk";
46 };
47 pll0: pll0@800 {
48 #clock-cells = <1>;
49 reg = <0x800 0x4>;
50 compatible = "fsl,qoriq-core-pll-2.0";
51 clocks = <&sysclk>;
52 clock-output-names = "pll0", "pll0-div2", "pll0-div4";
53 };
54 pll1: pll1@820 {
55 #clock-cells = <1>;
56 reg = <0x820 0x4>;
57 compatible = "fsl,qoriq-core-pll-2.0";
58 clocks = <&sysclk>;
59 clock-output-names = "pll1", "pll1-div2", "pll1-div4";
60 };
61 platform_pll: platform-pll@c00 {
62 #clock-cells = <1>;
63 reg = <0xc00 0x4>;
64 compatible = "fsl,qoriq-platform-pll-2.0";
65 clocks = <&sysclk>;
66 clock-output-names = "platform-pll", "platform-pll-div2";
67 };
68};
diff --git a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
index 12e597eea3c8..15ae462e758f 100644
--- a/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t1040si-post.dtsi
@@ -281,35 +281,9 @@
281 fsl,liodn-bits = <12>; 281 fsl,liodn-bits = <12>;
282 }; 282 };
283 283
284	clockgen: global-utilities@e1000 {
 284/include/ "qoriq-clockgen2.dtsi"
285 global-utilities@e1000 {
285 compatible = "fsl,t1040-clockgen", "fsl,qoriq-clockgen-2.0"; 286 compatible = "fsl,t1040-clockgen", "fsl,qoriq-clockgen-2.0";
286 ranges = <0x0 0xe1000 0x1000>;
287 reg = <0xe1000 0x1000>;
288 #address-cells = <1>;
289 #size-cells = <1>;
290
291 sysclk: sysclk {
292 #clock-cells = <0>;
293 compatible = "fsl,qoriq-sysclk-2.0";
294 clock-output-names = "sysclk", "fixed-clock";
295 };
296
297
298 pll0: pll0@800 {
299 #clock-cells = <1>;
300 reg = <0x800 4>;
301 compatible = "fsl,qoriq-core-pll-2.0";
302 clocks = <&sysclk>;
303 clock-output-names = "pll0", "pll0-div2", "pll0-div4";
304 };
305
306 pll1: pll1@820 {
307 #clock-cells = <1>;
308 reg = <0x820 4>;
309 compatible = "fsl,qoriq-core-pll-2.0";
310 clocks = <&sysclk>;
311 clock-output-names = "pll1", "pll1-div2", "pll1-div4";
312 };
313 287
314 mux0: mux0@0 { 288 mux0: mux0@0 {
315 #clock-cells = <0>; 289 #clock-cells = <0>;
diff --git a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
index aecee9690a88..1ce91e3485a9 100644
--- a/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t2081si-post.dtsi
@@ -305,34 +305,9 @@
305 fsl,liodn-bits = <12>; 305 fsl,liodn-bits = <12>;
306 }; 306 };
307 307
308	clockgen: global-utilities@e1000 {
 308/include/ "qoriq-clockgen2.dtsi"
309 global-utilities@e1000 {
309 compatible = "fsl,t2080-clockgen", "fsl,qoriq-clockgen-2.0"; 310 compatible = "fsl,t2080-clockgen", "fsl,qoriq-clockgen-2.0";
310 ranges = <0x0 0xe1000 0x1000>;
311 reg = <0xe1000 0x1000>;
312 #address-cells = <1>;
313 #size-cells = <1>;
314
315 sysclk: sysclk {
316 #clock-cells = <0>;
317 compatible = "fsl,qoriq-sysclk-2.0";
318 clock-output-names = "sysclk", "fixed-clock";
319 };
320
321 pll0: pll0@800 {
322 #clock-cells = <1>;
323 reg = <0x800 4>;
324 compatible = "fsl,qoriq-core-pll-2.0";
325 clocks = <&sysclk>;
326 clock-output-names = "pll0", "pll0-div2", "pll0-div4";
327 };
328
329 pll1: pll1@820 {
330 #clock-cells = <1>;
331 reg = <0x820 4>;
332 compatible = "fsl,qoriq-core-pll-2.0";
333 clocks = <&sysclk>;
334 clock-output-names = "pll1", "pll1-div2", "pll1-div4";
335 };
336 311
337 mux0: mux0@0 { 312 mux0: mux0@0 {
338 #clock-cells = <0>; 313 #clock-cells = <0>;
diff --git a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
index 7e2fc7cdce48..0e96fcabe812 100644
--- a/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
+++ b/arch/powerpc/boot/dts/fsl/t4240si-post.dtsi
@@ -368,34 +368,9 @@
368 fsl,liodn-bits = <12>; 368 fsl,liodn-bits = <12>;
369 }; 369 };
370 370
371	clockgen: global-utilities@e1000 {
 371/include/ "qoriq-clockgen2.dtsi"
372 global-utilities@e1000 {
372 compatible = "fsl,t4240-clockgen", "fsl,qoriq-clockgen-2.0"; 373 compatible = "fsl,t4240-clockgen", "fsl,qoriq-clockgen-2.0";
373 ranges = <0x0 0xe1000 0x1000>;
374 reg = <0xe1000 0x1000>;
375 #address-cells = <1>;
376 #size-cells = <1>;
377
378 sysclk: sysclk {
379 #clock-cells = <0>;
380 compatible = "fsl,qoriq-sysclk-2.0";
381 clock-output-names = "sysclk";
382 };
383
384 pll0: pll0@800 {
385 #clock-cells = <1>;
386 reg = <0x800 0x4>;
387 compatible = "fsl,qoriq-core-pll-2.0";
388 clocks = <&sysclk>;
389 clock-output-names = "pll0", "pll0-div2", "pll0-div4";
390 };
391
392 pll1: pll1@820 {
393 #clock-cells = <1>;
394 reg = <0x820 0x4>;
395 compatible = "fsl,qoriq-core-pll-2.0";
396 clocks = <&sysclk>;
397 clock-output-names = "pll1", "pll1-div2", "pll1-div4";
398 };
399 374
400 pll2: pll2@840 { 375 pll2: pll2@840 {
401 #clock-cells = <1>; 376 #clock-cells = <1>;
diff --git a/arch/powerpc/boot/dts/p3041ds.dts b/arch/powerpc/boot/dts/p3041ds.dts
index 2fed3bc0b990..394ea9c943c9 100644
--- a/arch/powerpc/boot/dts/p3041ds.dts
+++ b/arch/powerpc/boot/dts/p3041ds.dts
@@ -98,6 +98,26 @@
98 reg = <0x68>; 98 reg = <0x68>;
99 interrupts = <0x1 0x1 0 0>; 99 interrupts = <0x1 0x1 0 0>;
100 }; 100 };
101 ina220@40 {
102 compatible = "ti,ina220";
103 reg = <0x40>;
104 shunt-resistor = <1000>;
105 };
106 ina220@41 {
107 compatible = "ti,ina220";
108 reg = <0x41>;
109 shunt-resistor = <1000>;
110 };
111 ina220@44 {
112 compatible = "ti,ina220";
113 reg = <0x44>;
114 shunt-resistor = <1000>;
115 };
116 ina220@45 {
117 compatible = "ti,ina220";
118 reg = <0x45>;
119 shunt-resistor = <1000>;
120 };
101 adt7461@4c { 121 adt7461@4c {
102 compatible = "adi,adt7461"; 122 compatible = "adi,adt7461";
103 reg = <0x4c>; 123 reg = <0x4c>;
diff --git a/arch/powerpc/boot/dts/p5020ds.dts b/arch/powerpc/boot/dts/p5020ds.dts
index 2869fea717dd..b7f3057cd894 100644
--- a/arch/powerpc/boot/dts/p5020ds.dts
+++ b/arch/powerpc/boot/dts/p5020ds.dts
@@ -98,6 +98,26 @@
98 reg = <0x68>; 98 reg = <0x68>;
99 interrupts = <0x1 0x1 0 0>; 99 interrupts = <0x1 0x1 0 0>;
100 }; 100 };
101 ina220@40 {
102 compatible = "ti,ina220";
103 reg = <0x40>;
104 shunt-resistor = <1000>;
105 };
106 ina220@41 {
107 compatible = "ti,ina220";
108 reg = <0x41>;
109 shunt-resistor = <1000>;
110 };
111 ina220@44 {
112 compatible = "ti,ina220";
113 reg = <0x44>;
114 shunt-resistor = <1000>;
115 };
116 ina220@45 {
117 compatible = "ti,ina220";
118 reg = <0x45>;
119 shunt-resistor = <1000>;
120 };
101 adt7461@4c { 121 adt7461@4c {
102 compatible = "adi,adt7461"; 122 compatible = "adi,adt7461";
103 reg = <0x4c>; 123 reg = <0x4c>;
diff --git a/arch/powerpc/boot/dts/p5040ds.dts b/arch/powerpc/boot/dts/p5040ds.dts
index 860b5ccf76c0..7e04bf487c04 100644
--- a/arch/powerpc/boot/dts/p5040ds.dts
+++ b/arch/powerpc/boot/dts/p5040ds.dts
@@ -95,6 +95,26 @@
95 reg = <0x68>; 95 reg = <0x68>;
96 interrupts = <0x1 0x1 0 0>; 96 interrupts = <0x1 0x1 0 0>;
97 }; 97 };
98 ina220@40 {
99 compatible = "ti,ina220";
100 reg = <0x40>;
101 shunt-resistor = <1000>;
102 };
103 ina220@41 {
104 compatible = "ti,ina220";
105 reg = <0x41>;
106 shunt-resistor = <1000>;
107 };
108 ina220@44 {
109 compatible = "ti,ina220";
110 reg = <0x44>;
111 shunt-resistor = <1000>;
112 };
113 ina220@45 {
114 compatible = "ti,ina220";
115 reg = <0x45>;
116 shunt-resistor = <1000>;
117 };
98 adt7461@4c { 118 adt7461@4c {
99 compatible = "adi,adt7461"; 119 compatible = "adi,adt7461";
100 reg = <0x4c>; 120 reg = <0x4c>;
diff --git a/arch/powerpc/boot/dts/t104xrdb.dtsi b/arch/powerpc/boot/dts/t104xrdb.dtsi
index 1cf0f3c5f7e5..187add885cae 100644
--- a/arch/powerpc/boot/dts/t104xrdb.dtsi
+++ b/arch/powerpc/boot/dts/t104xrdb.dtsi
@@ -83,6 +83,13 @@
83 }; 83 };
84 }; 84 };
85 85
86 i2c@118000 {
87 adt7461@4c {
88 compatible = "adi,adt7461";
89 reg = <0x4c>;
90 };
91 };
92
86 i2c@118100 { 93 i2c@118100 {
87 pca9546@77 { 94 pca9546@77 {
88 compatible = "nxp,pca9546"; 95 compatible = "nxp,pca9546";
diff --git a/arch/powerpc/boot/dts/t208xqds.dtsi b/arch/powerpc/boot/dts/t208xqds.dtsi
index 555dc6e03d89..59061834d54e 100644
--- a/arch/powerpc/boot/dts/t208xqds.dtsi
+++ b/arch/powerpc/boot/dts/t208xqds.dtsi
@@ -169,6 +169,17 @@
169 shunt-resistor = <1000>; 169 shunt-resistor = <1000>;
170 }; 170 };
171 }; 171 };
172
173 i2c@3 {
174 #address-cells = <1>;
175 #size-cells = <0>;
176 reg = <0x3>;
177
178 adt7461@4c {
179 compatible = "adi,adt7461";
180 reg = <0x4c>;
181 };
182 };
172 }; 183 };
173 }; 184 };
174 185
diff --git a/arch/powerpc/boot/dts/t4240emu.dts b/arch/powerpc/boot/dts/t4240emu.dts
index bc12127a03fb..decaf357db9c 100644
--- a/arch/powerpc/boot/dts/t4240emu.dts
+++ b/arch/powerpc/boot/dts/t4240emu.dts
@@ -250,9 +250,9 @@
250 fsl,liodn-bits = <12>; 250 fsl,liodn-bits = <12>;
251 }; 251 };
252 252
253 clockgen: global-utilities@e1000 { 253/include/ "fsl/qoriq-clockgen2.dtsi"
254 global-utilities@e1000 {
254 compatible = "fsl,t4240-clockgen", "fsl,qoriq-clockgen-2.0"; 255 compatible = "fsl,t4240-clockgen", "fsl,qoriq-clockgen-2.0";
255 reg = <0xe1000 0x1000>;
256 }; 256 };
257 257
258/include/ "fsl/qoriq-dma-0.dtsi" 258/include/ "fsl/qoriq-dma-0.dtsi"
diff --git a/arch/powerpc/boot/main.c b/arch/powerpc/boot/main.c
index d367a0aece2a..d80161b633f4 100644
--- a/arch/powerpc/boot/main.c
+++ b/arch/powerpc/boot/main.c
@@ -144,13 +144,24 @@ static char cmdline[BOOT_COMMAND_LINE_SIZE]
144 144
145static void prep_cmdline(void *chosen) 145static void prep_cmdline(void *chosen)
146{ 146{
147 unsigned int getline_timeout = 5000;
148 int v;
149 int n;
150
151 /* Wait-for-input time */
152 n = getprop(chosen, "linux,cmdline-timeout", &v, sizeof(v));
153 if (n == sizeof(v))
154 getline_timeout = v;
155
147 if (cmdline[0] == '\0') 156 if (cmdline[0] == '\0')
148 getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1); 157 getprop(chosen, "bootargs", cmdline, BOOT_COMMAND_LINE_SIZE-1);
149 158
150 printf("\n\rLinux/PowerPC load: %s", cmdline); 159 printf("\n\rLinux/PowerPC load: %s", cmdline);
160
151 /* If possible, edit the command line */ 161 /* If possible, edit the command line */
152 if (console_ops.edit_cmdline) 162 if (console_ops.edit_cmdline && getline_timeout)
153 console_ops.edit_cmdline(cmdline, BOOT_COMMAND_LINE_SIZE); 163 console_ops.edit_cmdline(cmdline, BOOT_COMMAND_LINE_SIZE, getline_timeout);
164
154 printf("\n\r"); 165 printf("\n\r");
155 166
156 /* Put the command line back into the devtree for the kernel */ 167 /* Put the command line back into the devtree for the kernel */
diff --git a/arch/powerpc/boot/ops.h b/arch/powerpc/boot/ops.h
index 8aad3c55aeda..5e75e1c5518e 100644
--- a/arch/powerpc/boot/ops.h
+++ b/arch/powerpc/boot/ops.h
@@ -58,7 +58,7 @@ extern struct dt_ops dt_ops;
58struct console_ops { 58struct console_ops {
59 int (*open)(void); 59 int (*open)(void);
60 void (*write)(const char *buf, int len); 60 void (*write)(const char *buf, int len);
61 void (*edit_cmdline)(char *buf, int len); 61 void (*edit_cmdline)(char *buf, int len, unsigned int getline_timeout);
62 void (*close)(void); 62 void (*close)(void);
63 void *data; 63 void *data;
64}; 64};
diff --git a/arch/powerpc/boot/serial.c b/arch/powerpc/boot/serial.c
index f2156f07571f..167ee9433de6 100644
--- a/arch/powerpc/boot/serial.c
+++ b/arch/powerpc/boot/serial.c
@@ -33,7 +33,7 @@ static void serial_write(const char *buf, int len)
33 scdp->putc(*buf++); 33 scdp->putc(*buf++);
34} 34}
35 35
36static void serial_edit_cmdline(char *buf, int len) 36static void serial_edit_cmdline(char *buf, int len, unsigned int timeout)
37{ 37{
38 int timer = 0, count; 38 int timer = 0, count;
39 char ch, *cp; 39 char ch, *cp;
@@ -44,7 +44,7 @@ static void serial_edit_cmdline(char *buf, int len)
44 cp = &buf[count]; 44 cp = &buf[count];
45 count++; 45 count++;
46 46
47 while (timer++ < 5*1000) { 47 do {
48 if (scdp->tstc()) { 48 if (scdp->tstc()) {
49 while (((ch = scdp->getc()) != '\n') && (ch != '\r')) { 49 while (((ch = scdp->getc()) != '\n') && (ch != '\r')) {
50 /* Test for backspace/delete */ 50 /* Test for backspace/delete */
@@ -70,7 +70,7 @@ static void serial_edit_cmdline(char *buf, int len)
70 break; /* Exit 'timer' loop */ 70 break; /* Exit 'timer' loop */
71 } 71 }
72 udelay(1000); /* 1 msec */ 72 udelay(1000); /* 1 msec */
73 } 73 } while (timer++ < timeout);
74 *cp = 0; 74 *cp = 0;
75} 75}
76 76
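
[Editor's note] The three hunks above thread a device-tree-configurable wait-for-input
timeout ("linux,cmdline-timeout", in milliseconds) from prep_cmdline() through
console_ops.edit_cmdline() into the serial backend. A minimal sketch of a backend
honoring the new three-argument callback; the function name is hypothetical, while
scdp and udelay() are the helpers used in serial.c above:

	static void my_edit_cmdline(char *buf, int len, unsigned int timeout)
	{
		unsigned int timer = 0;

		/* timeout is in milliseconds; prep_cmdline() skips the call
		 * entirely when the timeout property is zero */
		do {
			if (scdp->tstc())
				break;		/* input arrived: go edit buf */
			udelay(1000);		/* 1 msec */
		} while (timer++ < timeout);
		/* ... consume input and edit buf as serial_edit_cmdline() does ... */
	}
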
diff --git a/arch/powerpc/configs/corenet32_smp_defconfig b/arch/powerpc/configs/corenet32_smp_defconfig
index 688e9e4d29a1..611efe99faeb 100644
--- a/arch/powerpc/configs/corenet32_smp_defconfig
+++ b/arch/powerpc/configs/corenet32_smp_defconfig
@@ -144,6 +144,7 @@ CONFIG_RTC_DRV_DS1374=y
144CONFIG_RTC_DRV_DS3232=y 144CONFIG_RTC_DRV_DS3232=y
145CONFIG_UIO=y 145CONFIG_UIO=y
146CONFIG_STAGING=y 146CONFIG_STAGING=y
147CONFIG_MEMORY=y
147CONFIG_VIRT_DRIVERS=y 148CONFIG_VIRT_DRIVERS=y
148CONFIG_FSL_HV_MANAGER=y 149CONFIG_FSL_HV_MANAGER=y
149CONFIG_EXT2_FS=y 150CONFIG_EXT2_FS=y
diff --git a/arch/powerpc/configs/corenet64_smp_defconfig b/arch/powerpc/configs/corenet64_smp_defconfig
index 6db97e4414b2..be24a18c0d96 100644
--- a/arch/powerpc/configs/corenet64_smp_defconfig
+++ b/arch/powerpc/configs/corenet64_smp_defconfig
@@ -118,6 +118,7 @@ CONFIG_FSL_DMA=y
118CONFIG_VIRT_DRIVERS=y 118CONFIG_VIRT_DRIVERS=y
119CONFIG_FSL_HV_MANAGER=y 119CONFIG_FSL_HV_MANAGER=y
120CONFIG_FSL_CORENET_CF=y 120CONFIG_FSL_CORENET_CF=y
121CONFIG_MEMORY=y
121CONFIG_EXT2_FS=y 122CONFIG_EXT2_FS=y
122CONFIG_EXT3_FS=y 123CONFIG_EXT3_FS=y
123CONFIG_ISO9660_FS=m 124CONFIG_ISO9660_FS=m
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig
index d2c415489f72..02395fab19bd 100644
--- a/arch/powerpc/configs/mpc85xx_defconfig
+++ b/arch/powerpc/configs/mpc85xx_defconfig
@@ -215,6 +215,7 @@ CONFIG_RTC_DRV_DS3232=y
215CONFIG_RTC_DRV_CMOS=y 215CONFIG_RTC_DRV_CMOS=y
216CONFIG_DMADEVICES=y 216CONFIG_DMADEVICES=y
217CONFIG_FSL_DMA=y 217CONFIG_FSL_DMA=y
218CONFIG_MEMORY=y
218# CONFIG_NET_DMA is not set 219# CONFIG_NET_DMA is not set
219CONFIG_EXT2_FS=y 220CONFIG_EXT2_FS=y
220CONFIG_EXT3_FS=y 221CONFIG_EXT3_FS=y
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig
index 87460083dbc7..b5d1b82a1b43 100644
--- a/arch/powerpc/configs/mpc85xx_smp_defconfig
+++ b/arch/powerpc/configs/mpc85xx_smp_defconfig
@@ -216,6 +216,7 @@ CONFIG_RTC_DRV_DS3232=y
216CONFIG_RTC_DRV_CMOS=y 216CONFIG_RTC_DRV_CMOS=y
217CONFIG_DMADEVICES=y 217CONFIG_DMADEVICES=y
218CONFIG_FSL_DMA=y 218CONFIG_FSL_DMA=y
219CONFIG_MEMORY=y
219# CONFIG_NET_DMA is not set 220# CONFIG_NET_DMA is not set
220CONFIG_EXT2_FS=y 221CONFIG_EXT2_FS=y
221CONFIG_EXT3_FS=y 222CONFIG_EXT3_FS=y
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h
index bd3bd573d0ae..59abc620f8e8 100644
--- a/arch/powerpc/include/asm/bitops.h
+++ b/arch/powerpc/include/asm/bitops.h
@@ -14,9 +14,9 @@
14 * 14 *
15 * The bitop functions are defined to work on unsigned longs, so for a 15 * The bitop functions are defined to work on unsigned longs, so for a
16 * ppc64 system the bits end up numbered: 16 * ppc64 system the bits end up numbered:
17 * |63..............0|127............64|191...........128|255...........196| 17 * |63..............0|127............64|191...........128|255...........192|
18 * and on ppc32: 18 * and on ppc32:
19 * |31.....0|63....31|95....64|127...96|159..128|191..160|223..192|255..224| 19 * |31.....0|63....32|95....64|127...96|159..128|191..160|223..192|255..224|
20 * 20 *
21 * There are a few little-endian macros used mostly for filesystem 21 * There are a few little-endian macros used mostly for filesystem
22 * bitmaps, these work on similar bit arrays layouts, but 22 * bitmaps, these work on similar bit arrays layouts, but
@@ -213,7 +213,7 @@ static __inline__ unsigned long ffz(unsigned long x)
213 return __ilog2(x & -x); 213 return __ilog2(x & -x);
214} 214}
215 215
216static __inline__ int __ffs(unsigned long x) 216static __inline__ unsigned long __ffs(unsigned long x)
217{ 217{
218 return __ilog2(x & -x); 218 return __ilog2(x & -x);
219} 219}
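
[Editor's note] Two fixes here: the bit-layout comment gets the correct word
boundaries (bit 192, not 196, starts the top ppc64 word; 32, not 31, starts the
second ppc32 word), and __ffs() now returns unsigned long, matching the generic
bitops prototype. The numbering follows the standard BIT_WORD / BIT_MASK mapping
from bit index to word and mask; as a sketch:

	#define BITS_PER_LONG	64			/* ppc64 */
	#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)
	#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))

	/* bit 192 lands in word 3 with mask 1UL << 0, i.e. exactly the
	 * "|255...........192|" boundary in the corrected comment */
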
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index daa5af91163c..22d5a7da9e68 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -448,13 +448,9 @@ extern const char *powerpc_base_platform;
448 CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX) 448 CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_DABRX)
449#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2) 449#define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2)
450 450
451#define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \
452 CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN | \
453 CPU_FTR_ICSWX | CPU_FTR_DABRX )
454
455#ifdef __powerpc64__ 451#ifdef __powerpc64__
456#ifdef CONFIG_PPC_BOOK3E 452#ifdef CONFIG_PPC_BOOK3E
457#define CPU_FTRS_POSSIBLE (CPU_FTRS_E6500 | CPU_FTRS_E5500 | CPU_FTRS_A2) 453#define CPU_FTRS_POSSIBLE (CPU_FTRS_E6500 | CPU_FTRS_E5500)
458#else 454#else
459#define CPU_FTRS_POSSIBLE \ 455#define CPU_FTRS_POSSIBLE \
460 (CPU_FTRS_POWER4 | CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | \ 456 (CPU_FTRS_POWER4 | CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | \
@@ -505,13 +501,13 @@ enum {
505 501
506#ifdef __powerpc64__ 502#ifdef __powerpc64__
507#ifdef CONFIG_PPC_BOOK3E 503#ifdef CONFIG_PPC_BOOK3E
508#define CPU_FTRS_ALWAYS (CPU_FTRS_E6500 & CPU_FTRS_E5500 & CPU_FTRS_A2) 504#define CPU_FTRS_ALWAYS (CPU_FTRS_E6500 & CPU_FTRS_E5500)
509#else 505#else
510#define CPU_FTRS_ALWAYS \ 506#define CPU_FTRS_ALWAYS \
511 (CPU_FTRS_POWER4 & CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & \ 507 (CPU_FTRS_POWER4 & CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & \
512 CPU_FTRS_POWER6 & CPU_FTRS_POWER7 & CPU_FTRS_CELL & \ 508 CPU_FTRS_POWER6 & CPU_FTRS_POWER7 & CPU_FTRS_CELL & \
513 CPU_FTRS_PA6T & CPU_FTRS_POWER8 & CPU_FTRS_POWER8E & \ 509 CPU_FTRS_PA6T & CPU_FTRS_POWER8 & CPU_FTRS_POWER8E & \
514 CPU_FTRS_POWER8_DD1 & CPU_FTRS_POSSIBLE) 510 CPU_FTRS_POWER8_DD1 & ~CPU_FTR_HVMODE & CPU_FTRS_POSSIBLE)
515#endif 511#endif
516#else 512#else
517enum { 513enum {
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index ca07f9c27335..0652ebe117af 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -39,6 +39,7 @@ struct device_node;
39#define EEH_PROBE_MODE_DEV 0x04 /* From PCI device */ 39#define EEH_PROBE_MODE_DEV 0x04 /* From PCI device */
40#define EEH_PROBE_MODE_DEVTREE 0x08 /* From device tree */ 40#define EEH_PROBE_MODE_DEVTREE 0x08 /* From device tree */
41#define EEH_ENABLE_IO_FOR_LOG 0x10 /* Enable IO for log */ 41#define EEH_ENABLE_IO_FOR_LOG 0x10 /* Enable IO for log */
42#define EEH_EARLY_DUMP_LOG 0x20 /* Dump log immediately */
42 43
43/* 44/*
44 * Delay for PE reset, all in ms 45 * Delay for PE reset, all in ms
@@ -72,6 +73,7 @@ struct device_node;
72#define EEH_PE_ISOLATED (1 << 0) /* Isolated PE */ 73#define EEH_PE_ISOLATED (1 << 0) /* Isolated PE */
73#define EEH_PE_RECOVERING (1 << 1) /* Recovering PE */ 74#define EEH_PE_RECOVERING (1 << 1) /* Recovering PE */
74#define EEH_PE_CFG_BLOCKED (1 << 2) /* Block config access */ 75#define EEH_PE_CFG_BLOCKED (1 << 2) /* Block config access */
76#define EEH_PE_RESET (1 << 3) /* PE reset in progress */
75 77
76#define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */ 78#define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */
77#define EEH_PE_CFG_RESTRICTED (1 << 9) /* Block config on error */ 79#define EEH_PE_CFG_RESTRICTED (1 << 9) /* Block config on error */
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
index 888d8f3f2524..57d289acb803 100644
--- a/arch/powerpc/include/asm/elf.h
+++ b/arch/powerpc/include/asm/elf.h
@@ -28,8 +28,7 @@
28 the loader. We need to make sure that it is out of the way of the program 28 the loader. We need to make sure that it is out of the way of the program
29 that it will "exec", and that there is sufficient room for the brk. */ 29 that it will "exec", and that there is sufficient room for the brk. */
30 30
31extern unsigned long randomize_et_dyn(unsigned long base); 31#define ELF_ET_DYN_BASE 0x20000000
32#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
33 32
34#define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0) 33#define ELF_CORE_EFLAGS (is_elf2_task() ? 2 : 0)
35 34
diff --git a/arch/powerpc/include/asm/fsl_guts.h b/arch/powerpc/include/asm/fsl_guts.h
index 77ced0b3d81d..43b6bb1a4a9c 100644
--- a/arch/powerpc/include/asm/fsl_guts.h
+++ b/arch/powerpc/include/asm/fsl_guts.h
@@ -68,7 +68,10 @@ struct ccsr_guts {
68 u8 res0b4[0xc0 - 0xb4]; 68 u8 res0b4[0xc0 - 0xb4];
69 __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register 69 __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register
70 Called 'elbcvselcr' on 86xx SOCs */ 70 Called 'elbcvselcr' on 86xx SOCs */
71 u8 res0c4[0x224 - 0xc4]; 71 u8 res0c4[0x100 - 0xc4];
72 __be32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers
73 There are 16 registers */
74 u8 res140[0x224 - 0x140];
72 __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */ 75 __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */
73 __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */ 76 __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */
74 u8 res22c[0x604 - 0x22c]; 77 u8 res22c[0x604 - 0x22c];
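
[Editor's note] Splitting the reserved range makes the sixteen Reset Control Word
Status registers (0x100..0x13c) addressable through struct ccsr_guts. A hypothetical
read, using the usual powerpc in_be32() accessor (the register index is illustrative,
not taken from this patch):

	struct ccsr_guts __iomem *guts;		/* ioremap'd guts block */
	u32 rcw;

	rcw = in_be32(&guts->rcwsr[4]);		/* e.g. RCWSR4 */
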
diff --git a/arch/powerpc/include/asm/hardirq.h b/arch/powerpc/include/asm/hardirq.h
index 1bbb3013d6aa..8add8b861e8d 100644
--- a/arch/powerpc/include/asm/hardirq.h
+++ b/arch/powerpc/include/asm/hardirq.h
@@ -21,7 +21,12 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
21 21
22#define __ARCH_IRQ_STAT 22#define __ARCH_IRQ_STAT
23 23
24#define local_softirq_pending() __get_cpu_var(irq_stat).__softirq_pending 24#define local_softirq_pending() __this_cpu_read(irq_stat.__softirq_pending)
25
26#define __ARCH_SET_SOFTIRQ_PENDING
27
28#define set_softirq_pending(x) __this_cpu_write(irq_stat.__softirq_pending, (x))
29#define or_softirq_pending(x) __this_cpu_or(irq_stat.__softirq_pending, (x))
25 30
26static inline void ack_bad_irq(unsigned int irq) 31static inline void ack_bad_irq(unsigned int irq)
27{ 32{
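
[Editor's note] Defining __ARCH_SET_SOFTIRQ_PENDING switches the generic softirq
code from read-modify-write on local_softirq_pending() to the per-cpu accessors
above. In effect, raising a softirq with interrupts off becomes a single percpu OR;
roughly:

	/* what kernel/softirq.c expands to with these macros */
	or_softirq_pending(1UL << nr);	/* __this_cpu_or(irq_stat.__softirq_pending, ...) */
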
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 766b77d527ac..1d53a65b4ec1 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -48,7 +48,7 @@ static inline unsigned int hugepd_shift(hugepd_t hpd)
48#endif /* CONFIG_PPC_BOOK3S_64 */ 48#endif /* CONFIG_PPC_BOOK3S_64 */
49 49
50 50
51static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr, 51static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
52 unsigned pdshift) 52 unsigned pdshift)
53{ 53{
54 /* 54 /*
@@ -58,9 +58,9 @@ static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr,
58 */ 58 */
59 unsigned long idx = 0; 59 unsigned long idx = 0;
60 60
61 pte_t *dir = hugepd_page(*hpdp); 61 pte_t *dir = hugepd_page(hpd);
62#ifndef CONFIG_PPC_FSL_BOOK3E 62#ifndef CONFIG_PPC_FSL_BOOK3E
63 idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(*hpdp); 63 idx = (addr & ((1UL << pdshift) - 1)) >> hugepd_shift(hpd);
64#endif 64#endif
65 65
66 return dir + idx; 66 return dir + idx;
@@ -193,7 +193,7 @@ static inline void flush_hugetlb_page(struct vm_area_struct *vma,
193} 193}
194 194
195#define hugepd_shift(x) 0 195#define hugepd_shift(x) 0
196static inline pte_t *hugepte_offset(hugepd_t *hpdp, unsigned long addr, 196static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr,
197 unsigned pdshift) 197 unsigned pdshift)
198{ 198{
199 return 0; 199 return 0;
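
[Editor's note] hugepte_offset() now takes the hugepd_t entry by value rather than
a pointer to it, so callers dereference at the call site; a hypothetical caller
converts like this:

	pte_t *ptep;

	/* old: ptep = hugepte_offset(hpdp, addr, pdshift); */
	ptep = hugepte_offset(*hpdp, addr, pdshift);
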
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h
index 9eaf301ac952..a8d2ef30d473 100644
--- a/arch/powerpc/include/asm/io.h
+++ b/arch/powerpc/include/asm/io.h
@@ -855,9 +855,6 @@ static inline void * bus_to_virt(unsigned long address)
855 855
856#define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set) 856#define clrsetbits_8(addr, clear, set) clrsetbits(8, addr, clear, set)
857 857
858void __iomem *devm_ioremap_prot(struct device *dev, resource_size_t offset,
859 size_t size, unsigned long flags);
860
861#endif /* __KERNEL__ */ 858#endif /* __KERNEL__ */
862 859
863#endif /* _ASM_POWERPC_IO_H */ 860#endif /* _ASM_POWERPC_IO_H */
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h
index 307347f8ddbd..c8175a3fe560 100644
--- a/arch/powerpc/include/asm/machdep.h
+++ b/arch/powerpc/include/asm/machdep.h
@@ -42,7 +42,7 @@ struct machdep_calls {
42 unsigned long newpp, 42 unsigned long newpp,
43 unsigned long vpn, 43 unsigned long vpn,
44 int bpsize, int apsize, 44 int bpsize, int apsize,
45 int ssize, int local); 45 int ssize, unsigned long flags);
46 void (*hpte_updateboltedpp)(unsigned long newpp, 46 void (*hpte_updateboltedpp)(unsigned long newpp,
47 unsigned long ea, 47 unsigned long ea,
48 int psize, int ssize); 48 int psize, int ssize);
@@ -60,7 +60,7 @@ struct machdep_calls {
60 void (*hugepage_invalidate)(unsigned long vsid, 60 void (*hugepage_invalidate)(unsigned long vsid,
61 unsigned long addr, 61 unsigned long addr,
62 unsigned char *hpte_slot_array, 62 unsigned char *hpte_slot_array,
63 int psize, int ssize); 63 int psize, int ssize, int local);
64 /* special for kexec, to be called in real mode, linear mapping is 64 /* special for kexec, to be called in real mode, linear mapping is
65 * destroyed as well */ 65 * destroyed as well */
66 void (*hpte_clear_all)(void); 66 void (*hpte_clear_all)(void);
@@ -142,7 +142,6 @@ struct machdep_calls {
142#endif 142#endif
143 143
144 void (*restart)(char *cmd); 144 void (*restart)(char *cmd);
145 void (*power_off)(void);
146 void (*halt)(void); 145 void (*halt)(void);
147 void (*panic)(char *str); 146 void (*panic)(char *str);
148 void (*cpu_die)(void); 147 void (*cpu_die)(void);
@@ -292,10 +291,6 @@ struct machdep_calls {
292#ifdef CONFIG_ARCH_RANDOM 291#ifdef CONFIG_ARCH_RANDOM
293 int (*get_random_long)(unsigned long *v); 292 int (*get_random_long)(unsigned long *v);
294#endif 293#endif
295
296#ifdef CONFIG_MEMORY_HOTREMOVE
297 int (*remove_memory)(u64, u64);
298#endif
299}; 294};
300 295
301extern void e500_idle(void); 296extern void e500_idle(void);
@@ -343,16 +338,6 @@ extern sys_ctrler_t sys_ctrler;
343 338
344#endif /* CONFIG_PPC_PMAC */ 339#endif /* CONFIG_PPC_PMAC */
345 340
346
347/* Functions to produce codes on the leds.
348 * The SRC code should be unique for the message category and should
349 * be limited to the lower 24 bits (the upper 8 are set by these funcs),
350 * and (for boot & dump) should be sorted numerically in the order
351 * the events occur.
352 */
353/* Print a boot progress message. */
354void ppc64_boot_msg(unsigned int src, const char *msg);
355
356static inline void log_error(char *buf, unsigned int err_type, int fatal) 341static inline void log_error(char *buf, unsigned int err_type, int fatal)
357{ 342{
358 if (ppc_md.log_error) 343 if (ppc_md.log_error)
diff --git a/arch/powerpc/include/asm/mmu-8xx.h b/arch/powerpc/include/asm/mmu-8xx.h
index 3d11d3ce79ec..986b9e1e1044 100644
--- a/arch/powerpc/include/asm/mmu-8xx.h
+++ b/arch/powerpc/include/asm/mmu-8xx.h
@@ -56,6 +56,7 @@
56 * additional information from the MI_EPN, and MI_TWC registers. 56 * additional information from the MI_EPN, and MI_TWC registers.
57 */ 57 */
58#define SPRN_MI_RPN 790 58#define SPRN_MI_RPN 790
59#define MI_SPS16K 0x00000008 /* Small page size (0 = 4k, 1 = 16k) */
59 60
60/* Define an RPN value for mapping kernel memory to large virtual 61/* Define an RPN value for mapping kernel memory to large virtual
61 * pages for boot initialization. This has real page number of 0, 62 * pages for boot initialization. This has real page number of 0,
@@ -129,6 +130,7 @@
129 * additional information from the MD_EPN, and MD_TWC registers. 130 * additional information from the MD_EPN, and MD_TWC registers.
130 */ 131 */
131#define SPRN_MD_RPN 798 132#define SPRN_MD_RPN 798
133#define MD_SPS16K 0x00000008 /* Small page size (0 = 4k, 1 = 16k) */
132 134
133/* This is a temporary storage register that could be used to save 135/* This is a temporary storage register that could be used to save
134 * a processor working register during a tablewalk. 136 * a processor working register during a tablewalk.
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h
index aeebc94b2bce..4f13c3ed7acf 100644
--- a/arch/powerpc/include/asm/mmu-hash64.h
+++ b/arch/powerpc/include/asm/mmu-hash64.h
@@ -316,27 +316,33 @@ static inline unsigned long hpt_hash(unsigned long vpn,
316 return hash & 0x7fffffffffUL; 316 return hash & 0x7fffffffffUL;
317} 317}
318 318
319#define HPTE_LOCAL_UPDATE 0x1
320#define HPTE_NOHPTE_UPDATE 0x2
321
319extern int __hash_page_4K(unsigned long ea, unsigned long access, 322extern int __hash_page_4K(unsigned long ea, unsigned long access,
320 unsigned long vsid, pte_t *ptep, unsigned long trap, 323 unsigned long vsid, pte_t *ptep, unsigned long trap,
321 unsigned int local, int ssize, int subpage_prot); 324 unsigned long flags, int ssize, int subpage_prot);
322extern int __hash_page_64K(unsigned long ea, unsigned long access, 325extern int __hash_page_64K(unsigned long ea, unsigned long access,
323 unsigned long vsid, pte_t *ptep, unsigned long trap, 326 unsigned long vsid, pte_t *ptep, unsigned long trap,
324 unsigned int local, int ssize); 327 unsigned long flags, int ssize);
325struct mm_struct; 328struct mm_struct;
326unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap); 329unsigned int hash_page_do_lazy_icache(unsigned int pp, pte_t pte, int trap);
327extern int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, unsigned long trap); 330extern int hash_page_mm(struct mm_struct *mm, unsigned long ea,
328extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); 331 unsigned long access, unsigned long trap,
332 unsigned long flags);
333extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
334 unsigned long dsisr);
329int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, 335int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
330 pte_t *ptep, unsigned long trap, int local, int ssize, 336 pte_t *ptep, unsigned long trap, unsigned long flags,
331 unsigned int shift, unsigned int mmu_psize); 337 int ssize, unsigned int shift, unsigned int mmu_psize);
332#ifdef CONFIG_TRANSPARENT_HUGEPAGE 338#ifdef CONFIG_TRANSPARENT_HUGEPAGE
333extern int __hash_page_thp(unsigned long ea, unsigned long access, 339extern int __hash_page_thp(unsigned long ea, unsigned long access,
334 unsigned long vsid, pmd_t *pmdp, unsigned long trap, 340 unsigned long vsid, pmd_t *pmdp, unsigned long trap,
335 int local, int ssize, unsigned int psize); 341 unsigned long flags, int ssize, unsigned int psize);
336#else 342#else
337static inline int __hash_page_thp(unsigned long ea, unsigned long access, 343static inline int __hash_page_thp(unsigned long ea, unsigned long access,
338 unsigned long vsid, pmd_t *pmdp, 344 unsigned long vsid, pmd_t *pmdp,
339 unsigned long trap, int local, 345 unsigned long trap, unsigned long flags,
340 int ssize, unsigned int psize) 346 int ssize, unsigned int psize)
341{ 347{
342 BUG(); 348 BUG();
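
[Editor's note] The int local argument to the hash-page family becomes an unsigned
long flags word built from HPTE_LOCAL_UPDATE and HPTE_NOHPTE_UPDATE. A call site
converting the old boolean would look roughly like this (reading HPTE_NOHPTE_UPDATE
as "skip updating the HPTE" is an assumption from the name, not spelled out in this
hunk):

	unsigned long flags = 0;

	if (local)
		flags |= HPTE_LOCAL_UPDATE;	/* flush on this CPU only */

	rc = __hash_page_4K(ea, access, vsid, ptep, trap, flags,
			    ssize, subpage_prot);
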
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 9124b0ede1fc..5cd8d2fddba9 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -154,6 +154,10 @@ struct opal_sg_list {
154#define OPAL_HANDLE_HMI 98 154#define OPAL_HANDLE_HMI 98
155#define OPAL_REGISTER_DUMP_REGION 101 155#define OPAL_REGISTER_DUMP_REGION 101
156#define OPAL_UNREGISTER_DUMP_REGION 102 156#define OPAL_UNREGISTER_DUMP_REGION 102
157#define OPAL_WRITE_TPO 103
158#define OPAL_READ_TPO 104
159#define OPAL_IPMI_SEND 107
160#define OPAL_IPMI_RECV 108
157 161
158#ifndef __ASSEMBLY__ 162#ifndef __ASSEMBLY__
159 163
@@ -284,62 +288,6 @@ enum OpalMessageType {
284 OPAL_MSG_TYPE_MAX, 288 OPAL_MSG_TYPE_MAX,
285}; 289};
286 290
287/* Machine check related definitions */
288enum OpalMCE_Version {
289 OpalMCE_V1 = 1,
290};
291
292enum OpalMCE_Severity {
293 OpalMCE_SEV_NO_ERROR = 0,
294 OpalMCE_SEV_WARNING = 1,
295 OpalMCE_SEV_ERROR_SYNC = 2,
296 OpalMCE_SEV_FATAL = 3,
297};
298
299enum OpalMCE_Disposition {
300 OpalMCE_DISPOSITION_RECOVERED = 0,
301 OpalMCE_DISPOSITION_NOT_RECOVERED = 1,
302};
303
304enum OpalMCE_Initiator {
305 OpalMCE_INITIATOR_UNKNOWN = 0,
306 OpalMCE_INITIATOR_CPU = 1,
307};
308
309enum OpalMCE_ErrorType {
310 OpalMCE_ERROR_TYPE_UNKNOWN = 0,
311 OpalMCE_ERROR_TYPE_UE = 1,
312 OpalMCE_ERROR_TYPE_SLB = 2,
313 OpalMCE_ERROR_TYPE_ERAT = 3,
314 OpalMCE_ERROR_TYPE_TLB = 4,
315};
316
317enum OpalMCE_UeErrorType {
318 OpalMCE_UE_ERROR_INDETERMINATE = 0,
319 OpalMCE_UE_ERROR_IFETCH = 1,
320 OpalMCE_UE_ERROR_PAGE_TABLE_WALK_IFETCH = 2,
321 OpalMCE_UE_ERROR_LOAD_STORE = 3,
322 OpalMCE_UE_ERROR_PAGE_TABLE_WALK_LOAD_STORE = 4,
323};
324
325enum OpalMCE_SlbErrorType {
326 OpalMCE_SLB_ERROR_INDETERMINATE = 0,
327 OpalMCE_SLB_ERROR_PARITY = 1,
328 OpalMCE_SLB_ERROR_MULTIHIT = 2,
329};
330
331enum OpalMCE_EratErrorType {
332 OpalMCE_ERAT_ERROR_INDETERMINATE = 0,
333 OpalMCE_ERAT_ERROR_PARITY = 1,
334 OpalMCE_ERAT_ERROR_MULTIHIT = 2,
335};
336
337enum OpalMCE_TlbErrorType {
338 OpalMCE_TLB_ERROR_INDETERMINATE = 0,
339 OpalMCE_TLB_ERROR_PARITY = 1,
340 OpalMCE_TLB_ERROR_MULTIHIT = 2,
341};
342
343enum OpalThreadStatus { 291enum OpalThreadStatus {
344 OPAL_THREAD_INACTIVE = 0x0, 292 OPAL_THREAD_INACTIVE = 0x0,
345 OPAL_THREAD_STARTED = 0x1, 293 OPAL_THREAD_STARTED = 0x1,
@@ -452,52 +400,15 @@ struct opal_msg {
452 __be64 params[8]; 400 __be64 params[8];
453}; 401};
454 402
455struct opal_machine_check_event { 403enum {
456 enum OpalMCE_Version version:8; /* 0x00 */ 404 OPAL_IPMI_MSG_FORMAT_VERSION_1 = 1,
457 uint8_t in_use; /* 0x01 */ 405};
458 enum OpalMCE_Severity severity:8; /* 0x02 */
459 enum OpalMCE_Initiator initiator:8; /* 0x03 */
460 enum OpalMCE_ErrorType error_type:8; /* 0x04 */
461 enum OpalMCE_Disposition disposition:8; /* 0x05 */
462 uint8_t reserved_1[2]; /* 0x06 */
463 uint64_t gpr3; /* 0x08 */
464 uint64_t srr0; /* 0x10 */
465 uint64_t srr1; /* 0x18 */
466 union { /* 0x20 */
467 struct {
468 enum OpalMCE_UeErrorType ue_error_type:8;
469 uint8_t effective_address_provided;
470 uint8_t physical_address_provided;
471 uint8_t reserved_1[5];
472 uint64_t effective_address;
473 uint64_t physical_address;
474 uint8_t reserved_2[8];
475 } ue_error;
476
477 struct {
478 enum OpalMCE_SlbErrorType slb_error_type:8;
479 uint8_t effective_address_provided;
480 uint8_t reserved_1[6];
481 uint64_t effective_address;
482 uint8_t reserved_2[16];
483 } slb_error;
484
485 struct {
486 enum OpalMCE_EratErrorType erat_error_type:8;
487 uint8_t effective_address_provided;
488 uint8_t reserved_1[6];
489 uint64_t effective_address;
490 uint8_t reserved_2[16];
491 } erat_error;
492 406
493 struct { 407struct opal_ipmi_msg {
494 enum OpalMCE_TlbErrorType tlb_error_type:8; 408 uint8_t version;
495 uint8_t effective_address_provided; 409 uint8_t netfn;
496 uint8_t reserved_1[6]; 410 uint8_t cmd;
497 uint64_t effective_address; 411 uint8_t data[];
498 uint8_t reserved_2[16];
499 } tlb_error;
500 } u;
501}; 412};
502 413
503/* FSP memory errors handling */ 414/* FSP memory errors handling */
@@ -819,6 +730,9 @@ int64_t opal_rtc_read(__be32 *year_month_day,
819 __be64 *hour_minute_second_millisecond); 730 __be64 *hour_minute_second_millisecond);
820int64_t opal_rtc_write(uint32_t year_month_day, 731int64_t opal_rtc_write(uint32_t year_month_day,
821 uint64_t hour_minute_second_millisecond); 732 uint64_t hour_minute_second_millisecond);
733int64_t opal_tpo_read(uint64_t token, __be32 *year_mon_day, __be32 *hour_min);
734int64_t opal_tpo_write(uint64_t token, uint32_t year_mon_day,
735 uint32_t hour_min);
822int64_t opal_cec_power_down(uint64_t request); 736int64_t opal_cec_power_down(uint64_t request);
823int64_t opal_cec_reboot(void); 737int64_t opal_cec_reboot(void);
824int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset); 738int64_t opal_read_nvram(uint64_t buffer, uint64_t size, uint64_t offset);
@@ -963,6 +877,10 @@ int64_t opal_handle_hmi(void);
963int64_t opal_register_dump_region(uint32_t id, uint64_t start, uint64_t end); 877int64_t opal_register_dump_region(uint32_t id, uint64_t start, uint64_t end);
964int64_t opal_unregister_dump_region(uint32_t id); 878int64_t opal_unregister_dump_region(uint32_t id);
965int64_t opal_pci_set_phb_cxl_mode(uint64_t phb_id, uint64_t mode, uint64_t pe_number); 879int64_t opal_pci_set_phb_cxl_mode(uint64_t phb_id, uint64_t mode, uint64_t pe_number);
880int64_t opal_ipmi_send(uint64_t interface, struct opal_ipmi_msg *msg,
881 uint64_t msg_len);
882int64_t opal_ipmi_recv(uint64_t interface, struct opal_ipmi_msg *msg,
883 uint64_t *msg_len);
966 884
967/* Internal functions */ 885/* Internal functions */
968extern int early_init_dt_scan_opal(unsigned long node, const char *uname, 886extern int early_init_dt_scan_opal(unsigned long node, const char *uname,
@@ -992,8 +910,6 @@ extern int opal_async_wait_response(uint64_t token, struct opal_msg *msg);
992extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data); 910extern int opal_get_sensor_data(u32 sensor_hndl, u32 *sensor_data);
993 911
994struct rtc_time; 912struct rtc_time;
995extern int opal_set_rtc_time(struct rtc_time *tm);
996extern void opal_get_rtc_time(struct rtc_time *tm);
997extern unsigned long opal_get_boot_time(void); 913extern unsigned long opal_get_boot_time(void);
998extern void opal_nvram_init(void); 914extern void opal_nvram_init(void);
999extern void opal_flash_init(void); 915extern void opal_flash_init(void);
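
[Editor's note] The new OPAL IPMI entry points take a variable-length opal_ipmi_msg
with the payload in a flexible array. A hypothetical sender might build one like
this (allocation, lengths, and error handling are sketched, not from this patch):

	#include <linux/slab.h>

	struct opal_ipmi_msg *msg;
	int64_t rc;

	msg = kmalloc(sizeof(*msg) + data_len, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->version = OPAL_IPMI_MSG_FORMAT_VERSION_1;
	msg->netfn = netfn;			/* IPMI network function */
	msg->cmd = cmd;				/* IPMI command byte */
	memcpy(msg->data, data, data_len);	/* payload */

	rc = opal_ipmi_send(interface, msg, sizeof(*msg) + data_len);
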
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h
index a5139ea6910b..24a386cbb928 100644
--- a/arch/powerpc/include/asm/paca.h
+++ b/arch/powerpc/include/asm/paca.h
@@ -42,7 +42,6 @@ extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */
42#define get_slb_shadow() (get_paca()->slb_shadow_ptr) 42#define get_slb_shadow() (get_paca()->slb_shadow_ptr)
43 43
44struct task_struct; 44struct task_struct;
45struct opal_machine_check_event;
46 45
47/* 46/*
48 * Defines the layout of the paca. 47 * Defines the layout of the paca.
@@ -153,12 +152,6 @@ struct paca_struct {
153 u64 tm_scratch; /* TM scratch area for reclaim */ 152 u64 tm_scratch; /* TM scratch area for reclaim */
154#endif 153#endif
155 154
156#ifdef CONFIG_PPC_POWERNV
157 /* Pointer to OPAL machine check event structure set by the
158 * early exception handler for use by high level C handler
159 */
160 struct opal_machine_check_event *opal_mc_evt;
161#endif
162#ifdef CONFIG_PPC_BOOK3S_64 155#ifdef CONFIG_PPC_BOOK3S_64
163 /* Exclusive emergency stack pointer for machine check exception. */ 156 /* Exclusive emergency stack pointer for machine check exception. */
164 void *mc_emergency_sp; 157 void *mc_emergency_sp;
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 26fe1ae15212..69c059887a2c 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -379,12 +379,14 @@ static inline int hugepd_ok(hugepd_t hpd)
379} 379}
380#endif 380#endif
381 381
382#define is_hugepd(pdep) (hugepd_ok(*((hugepd_t *)(pdep)))) 382#define is_hugepd(hpd) (hugepd_ok(hpd))
383#define pgd_huge pgd_huge
383int pgd_huge(pgd_t pgd); 384int pgd_huge(pgd_t pgd);
384#else /* CONFIG_HUGETLB_PAGE */ 385#else /* CONFIG_HUGETLB_PAGE */
385#define is_hugepd(pdep) 0 386#define is_hugepd(pdep) 0
386#define pgd_huge(pgd) 0 387#define pgd_huge(pgd) 0
387#endif /* CONFIG_HUGETLB_PAGE */ 388#endif /* CONFIG_HUGETLB_PAGE */
389#define __hugepd(x) ((hugepd_t) { (x) })
388 390
389struct page; 391struct page;
390extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg); 392extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h
index 945e47adf7db..234e07c47803 100644
--- a/arch/powerpc/include/asm/pgtable-ppc32.h
+++ b/arch/powerpc/include/asm/pgtable-ppc32.h
@@ -170,6 +170,25 @@ static inline unsigned long pte_update(pte_t *p,
170#ifdef PTE_ATOMIC_UPDATES 170#ifdef PTE_ATOMIC_UPDATES
171 unsigned long old, tmp; 171 unsigned long old, tmp;
172 172
173#ifdef CONFIG_PPC_8xx
174 unsigned long tmp2;
175
176 __asm__ __volatile__("\
1771: lwarx %0,0,%4\n\
178 andc %1,%0,%5\n\
179 or %1,%1,%6\n\
180 /* 0x200 == Extended encoding, bit 22 */ \
181 /* Bit 22 has to be 1 if neither _PAGE_USER nor _PAGE_RW are set */ \
182 rlwimi %1,%1,32-2,0x200\n /* get _PAGE_USER */ \
183 rlwinm %3,%1,32-1,0x200\n /* get _PAGE_RW */ \
184 or %1,%3,%1\n\
185 xori %1,%1,0x200\n"
186" stwcx. %1,0,%4\n\
187 bne- 1b"
188 : "=&r" (old), "=&r" (tmp), "=m" (*p), "=&r" (tmp2)
189 : "r" (p), "r" (clr), "r" (set), "m" (*p)
190 : "cc" );
191#else /* CONFIG_PPC_8xx */
173 __asm__ __volatile__("\ 192 __asm__ __volatile__("\
1741: lwarx %0,0,%3\n\ 1931: lwarx %0,0,%3\n\
175 andc %1,%0,%4\n\ 194 andc %1,%0,%4\n\
@@ -180,6 +199,7 @@ static inline unsigned long pte_update(pte_t *p,
180 : "=&r" (old), "=&r" (tmp), "=m" (*p) 199 : "=&r" (old), "=&r" (tmp), "=m" (*p)
181 : "r" (p), "r" (clr), "r" (set), "m" (*p) 200 : "r" (p), "r" (clr), "r" (set), "m" (*p)
182 : "cc" ); 201 : "cc" );
202#endif /* CONFIG_PPC_8xx */
183#else /* PTE_ATOMIC_UPDATES */ 203#else /* PTE_ATOMIC_UPDATES */
184 unsigned long old = pte_val(*p); 204 unsigned long old = pte_val(*p);
185 *p = __pte((old & ~clr) | set); 205 *p = __pte((old & ~clr) | set);
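
[Editor's note] The 8xx variant folds the _PAGE_KNLRO derivation into the atomic
update. Stripped of the lwarx/stwcx. retry loop, the rlwimi/rlwinm/xori sequence
computes, in plain C (bit values from pte-8xx.h further down: _PAGE_USER = 0x0800,
_PAGE_RW = 0x0400, bit 22 = 0x200):

	unsigned long new = (old & ~clr) | set;

	/* bit 0x200 := !(_PAGE_USER || _PAGE_RW): rlwimi moves the USER
	 * bit down two, rlwinm+or folds in the RW bit down one, and the
	 * final xori inverts the result */
	new = (new & ~0x200UL) | (((new >> 2) | (new >> 1)) & 0x200UL);
	new ^= 0x200UL;
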
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-4k.h b/arch/powerpc/include/asm/pgtable-ppc64-4k.h
index 7b935683f268..132ee1d482c2 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64-4k.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64-4k.h
@@ -57,7 +57,21 @@
57#define pgd_present(pgd) (pgd_val(pgd) != 0) 57#define pgd_present(pgd) (pgd_val(pgd) != 0)
58#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0) 58#define pgd_clear(pgdp) (pgd_val(*(pgdp)) = 0)
59#define pgd_page_vaddr(pgd) (pgd_val(pgd) & ~PGD_MASKED_BITS) 59#define pgd_page_vaddr(pgd) (pgd_val(pgd) & ~PGD_MASKED_BITS)
60#define pgd_page(pgd) virt_to_page(pgd_page_vaddr(pgd)) 60
61#ifndef __ASSEMBLY__
62
63static inline pte_t pgd_pte(pgd_t pgd)
64{
65 return __pte(pgd_val(pgd));
66}
67
68static inline pgd_t pte_pgd(pte_t pte)
69{
70 return __pgd(pte_val(pte));
71}
72extern struct page *pgd_page(pgd_t pgd);
73
74#endif /* !__ASSEMBLY__ */
61 75
62#define pud_offset(pgdp, addr) \ 76#define pud_offset(pgdp, addr) \
63 (((pud_t *) pgd_page_vaddr(*(pgdp))) + \ 77 (((pud_t *) pgd_page_vaddr(*(pgdp))) + \
diff --git a/arch/powerpc/include/asm/pgtable-ppc64-64k.h b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
index a56b82fb0609..1de35bbd02a6 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64-64k.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64-64k.h
@@ -38,4 +38,7 @@
38/* Bits to mask out from a PGD/PUD to get to the PMD page */ 38/* Bits to mask out from a PGD/PUD to get to the PMD page */
39#define PUD_MASKED_BITS 0x1ff 39#define PUD_MASKED_BITS 0x1ff
40 40
41#define pgd_pte(pgd) (pud_pte(((pud_t){ pgd })))
42#define pte_pgd(pte) ((pgd_t)pte_pud(pte))
43
41#endif /* _ASM_POWERPC_PGTABLE_PPC64_64K_H */ 44#endif /* _ASM_POWERPC_PGTABLE_PPC64_64K_H */
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h
index 9b4b1904efae..b9dcc936e2d1 100644
--- a/arch/powerpc/include/asm/pgtable-ppc64.h
+++ b/arch/powerpc/include/asm/pgtable-ppc64.h
@@ -152,7 +152,7 @@
152#define pmd_none(pmd) (!pmd_val(pmd)) 152#define pmd_none(pmd) (!pmd_val(pmd))
153#define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \ 153#define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \
154 || (pmd_val(pmd) & PMD_BAD_BITS)) 154 || (pmd_val(pmd) & PMD_BAD_BITS))
155#define pmd_present(pmd) (pmd_val(pmd) != 0) 155#define pmd_present(pmd) (!pmd_none(pmd))
156#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0) 156#define pmd_clear(pmdp) (pmd_val(*(pmdp)) = 0)
157#define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS) 157#define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS)
158extern struct page *pmd_page(pmd_t pmd); 158extern struct page *pmd_page(pmd_t pmd);
@@ -164,9 +164,21 @@ extern struct page *pmd_page(pmd_t pmd);
164#define pud_present(pud) (pud_val(pud) != 0) 164#define pud_present(pud) (pud_val(pud) != 0)
165#define pud_clear(pudp) (pud_val(*(pudp)) = 0) 165#define pud_clear(pudp) (pud_val(*(pudp)) = 0)
166#define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS) 166#define pud_page_vaddr(pud) (pud_val(pud) & ~PUD_MASKED_BITS)
167#define pud_page(pud) virt_to_page(pud_page_vaddr(pud))
168 167
168extern struct page *pud_page(pud_t pud);
169
170static inline pte_t pud_pte(pud_t pud)
171{
172 return __pte(pud_val(pud));
173}
174
175static inline pud_t pte_pud(pte_t pte)
176{
177 return __pud(pte_val(pte));
178}
179#define pud_write(pud) pte_write(pud_pte(pud))
169#define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);}) 180#define pgd_set(pgdp, pudp) ({pgd_val(*(pgdp)) = (unsigned long)(pudp);})
181#define pgd_write(pgd) pte_write(pgd_pte(pgd))
170 182
171/* 183/*
172 * Find an entry in a page-table-directory. We combine the address region 184 * Find an entry in a page-table-directory. We combine the address region
@@ -422,7 +434,22 @@ extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
422 pmd_t *pmdp, pmd_t pmd); 434 pmd_t *pmdp, pmd_t pmd);
423extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr, 435extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
424 pmd_t *pmd); 436 pmd_t *pmd);
425 437/*
438 *
439 * For core kernel code, by design, pmd_trans_huge is never run on any hugetlbfs
440 * page. The hugetlbfs page table walking and mangling paths are totally
441 * separated from the core VM paths and they're differentiated by
442 * VM_HUGETLB being set on vm_flags well before any pmd_trans_huge could run.
443 *
444 * pmd_trans_huge() is defined as false at build time if
445 * CONFIG_TRANSPARENT_HUGEPAGE=n, so that code blocks guarded by it
446 * can be optimized away entirely in that case.
447 *
448 * For ppc64 we need to differentiate explicit hugepages from THP, because
449 * for THP we also track the subpage details at the pmd level. We don't do
450 * that for explicit huge pages.
451 *
452 */
426static inline int pmd_trans_huge(pmd_t pmd) 453static inline int pmd_trans_huge(pmd_t pmd)
427{ 454{
428 /* 455 /*
@@ -431,16 +458,6 @@ static inline int pmd_trans_huge(pmd_t pmd)
431 return (pmd_val(pmd) & 0x3) && (pmd_val(pmd) & _PAGE_THP_HUGE); 458 return (pmd_val(pmd) & 0x3) && (pmd_val(pmd) & _PAGE_THP_HUGE);
432} 459}
433 460
434static inline int pmd_large(pmd_t pmd)
435{
436 /*
437 * leaf pte for huge page, bottom two bits != 00
438 */
439 if (pmd_trans_huge(pmd))
440 return pmd_val(pmd) & _PAGE_PRESENT;
441 return 0;
442}
443
444static inline int pmd_trans_splitting(pmd_t pmd) 461static inline int pmd_trans_splitting(pmd_t pmd)
445{ 462{
446 if (pmd_trans_huge(pmd)) 463 if (pmd_trans_huge(pmd))
@@ -451,6 +468,14 @@ static inline int pmd_trans_splitting(pmd_t pmd)
451extern int has_transparent_hugepage(void); 468extern int has_transparent_hugepage(void);
452#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 469#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
453 470
471static inline int pmd_large(pmd_t pmd)
472{
473 /*
474 * leaf pte for huge page, bottom two bits != 00
475 */
476 return ((pmd_val(pmd) & 0x3) != 0x0);
477}
478
454static inline pte_t pmd_pte(pmd_t pmd) 479static inline pte_t pmd_pte(pmd_t pmd)
455{ 480{
456 return __pte(pmd_val(pmd)); 481 return __pte(pmd_val(pmd));
@@ -576,6 +601,5 @@ static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
576 */ 601 */
577 return true; 602 return true;
578} 603}
579
580#endif /* __ASSEMBLY__ */ 604#endif /* __ASSEMBLY__ */
581#endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */ 605#endif /* _ASM_POWERPC_PGTABLE_PPC64_H_ */
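
[Editor's note] pmd_large() also moves outside the THP #ifdef and now tests only
the leaf encoding (bottom two bits != 00), so huge leaf PMDs are recognized
whether they come from THP or hugetlb, and whether or not
CONFIG_TRANSPARENT_HUGEPAGE is set. A fragment of a hypothetical table walker:

	if (pmd_large(*pmdp))
		return pte_page(pmd_pte(*pmdp));	/* leaf: stop descending */
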
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index 316f9a5da173..a8805fee0df9 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -274,11 +274,9 @@ extern void paging_init(void);
274 */ 274 */
275extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *); 275extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
276 276
277extern int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, unsigned long addr,
278 unsigned long end, int write, struct page **pages, int *nr);
279
280extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr, 277extern int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
281 unsigned long end, int write, struct page **pages, int *nr); 278 unsigned long end, int write,
279 struct page **pages, int *nr);
282#ifndef CONFIG_TRANSPARENT_HUGEPAGE 280#ifndef CONFIG_TRANSPARENT_HUGEPAGE
283#define pmd_large(pmd) 0 281#define pmd_large(pmd) 0
284#define has_transparent_hugepage() 0 282#define has_transparent_hugepage() 0
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index dda7ac4c80bd..29c3798cf800 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -451,7 +451,7 @@ extern unsigned long cpuidle_disable;
451enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF}; 451enum idle_boot_override {IDLE_NO_OVERRIDE = 0, IDLE_POWERSAVE_OFF};
452 452
453extern int powersave_nap; /* set if nap mode can be used in idle loop */ 453extern int powersave_nap; /* set if nap mode can be used in idle loop */
454extern void power7_nap(int check_irq); 454extern unsigned long power7_nap(int check_irq);
455extern void power7_sleep(void); 455extern void power7_sleep(void);
456extern void flush_instruction_cache(void); 456extern void flush_instruction_cache(void);
457extern void hard_reset_now(void); 457extern void hard_reset_now(void);
diff --git a/arch/powerpc/include/asm/pte-8xx.h b/arch/powerpc/include/asm/pte-8xx.h
index d44826e4ff97..daa4616e61c4 100644
--- a/arch/powerpc/include/asm/pte-8xx.h
+++ b/arch/powerpc/include/asm/pte-8xx.h
@@ -48,19 +48,22 @@
48 */ 48 */
49#define _PAGE_RW 0x0400 /* lsb PP bits, inverted in HW */ 49#define _PAGE_RW 0x0400 /* lsb PP bits, inverted in HW */
50#define _PAGE_USER 0x0800 /* msb PP bits */ 50#define _PAGE_USER 0x0800 /* msb PP bits */
51/* set when neither _PAGE_USER nor _PAGE_RW are set */
52#define _PAGE_KNLRO 0x0200
51 53
52#define _PMD_PRESENT 0x0001 54#define _PMD_PRESENT 0x0001
53#define _PMD_BAD 0x0ff0 55#define _PMD_BAD 0x0ff0
54#define _PMD_PAGE_MASK 0x000c 56#define _PMD_PAGE_MASK 0x000c
55#define _PMD_PAGE_8M 0x000c 57#define _PMD_PAGE_8M 0x000c
56 58
57#define _PTE_NONE_MASK _PAGE_ACCESSED 59#define _PTE_NONE_MASK _PAGE_KNLRO
58 60
59/* Until my rework is finished, 8xx still needs atomic PTE updates */ 61/* Until my rework is finished, 8xx still needs atomic PTE updates */
60#define PTE_ATOMIC_UPDATES 1 62#define PTE_ATOMIC_UPDATES 1
61 63
62/* We need to add _PAGE_SHARED to kernel pages */ 64/* We need to add _PAGE_SHARED to kernel pages */
63#define _PAGE_KERNEL_RO (_PAGE_SHARED) 65#define _PAGE_KERNEL_RO (_PAGE_SHARED | _PAGE_KNLRO)
66#define _PAGE_KERNEL_ROX (_PAGE_EXEC | _PAGE_KNLRO)
64#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE) 67#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW | _PAGE_HWWRITE)
65 68
66#endif /* __KERNEL__ */ 69#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/setup.h b/arch/powerpc/include/asm/setup.h
index 11ba86e17631..fbdf18cf954c 100644
--- a/arch/powerpc/include/asm/setup.h
+++ b/arch/powerpc/include/asm/setup.h
@@ -8,7 +8,6 @@ extern void ppc_printk_progress(char *s, unsigned short hex);
8 8
9extern unsigned int rtas_data; 9extern unsigned int rtas_data;
10extern int mem_init_done; /* set on boot once kmalloc can be called */ 10extern int mem_init_done; /* set on boot once kmalloc can be called */
11extern int init_bootmem_done; /* set once bootmem is available */
12extern unsigned long long memory_limit; 11extern unsigned long long memory_limit;
13extern unsigned long klimit; 12extern unsigned long klimit;
14extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); 13extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask);
@@ -24,7 +23,7 @@ extern void reloc_got2(unsigned long);
24#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x))) 23#define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x)))
25 24
26void check_for_initrd(void); 25void check_for_initrd(void);
27void do_init_bootmem(void); 26void initmem_init(void);
28void setup_panic(void); 27void setup_panic(void);
29#define ARCH_PANIC_TIMEOUT 180 28#define ARCH_PANIC_TIMEOUT 180
30 29
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index b034ecdb7c74..ebc4f165690a 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -71,13 +71,12 @@ struct thread_info {
71#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT) 71#define THREAD_SIZE_ORDER (THREAD_SHIFT - PAGE_SHIFT)
72 72
73/* how to get the thread information struct from C */ 73/* how to get the thread information struct from C */
74register unsigned long __current_r1 asm("r1");
74static inline struct thread_info *current_thread_info(void) 75static inline struct thread_info *current_thread_info(void)
75{ 76{
76 register unsigned long sp asm("r1");
77
78 /* gcc4, at least, is smart enough to turn this into a single 77 /* gcc4, at least, is smart enough to turn this into a single
79 * rlwinm for ppc32 and clrrdi for ppc64 */ 78 * rlwinm for ppc32 and clrrdi for ppc64 */
80 return (struct thread_info *)(sp & ~(THREAD_SIZE-1)); 79 return (struct thread_info *)(__current_r1 & ~(THREAD_SIZE-1));
81} 80}
82 81
83#endif /* __ASSEMBLY__ */ 82#endif /* __ASSEMBLY__ */
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h
index 2def01ed0cb2..23d351ca0303 100644
--- a/arch/powerpc/include/asm/tlbflush.h
+++ b/arch/powerpc/include/asm/tlbflush.h
@@ -107,14 +107,14 @@ extern void __flush_tlb_pending(struct ppc64_tlb_batch *batch);
107 107
108static inline void arch_enter_lazy_mmu_mode(void) 108static inline void arch_enter_lazy_mmu_mode(void)
109{ 109{
110 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 110 struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
111 111
112 batch->active = 1; 112 batch->active = 1;
113} 113}
114 114
115static inline void arch_leave_lazy_mmu_mode(void) 115static inline void arch_leave_lazy_mmu_mode(void)
116{ 116{
117 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 117 struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
118 118
119 if (batch->index) 119 if (batch->index)
120 __flush_tlb_pending(batch); 120 __flush_tlb_pending(batch);
@@ -125,9 +125,11 @@ static inline void arch_leave_lazy_mmu_mode(void)
125 125
126 126
127extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, 127extern void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize,
128 int ssize, int local); 128 int ssize, unsigned long flags);
129extern void flush_hash_range(unsigned long number, int local); 129extern void flush_hash_range(unsigned long number, int local);
130 130extern void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
131 pmd_t *pmdp, unsigned int psize, int ssize,
132 unsigned long flags);
131 133
132static inline void local_flush_tlb_mm(struct mm_struct *mm) 134static inline void local_flush_tlb_mm(struct mm_struct *mm)
133{ 135{
diff --git a/arch/powerpc/include/asm/vga.h b/arch/powerpc/include/asm/vga.h
index a2eac409c1ec..e5f8dd366212 100644
--- a/arch/powerpc/include/asm/vga.h
+++ b/arch/powerpc/include/asm/vga.h
@@ -38,12 +38,10 @@ static inline u16 scr_readw(volatile const u16 *addr)
38 38
39#endif /* !CONFIG_VGA_CONSOLE && !CONFIG_MDA_CONSOLE */ 39#endif /* !CONFIG_VGA_CONSOLE && !CONFIG_MDA_CONSOLE */
40 40
41extern unsigned long vgacon_remap_base;
42
43#ifdef __powerpc64__ 41#ifdef __powerpc64__
44#define VGA_MAP_MEM(x,s) ((unsigned long) ioremap((x), s)) 42#define VGA_MAP_MEM(x,s) ((unsigned long) ioremap((x), s))
45#else 43#else
46#define VGA_MAP_MEM(x,s) (x + vgacon_remap_base) 44#define VGA_MAP_MEM(x,s) (x)
47#endif 45#endif
48 46
49#define vga_readb(x) (*(x)) 47#define vga_readb(x) (*(x))
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 0d050ea37a04..6997f4a271df 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -98,7 +98,7 @@ DECLARE_PER_CPU(struct xics_cppr, xics_cppr);
98 98
99static inline void xics_push_cppr(unsigned int vec) 99static inline void xics_push_cppr(unsigned int vec)
100{ 100{
101 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); 101 struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
102 102
103 if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1)) 103 if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1))
104 return; 104 return;
@@ -111,7 +111,7 @@ static inline void xics_push_cppr(unsigned int vec)
111 111
112static inline unsigned char xics_pop_cppr(void) 112static inline unsigned char xics_pop_cppr(void)
113{ 113{
114 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); 114 struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
115 115
116 if (WARN_ON(os_cppr->index < 1)) 116 if (WARN_ON(os_cppr->index < 1))
117 return LOWEST_PRIORITY; 117 return LOWEST_PRIORITY;
@@ -121,7 +121,7 @@ static inline unsigned char xics_pop_cppr(void)
121 121
122static inline void xics_set_base_cppr(unsigned char cppr) 122static inline void xics_set_base_cppr(unsigned char cppr)
123{ 123{
124 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); 124 struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
125 125
126 /* we only really want to set the priority when there's 126 /* we only really want to set the priority when there's
127 * just one cppr value on the stack 127 * just one cppr value on the stack
@@ -133,7 +133,7 @@ static inline void xics_set_base_cppr(unsigned char cppr)
133 133
134static inline unsigned char xics_cppr_top(void) 134static inline unsigned char xics_cppr_top(void)
135{ 135{
136 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); 136 struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
137 137
138 return os_cppr->stack[os_cppr->index]; 138 return os_cppr->stack[os_cppr->index];
139} 139}
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c
index 34f55524d456..86150fbb42c3 100644
--- a/arch/powerpc/kernel/align.c
+++ b/arch/powerpc/kernel/align.c
@@ -908,7 +908,7 @@ int fix_alignment(struct pt_regs *regs)
908 flush_fp_to_thread(current); 908 flush_fp_to_thread(current);
909 } 909 }
910 910
911 if ((nb == 16)) { 911 if (nb == 16) {
912 if (flags & F) { 912 if (flags & F) {
913 /* Special case for 16-byte FP loads and stores */ 913 /* Special case for 16-byte FP loads and stores */
914 PPC_WARN_ALIGNMENT(fp_pair, regs); 914 PPC_WARN_ALIGNMENT(fp_pair, regs);
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 9d7dede2847c..c161ef3f28a1 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -726,12 +726,5 @@ int main(void)
726 arch.timing_last_enter.tv32.tbl)); 726 arch.timing_last_enter.tv32.tbl));
727#endif 727#endif
728 728
729#ifdef CONFIG_PPC_POWERNV
730 DEFINE(OPAL_MC_GPR3, offsetof(struct opal_machine_check_event, gpr3));
731 DEFINE(OPAL_MC_SRR0, offsetof(struct opal_machine_check_event, srr0));
732 DEFINE(OPAL_MC_SRR1, offsetof(struct opal_machine_check_event, srr1));
733 DEFINE(PACA_OPAL_MC_EVT, offsetof(struct paca_struct, opal_mc_evt));
734#endif
735
736 return 0; 729 return 0;
737} 730}
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c
index c78e6dac4d7d..cfa0f81a5bb0 100644
--- a/arch/powerpc/kernel/crash_dump.c
+++ b/arch/powerpc/kernel/crash_dump.c
@@ -12,7 +12,6 @@
12#undef DEBUG 12#undef DEBUG
13 13
14#include <linux/crash_dump.h> 14#include <linux/crash_dump.h>
15#include <linux/bootmem.h>
16#include <linux/io.h> 15#include <linux/io.h>
17#include <linux/memblock.h> 16#include <linux/memblock.h>
18#include <asm/code-patching.h> 17#include <asm/code-patching.h>
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c
index d55c76c571f3..f4217819cc31 100644
--- a/arch/powerpc/kernel/dbell.c
+++ b/arch/powerpc/kernel/dbell.c
@@ -41,7 +41,7 @@ void doorbell_exception(struct pt_regs *regs)
41 41
42 may_hard_irq_enable(); 42 may_hard_irq_enable();
43 43
44 __get_cpu_var(irq_stat).doorbell_irqs++; 44 __this_cpu_inc(irq_stat.doorbell_irqs);
45 45
46 smp_ipi_demux(); 46 smp_ipi_demux();
47 47
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 2248a1999c64..e1b6d8e17289 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -143,6 +143,8 @@ static int __init eeh_setup(char *str)
143{ 143{
144 if (!strcmp(str, "off")) 144 if (!strcmp(str, "off"))
145 eeh_add_flag(EEH_FORCE_DISABLED); 145 eeh_add_flag(EEH_FORCE_DISABLED);
146 else if (!strcmp(str, "early_log"))
147 eeh_add_flag(EEH_EARLY_DUMP_LOG);
146 148
147 return 1; 149 return 1;
148} 150}
@@ -758,30 +760,41 @@ static void eeh_reset_pe_once(struct eeh_pe *pe)
758int eeh_reset_pe(struct eeh_pe *pe) 760int eeh_reset_pe(struct eeh_pe *pe)
759{ 761{
760 int flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE); 762 int flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
761 int i, rc; 763 int i, state, ret;
764
765 /* Mark as reset and block config space */
766 eeh_pe_state_mark(pe, EEH_PE_RESET | EEH_PE_CFG_BLOCKED);
762 767
763 /* Take three shots at resetting the bus */ 768 /* Take three shots at resetting the bus */
764 for (i=0; i<3; i++) { 769 for (i = 0; i < 3; i++) {
765 eeh_reset_pe_once(pe); 770 eeh_reset_pe_once(pe);
766 771
767 /* 772 /*
768 * EEH_PE_ISOLATED is expected to be removed after 773 * EEH_PE_ISOLATED is expected to be removed after
769 * BAR restore. 774 * BAR restore.
770 */ 775 */
771 rc = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC); 776 state = eeh_ops->wait_state(pe, PCI_BUS_RESET_WAIT_MSEC);
772 if ((rc & flags) == flags) 777 if ((state & flags) == flags) {
773 return 0; 778 ret = 0;
779 goto out;
780 }
774 781
775 if (rc < 0) { 782 if (state < 0) {
776 pr_err("%s: Unrecoverable slot failure on PHB#%d-PE#%x", 783 pr_warn("%s: Unrecoverable slot failure on PHB#%d-PE#%x\n",
777 __func__, pe->phb->global_number, pe->addr); 784 __func__, pe->phb->global_number, pe->addr);
778 return -1; 785 ret = -ENOTRECOVERABLE;
786 goto out;
779 } 787 }
780 pr_err("EEH: bus reset %d failed on PHB#%d-PE#%x, rc=%d\n", 788
781 i+1, pe->phb->global_number, pe->addr, rc); 789 /* We might run out of credits */
790 ret = -EIO;
791 pr_warn("%s: Failure %d resetting PHB#%x-PE#%x (%d)\n",
792 __func__, state, pe->phb->global_number, pe->addr, (i + 1));
782 } 793 }
783 794
784 return -1; 795out:
796 eeh_pe_state_clear(pe, EEH_PE_RESET | EEH_PE_CFG_BLOCKED);
797 return ret;
785} 798}
786 799
787/** 800/**
@@ -920,11 +933,8 @@ int eeh_init(void)
920 pr_warn("%s: Platform EEH operation not found\n", 933 pr_warn("%s: Platform EEH operation not found\n",
921 __func__); 934 __func__);
922 return -EEXIST; 935 return -EEXIST;
923 } else if ((ret = eeh_ops->init())) { 936 } else if ((ret = eeh_ops->init()))
924 pr_warn("%s: Failed to call platform init function (%d)\n",
925 __func__, ret);
926 return ret; 937 return ret;
927 }
928 938
929 /* Initialize EEH event */ 939 /* Initialize EEH event */
930 ret = eeh_event_init(); 940 ret = eeh_event_init();
@@ -1209,6 +1219,7 @@ int eeh_unfreeze_pe(struct eeh_pe *pe, bool sw_state)
1209static struct pci_device_id eeh_reset_ids[] = { 1219static struct pci_device_id eeh_reset_ids[] = {
1210 { PCI_DEVICE(0x19a2, 0x0710) }, /* Emulex, BE */ 1220 { PCI_DEVICE(0x19a2, 0x0710) }, /* Emulex, BE */
1211 { PCI_DEVICE(0x10df, 0xe220) }, /* Emulex, Lancer */ 1221 { PCI_DEVICE(0x10df, 0xe220) }, /* Emulex, Lancer */
1222 { PCI_DEVICE(0x14e4, 0x1657) }, /* Broadcom BCM5719 */
1212 { 0 } 1223 { 0 }
1213}; 1224};
1214 1225
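
A note on the eeh_reset_pe() rework above: the EEH_PE_RESET | EEH_PE_CFG_BLOCKED
bracketing now lives inside the function, so every exit path unwinds the state
through the common "out:" label. A minimal sketch of the pattern, with
hypothetical helper names standing in for the real EEH calls:

    int reset_with_guard(struct eeh_pe *pe)
    {
            int i, state, ret = -EIO;

            mark_state(pe, RESET | CFG_BLOCKED);    /* block config space */
            for (i = 0; i < 3; i++) {               /* three shots at the bus */
                    reset_once(pe);
                    state = wait_state(pe, TIMEOUT);
                    if ((state & OK_FLAGS) == OK_FLAGS) {
                            ret = 0;                /* reset took effect */
                            break;
                    }
                    if (state < 0) {                /* unrecoverable slot */
                            ret = -ENOTRECOVERABLE;
                            break;
                    }
            }
            clear_state(pe, RESET | CFG_BLOCKED);   /* always unwind */
            return ret;
    }
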
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 6535936bdf27..b17e793ba67e 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -528,13 +528,11 @@ int eeh_pe_reset_and_recover(struct eeh_pe *pe)
528 eeh_pe_dev_traverse(pe, eeh_report_error, &result); 528 eeh_pe_dev_traverse(pe, eeh_report_error, &result);
529 529
530 /* Issue reset */ 530 /* Issue reset */
531 eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
532 ret = eeh_reset_pe(pe); 531 ret = eeh_reset_pe(pe);
533 if (ret) { 532 if (ret) {
534 eeh_pe_state_clear(pe, EEH_PE_RECOVERING | EEH_PE_CFG_BLOCKED); 533 eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
535 return ret; 534 return ret;
536 } 535 }
537 eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
538 536
539 /* Unfreeze the PE */ 537 /* Unfreeze the PE */
540 ret = eeh_clear_pe_frozen_state(pe, true); 538 ret = eeh_clear_pe_frozen_state(pe, true);
@@ -601,19 +599,15 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
601 * config accesses. So we prefer to block them. However, controlled 599 * config accesses. So we prefer to block them. However, controlled
602 * PCI config accesses initiated from EEH itself are allowed. 600 * PCI config accesses initiated from EEH itself are allowed.
603 */ 601 */
604 eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
605 rc = eeh_reset_pe(pe); 602 rc = eeh_reset_pe(pe);
606 if (rc) { 603 if (rc)
607 eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
608 return rc; 604 return rc;
609 }
610 605
611 pci_lock_rescan_remove(); 606 pci_lock_rescan_remove();
612 607
613 /* Restore PE */ 608 /* Restore PE */
614 eeh_ops->configure_bridge(pe); 609 eeh_ops->configure_bridge(pe);
615 eeh_pe_restore_bars(pe); 610 eeh_pe_restore_bars(pe);
616 eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
617 611
618 /* Clear frozen state */ 612 /* Clear frozen state */
619 rc = eeh_clear_pe_frozen_state(pe, false); 613 rc = eeh_clear_pe_frozen_state(pe, false);
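
Because eeh_reset_pe() now owns the EEH_PE_CFG_BLOCKED guard (see the eeh.c
hunk above), its callers collapse to a plain call-and-check, with no risk of a
mismatched clear on an early return:

    rc = eeh_reset_pe(pe);      /* guard is taken and dropped inside */
    if (rc)
            return rc;
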
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 22b45a4955cd..10a093579191 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -1424,12 +1424,18 @@ _GLOBAL(ftrace_graph_caller)
1424 lwz r4, 44(r1) 1424 lwz r4, 44(r1)
1425 subi r4, r4, MCOUNT_INSN_SIZE 1425 subi r4, r4, MCOUNT_INSN_SIZE
1426 1426
1427 /* get the parent address */ 1427 /* Grab the LR out of the caller stack frame */
1428 addi r3, r1, 52 1428 lwz r3,52(r1)
1429 1429
1430 bl prepare_ftrace_return 1430 bl prepare_ftrace_return
1431 nop 1431 nop
1432 1432
1433 /*
1434 * prepare_ftrace_return gives us the address we divert to.
1435 * Change the LR in the callers stack frame to this.
1436 */
1437 stw r3,52(r1)
1438
1433 MCOUNT_RESTORE_FRAME 1439 MCOUNT_RESTORE_FRAME
1434 /* old link register ends up in ctr reg */ 1440 /* old link register ends up in ctr reg */
1435 bctr 1441 bctr
@@ -1457,4 +1463,4 @@ _GLOBAL(return_to_handler)
1457 blr 1463 blr
1458#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 1464#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1459 1465
1460#endif /* CONFIG_MCOUNT */ 1466#endif /* CONFIG_FUNCTION_TRACER */
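
The ftrace_graph_caller change above is a calling-convention switch: instead of
handing prepare_ftrace_return() the *address* of the saved LR slot, the asm now
loads the value out of the caller frame (52(r1) on 32-bit), passes it by value,
and stores the returned diversion address back. In C terms (both prototypes
appear in the ftrace.c hunk further down):

    /* old contract: the hook writes through a pointer into the stack frame */
    void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr);

    /* new contract: the hook returns the address to divert to */
    unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip);
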
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 0905c8da90f1..194e46dcf08d 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -1227,13 +1227,20 @@ _GLOBAL(ftrace_graph_caller)
1227 ld r4, 128(r1) 1227 ld r4, 128(r1)
1228 subi r4, r4, MCOUNT_INSN_SIZE 1228 subi r4, r4, MCOUNT_INSN_SIZE
1229 1229
1230 /* get the parent address */ 1230 /* Grab the LR out of the caller stack frame */
1231 ld r11, 112(r1) 1231 ld r11, 112(r1)
1232 addi r3, r11, 16 1232 ld r3, 16(r11)
1233 1233
1234 bl prepare_ftrace_return 1234 bl prepare_ftrace_return
1235 nop 1235 nop
1236 1236
1237 /*
1238 * prepare_ftrace_return gives us the address we divert to.
1239 * Change the LR in the callers stack frame to this.
1240 */
1241 ld r11, 112(r1)
1242 std r3, 16(r11)
1243
1237 ld r0, 128(r1) 1244 ld r0, 128(r1)
1238 mtlr r0 1245 mtlr r0
1239 addi r1, r1, 112 1246 addi r1, r1, 112
@@ -1241,28 +1248,6 @@ _GLOBAL(ftrace_graph_caller)
1241 1248
1242_GLOBAL(return_to_handler) 1249_GLOBAL(return_to_handler)
1243 /* need to save return values */ 1250 /* need to save return values */
1244 std r4, -24(r1)
1245 std r3, -16(r1)
1246 std r31, -8(r1)
1247 mr r31, r1
1248 stdu r1, -112(r1)
1249
1250 bl ftrace_return_to_handler
1251 nop
1252
1253 /* return value has real return address */
1254 mtlr r3
1255
1256 ld r1, 0(r1)
1257 ld r4, -24(r1)
1258 ld r3, -16(r1)
1259 ld r31, -8(r1)
1260
1261 /* Jump back to real return address */
1262 blr
1263
1264_GLOBAL(mod_return_to_handler)
1265 /* need to save return values */
1266 std r4, -32(r1) 1251 std r4, -32(r1)
1267 std r3, -24(r1) 1252 std r3, -24(r1)
1268 /* save TOC */ 1253 /* save TOC */
@@ -1272,7 +1257,7 @@ _GLOBAL(mod_return_to_handler)
1272 stdu r1, -112(r1) 1257 stdu r1, -112(r1)
1273 1258
1274 /* 1259 /*
1275 * We are in a module using the module's TOC. 1260 * We might be called from a module.
1276 * Switch to our TOC to run inside the core kernel. 1261 * Switch to our TOC to run inside the core kernel.
1277 */ 1262 */
1278 ld r2, PACATOC(r13) 1263 ld r2, PACATOC(r13)
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 72e783ea0681..db08382e19f1 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -131,6 +131,8 @@ BEGIN_FTR_SECTION
1311: 1311:
132#endif 132#endif
133 133
134 /* Return SRR1 from power7_nap() */
135 mfspr r3,SPRN_SRR1
134 beq cr1,2f 136 beq cr1,2f
135 b power7_wakeup_noloss 137 b power7_wakeup_noloss
1362: b power7_wakeup_loss 1382: b power7_wakeup_loss
@@ -292,15 +294,26 @@ decrementer_pSeries:
292 . = 0xc00 294 . = 0xc00
293 .globl system_call_pSeries 295 .globl system_call_pSeries
294system_call_pSeries: 296system_call_pSeries:
295 HMT_MEDIUM 297 /*
298 * If CONFIG_KVM_BOOK3S_64_HANDLER is set, save the PPR (on systems
299 * that support it) before changing to HMT_MEDIUM. That allows the KVM
300 * code to save that value into the guest state (it is the guest's PPR
301 * value). Otherwise just change to HMT_MEDIUM as userspace has
302 * already saved the PPR.
303 */
296#ifdef CONFIG_KVM_BOOK3S_64_HANDLER 304#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
297 SET_SCRATCH0(r13) 305 SET_SCRATCH0(r13)
298 GET_PACA(r13) 306 GET_PACA(r13)
299 std r9,PACA_EXGEN+EX_R9(r13) 307 std r9,PACA_EXGEN+EX_R9(r13)
308 OPT_GET_SPR(r9, SPRN_PPR, CPU_FTR_HAS_PPR);
309 HMT_MEDIUM;
300 std r10,PACA_EXGEN+EX_R10(r13) 310 std r10,PACA_EXGEN+EX_R10(r13)
311 OPT_SAVE_REG_TO_PACA(PACA_EXGEN+EX_PPR, r9, CPU_FTR_HAS_PPR);
301 mfcr r9 312 mfcr r9
302 KVMTEST(0xc00) 313 KVMTEST(0xc00)
303 GET_SCRATCH0(r13) 314 GET_SCRATCH0(r13)
315#else
316 HMT_MEDIUM;
304#endif 317#endif
305 SYSCALL_PSERIES_1 318 SYSCALL_PSERIES_1
306 SYSCALL_PSERIES_2_RFID 319 SYSCALL_PSERIES_2_RFID
@@ -1301,23 +1314,6 @@ hmi_exception_after_realmode:
1301 EXCEPTION_PROLOG_0(PACA_EXGEN) 1314 EXCEPTION_PROLOG_0(PACA_EXGEN)
1302 b hmi_exception_hv 1315 b hmi_exception_hv
1303 1316
1304#ifdef CONFIG_PPC_POWERNV
1305_GLOBAL(opal_mc_secondary_handler)
1306 HMT_MEDIUM_PPR_DISCARD
1307 SET_SCRATCH0(r13)
1308 GET_PACA(r13)
1309 clrldi r3,r3,2
1310 tovirt(r3,r3)
1311 std r3,PACA_OPAL_MC_EVT(r13)
1312 ld r13,OPAL_MC_SRR0(r3)
1313 mtspr SPRN_SRR0,r13
1314 ld r13,OPAL_MC_SRR1(r3)
1315 mtspr SPRN_SRR1,r13
1316 ld r3,OPAL_MC_GPR3(r3)
1317 GET_SCRATCH0(r13)
1318 b machine_check_pSeries
1319#endif /* CONFIG_PPC_POWERNV */
1320
1321 1317
1322#define MACHINE_CHECK_HANDLER_WINDUP \ 1318#define MACHINE_CHECK_HANDLER_WINDUP \
1323 /* Clear MSR_RI before setting SRR0 and SRR1. */\ 1319 /* Clear MSR_RI before setting SRR0 and SRR1. */\
@@ -1571,9 +1567,11 @@ do_hash_page:
1571 * r3 contains the faulting address 1567 * r3 contains the faulting address
1572 * r4 contains the required access permissions 1568 * r4 contains the required access permissions
1573 * r5 contains the trap number 1569 * r5 contains the trap number
1570 * r6 contains dsisr
1574 * 1571 *
1575 * at return r3 = 0 for success, 1 for page fault, negative for error 1572 * at return r3 = 0 for success, 1 for page fault, negative for error
1576 */ 1573 */
1574 ld r6,_DSISR(r1)
1577 bl hash_page /* build HPTE if possible */ 1575 bl hash_page /* build HPTE if possible */
1578 cmpdi r3,0 /* see if hash_page succeeded */ 1576 cmpdi r3,0 /* see if hash_page succeeded */
1579 1577
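
The do_hash_page change passes DSISR as a fourth argument: in the 64-bit ELF
ABI, r3-r6 carry the first four integer arguments, so loading _DSISR(r1) into
r6 before "bl hash_page" implies a C prototype along these lines (an assumption
read off the register usage; the real declaration lives in the MMU headers):

    int hash_page(unsigned long ea, unsigned long access,
                  unsigned long trap, unsigned long dsisr);
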
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index e66af6d265e8..44d4d8eb3c85 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -510,79 +510,36 @@ int ftrace_disable_ftrace_graph_caller(void)
510} 510}
511#endif /* CONFIG_DYNAMIC_FTRACE */ 511#endif /* CONFIG_DYNAMIC_FTRACE */
512 512
513#ifdef CONFIG_PPC64
514extern void mod_return_to_handler(void);
515#endif
516
517/* 513/*
518 * Hook the return address and push it in the stack of return addrs 514 * Hook the return address and push it in the stack of return addrs
519 * in current thread info. 515 * in current thread info. Return the address we want to divert to.
520 */ 516 */
521void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) 517unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
522{ 518{
523 unsigned long old;
524 int faulted;
525 struct ftrace_graph_ent trace; 519 struct ftrace_graph_ent trace;
526 unsigned long return_hooker = (unsigned long)&return_to_handler; 520 unsigned long return_hooker;
527 521
528 if (unlikely(ftrace_graph_is_dead())) 522 if (unlikely(ftrace_graph_is_dead()))
529 return; 523 goto out;
530 524
531 if (unlikely(atomic_read(&current->tracing_graph_pause))) 525 if (unlikely(atomic_read(&current->tracing_graph_pause)))
532 return; 526 goto out;
533
534#ifdef CONFIG_PPC64
535 /* non core kernel code needs to save and restore the TOC */
536 if (REGION_ID(self_addr) != KERNEL_REGION_ID)
537 return_hooker = (unsigned long)&mod_return_to_handler;
538#endif
539
540 return_hooker = ppc_function_entry((void *)return_hooker);
541 527
542 /* 528 return_hooker = ppc_function_entry(return_to_handler);
543 * Protect against fault, even if it shouldn't
544 * happen. This tool is too much intrusive to
545 * ignore such a protection.
546 */
547 asm volatile(
548 "1: " PPC_LL "%[old], 0(%[parent])\n"
549 "2: " PPC_STL "%[return_hooker], 0(%[parent])\n"
550 " li %[faulted], 0\n"
551 "3:\n"
552
553 ".section .fixup, \"ax\"\n"
554 "4: li %[faulted], 1\n"
555 " b 3b\n"
556 ".previous\n"
557
558 ".section __ex_table,\"a\"\n"
559 PPC_LONG_ALIGN "\n"
560 PPC_LONG "1b,4b\n"
561 PPC_LONG "2b,4b\n"
562 ".previous"
563
564 : [old] "=&r" (old), [faulted] "=r" (faulted)
565 : [parent] "r" (parent), [return_hooker] "r" (return_hooker)
566 : "memory"
567 );
568
569 if (unlikely(faulted)) {
570 ftrace_graph_stop();
571 WARN_ON(1);
572 return;
573 }
574 529
575 trace.func = self_addr; 530 trace.func = ip;
576 trace.depth = current->curr_ret_stack + 1; 531 trace.depth = current->curr_ret_stack + 1;
577 532
578 /* Only trace if the calling function expects to */ 533 /* Only trace if the calling function expects to */
579 if (!ftrace_graph_entry(&trace)) { 534 if (!ftrace_graph_entry(&trace))
580 *parent = old; 535 goto out;
581 return; 536
582 } 537 if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
538 goto out;
583 539
584 if (ftrace_push_return_trace(old, self_addr, &trace.depth, 0) == -EBUSY) 540 parent = return_hooker;
585 *parent = old; 541out:
542 return parent;
586} 543}
587#endif /* CONFIG_FUNCTION_GRAPH_TRACER */ 544#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
588 545
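
With the value-based hook above, the asm glue's job reduces to a load, a call,
and a store, roughly (a C model; "frame" is a stand-in for the caller's stack
frame, not a real kernel structure):

    unsigned long lr = frame->lr;               /* lwz/ld from caller frame */
    frame->lr = prepare_ftrace_return(lr, ip);  /* store the diversion back */

On every bail-out path the hook simply returns "parent" unchanged, so the
store is harmless; that is why the fault-protected inline asm and its
__ex_table fixups in the old version could be deleted outright.
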
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index fafff8dbd5d9..d99aac0d69f1 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -33,13 +33,31 @@
33 33
34/* Macro to make the code more readable. */ 34/* Macro to make the code more readable. */
35#ifdef CONFIG_8xx_CPU6 35#ifdef CONFIG_8xx_CPU6
36#define DO_8xx_CPU6(val, reg) \ 36#define SPRN_MI_TWC_ADDR 0x2b80
37 li reg, val; \ 37#define SPRN_MI_RPN_ADDR 0x2d80
38 stw reg, 12(r0); \ 38#define SPRN_MD_TWC_ADDR 0x3b80
39 lwz reg, 12(r0); 39#define SPRN_MD_RPN_ADDR 0x3d80
40
41#define MTSPR_CPU6(spr, reg, treg) \
42 li treg, spr##_ADDR; \
43 stw treg, 12(r0); \
44 lwz treg, 12(r0); \
45 mtspr spr, reg
40#else 46#else
41#define DO_8xx_CPU6(val, reg) 47#define MTSPR_CPU6(spr, reg, treg) \
48 mtspr spr, reg
42#endif 49#endif
50
51/*
52 * Value for the bits that have fixed value in RPN entries.
53 * Also used for tagging DAR for DTLBerror.
54 */
55#ifdef CONFIG_PPC_16K_PAGES
56#define RPN_PATTERN (0x00f0 | MD_SPS16K)
57#else
58#define RPN_PATTERN 0x00f0
59#endif
60
43 __HEAD 61 __HEAD
44_ENTRY(_stext); 62_ENTRY(_stext);
45_ENTRY(_start); 63_ENTRY(_start);
@@ -65,13 +83,6 @@ _ENTRY(_start);
65 * 8M 1:1. I also mapped an additional I/O space 1:1 so we can get to 83 * 8M 1:1. I also mapped an additional I/O space 1:1 so we can get to
66 * the "internal" processor registers before MMU_init is called. 84 * the "internal" processor registers before MMU_init is called.
67 * 85 *
68 * The TLB code currently contains a major hack. Since I use the condition
69 * code register, I have to save and restore it. I am out of registers, so
70 * I just store it in memory location 0 (the TLB handlers are not reentrant).
71 * To avoid making any decisions, I need to use the "segment" valid bit
72 * in the first level table, but that would require many changes to the
73 * Linux page directory/table functions that I don't want to do right now.
74 *
75 * -- Dan 86 * -- Dan
76 */ 87 */
77 .globl __start 88 .globl __start
@@ -211,7 +222,7 @@ MachineCheck:
211 EXCEPTION_PROLOG 222 EXCEPTION_PROLOG
212 mfspr r4,SPRN_DAR 223 mfspr r4,SPRN_DAR
213 stw r4,_DAR(r11) 224 stw r4,_DAR(r11)
214 li r5,0x00f0 225 li r5,RPN_PATTERN
215 mtspr SPRN_DAR,r5 /* Tag DAR, to be used in DTLB Error */ 226 mtspr SPRN_DAR,r5 /* Tag DAR, to be used in DTLB Error */
216 mfspr r5,SPRN_DSISR 227 mfspr r5,SPRN_DSISR
217 stw r5,_DSISR(r11) 228 stw r5,_DSISR(r11)
@@ -219,30 +230,16 @@ MachineCheck:
219 EXC_XFER_STD(0x200, machine_check_exception) 230 EXC_XFER_STD(0x200, machine_check_exception)
220 231
221/* Data access exception. 232/* Data access exception.
222 * This is "never generated" by the MPC8xx. We jump to it for other 233 * This is "never generated" by the MPC8xx.
223 * translation errors.
224 */ 234 */
225 . = 0x300 235 . = 0x300
226DataAccess: 236DataAccess:
227 EXCEPTION_PROLOG
228 mfspr r10,SPRN_DSISR
229 stw r10,_DSISR(r11)
230 mr r5,r10
231 mfspr r4,SPRN_DAR
232 li r10,0x00f0
233 mtspr SPRN_DAR,r10 /* Tag DAR, to be used in DTLB Error */
234 EXC_XFER_LITE(0x300, handle_page_fault)
235 237
236/* Instruction access exception. 238/* Instruction access exception.
237 * This is "never generated" by the MPC8xx. We jump to it for other 239 * This is "never generated" by the MPC8xx.
238 * translation errors.
239 */ 240 */
240 . = 0x400 241 . = 0x400
241InstructionAccess: 242InstructionAccess:
242 EXCEPTION_PROLOG
243 mr r4,r12
244 mr r5,r9
245 EXC_XFER_LITE(0x400, handle_page_fault)
246 243
247/* External interrupt */ 244/* External interrupt */
248 EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE) 245 EXCEPTION(0x500, HardwareInterrupt, do_IRQ, EXC_XFER_LITE)
@@ -253,7 +250,7 @@ Alignment:
253 EXCEPTION_PROLOG 250 EXCEPTION_PROLOG
254 mfspr r4,SPRN_DAR 251 mfspr r4,SPRN_DAR
255 stw r4,_DAR(r11) 252 stw r4,_DAR(r11)
256 li r5,0x00f0 253 li r5,RPN_PATTERN
257 mtspr SPRN_DAR,r5 /* Tag DAR, to be used in DTLB Error */ 254 mtspr SPRN_DAR,r5 /* Tag DAR, to be used in DTLB Error */
258 mfspr r5,SPRN_DSISR 255 mfspr r5,SPRN_DSISR
259 stw r5,_DSISR(r11) 256 stw r5,_DSISR(r11)
@@ -292,8 +289,8 @@ SystemCall:
292 . = 0x1100 289 . = 0x1100
293/* 290/*
294 * For the MPC8xx, this is a software tablewalk to load the instruction 291 * For the MPC8xx, this is a software tablewalk to load the instruction
295 * TLB. It is modelled after the example in the Motorola manual. The task 292 * TLB. The task switch loads the M_TW register with the pointer to the first
296 * switch loads the M_TWB register with the pointer to the first level table. 293 * level table.
297 * If we discover there is no second level table (value is zero) or if there 294 * If we discover there is no second level table (value is zero) or if there
298 * is an invalid pte, we load that into the TLB, which causes another fault 295 * is an invalid pte, we load that into the TLB, which causes another fault
299 * into the TLB Error interrupt where we can handle such problems. 296 * into the TLB Error interrupt where we can handle such problems.
@@ -302,20 +299,17 @@ SystemCall:
302 */ 299 */
303InstructionTLBMiss: 300InstructionTLBMiss:
304#ifdef CONFIG_8xx_CPU6 301#ifdef CONFIG_8xx_CPU6
305 stw r3, 8(r0) 302 mtspr SPRN_DAR, r3
306#endif 303#endif
307 EXCEPTION_PROLOG_0 304 EXCEPTION_PROLOG_0
308 mtspr SPRN_SPRG_SCRATCH2, r10 305 mtspr SPRN_SPRG_SCRATCH2, r10
309 mfspr r10, SPRN_SRR0 /* Get effective address of fault */ 306 mfspr r10, SPRN_SRR0 /* Get effective address of fault */
310#ifdef CONFIG_8xx_CPU15 307#ifdef CONFIG_8xx_CPU15
311 addi r11, r10, 0x1000 308 addi r11, r10, PAGE_SIZE
312 tlbie r11 309 tlbie r11
313 addi r11, r10, -0x1000 310 addi r11, r10, -PAGE_SIZE
314 tlbie r11 311 tlbie r11
315#endif 312#endif
316 DO_8xx_CPU6(0x3780, r3)
317 mtspr SPRN_MD_EPN, r10 /* Have to use MD_EPN for walk, MI_EPN can't */
318 mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */
319 313
320 /* If we are faulting a kernel address, we have to use the 314 /* If we are faulting a kernel address, we have to use the
321 * kernel page tables. 315 * kernel page tables.
@@ -323,32 +317,37 @@ InstructionTLBMiss:
323#ifdef CONFIG_MODULES 317#ifdef CONFIG_MODULES
324 /* Only modules will cause ITLB Misses as we always 318 /* Only modules will cause ITLB Misses as we always
325 * pin the first 8MB of kernel memory */ 319 * pin the first 8MB of kernel memory */
326 andi. r11, r10, 0x0800 /* Address >= 0x80000000 */ 320 andis. r11, r10, 0x8000 /* Address >= 0x80000000 */
321#endif
322 mfspr r11, SPRN_M_TW /* Get level 1 table base address */
323#ifdef CONFIG_MODULES
327 beq 3f 324 beq 3f
328 lis r11, swapper_pg_dir@h 325 lis r11, (swapper_pg_dir-PAGE_OFFSET)@h
329 ori r11, r11, swapper_pg_dir@l 326 ori r11, r11, (swapper_pg_dir-PAGE_OFFSET)@l
330 rlwimi r10, r11, 0, 2, 19
3313: 3273:
332#endif 328#endif
333 lwz r11, 0(r10) /* Get the level 1 entry */ 329 /* Extract level 1 index */
330 rlwinm r10, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
331 lwzx r11, r10, r11 /* Get the level 1 entry */
334 rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */ 332 rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
335 beq 2f /* If zero, don't try to find a pte */ 333 beq 2f /* If zero, don't try to find a pte */
336 334
337 /* We have a pte table, so load the MI_TWC with the attributes 335 /* We have a pte table, so load the MI_TWC with the attributes
338 * for this "segment." 336 * for this "segment."
339 */ 337 */
340 ori r11,r11,1 /* Set valid bit */ 338 MTSPR_CPU6(SPRN_MI_TWC, r11, r3) /* Set segment attributes */
341 DO_8xx_CPU6(0x2b80, r3) 339 mfspr r11, SPRN_SRR0 /* Get effective address of fault */
342 mtspr SPRN_MI_TWC, r11 /* Set segment attributes */ 340 /* Extract level 2 index */
343 DO_8xx_CPU6(0x3b80, r3) 341 rlwinm r11, r11, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
344 mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ 342 lwzx r10, r10, r11 /* Get the pte */
345 mfspr r11, SPRN_MD_TWC /* ....and get the pte address */
346 lwz r10, 0(r11) /* Get the pte */
347 343
348#ifdef CONFIG_SWAP 344#ifdef CONFIG_SWAP
349 andi. r11, r10, _PAGE_ACCESSED | _PAGE_PRESENT 345 andi. r11, r10, _PAGE_ACCESSED | _PAGE_PRESENT
350 cmpwi cr0, r11, _PAGE_ACCESSED | _PAGE_PRESENT 346 cmpwi cr0, r11, _PAGE_ACCESSED | _PAGE_PRESENT
347 li r11, RPN_PATTERN
351 bne- cr0, 2f 348 bne- cr0, 2f
349#else
350 li r11, RPN_PATTERN
352#endif 351#endif
353 /* The Linux PTE won't go exactly into the MMU TLB. 352 /* The Linux PTE won't go exactly into the MMU TLB.
354 * Software indicator bits 21 and 28 must be clear. 353 * Software indicator bits 21 and 28 must be clear.
@@ -356,62 +355,63 @@ InstructionTLBMiss:
356 * set. All other Linux PTE bits control the behavior 355 * set. All other Linux PTE bits control the behavior
357 * of the MMU. 356 * of the MMU.
358 */ 357 */
359 li r11, 0x00f0
360 rlwimi r10, r11, 0, 0x07f8 /* Set 24-27, clear 21-23,28 */ 358 rlwimi r10, r11, 0, 0x07f8 /* Set 24-27, clear 21-23,28 */
361 DO_8xx_CPU6(0x2d80, r3) 359 MTSPR_CPU6(SPRN_MI_RPN, r10, r3) /* Update TLB entry */
362 mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
363 360
364 /* Restore registers */ 361 /* Restore registers */
365#ifdef CONFIG_8xx_CPU6 362#ifdef CONFIG_8xx_CPU6
366 lwz r3, 8(r0) 363 mfspr r3, SPRN_DAR
364 mtspr SPRN_DAR, r11 /* Tag DAR */
367#endif 365#endif
368 mfspr r10, SPRN_SPRG_SCRATCH2 366 mfspr r10, SPRN_SPRG_SCRATCH2
369 EXCEPTION_EPILOG_0 367 EXCEPTION_EPILOG_0
370 rfi 368 rfi
3712: 3692:
372 mfspr r11, SPRN_SRR1 370 mfspr r10, SPRN_SRR1
373 /* clear all error bits as TLB Miss 371 /* clear all error bits as TLB Miss
374 * sets a few unconditionally 372 * sets a few unconditionally
375 */ 373 */
376 rlwinm r11, r11, 0, 0xffff 374 rlwinm r10, r10, 0, 0xffff
377 mtspr SPRN_SRR1, r11 375 mtspr SPRN_SRR1, r10
378 376
379 /* Restore registers */ 377 /* Restore registers */
380#ifdef CONFIG_8xx_CPU6 378#ifdef CONFIG_8xx_CPU6
381 lwz r3, 8(r0) 379 mfspr r3, SPRN_DAR
380 mtspr SPRN_DAR, r11 /* Tag DAR */
382#endif 381#endif
383 mfspr r10, SPRN_SPRG_SCRATCH2 382 mfspr r10, SPRN_SPRG_SCRATCH2
384 EXCEPTION_EPILOG_0 383 b InstructionTLBError1
385 b InstructionAccess
386 384
387 . = 0x1200 385 . = 0x1200
388DataStoreTLBMiss: 386DataStoreTLBMiss:
389#ifdef CONFIG_8xx_CPU6 387#ifdef CONFIG_8xx_CPU6
390 stw r3, 8(r0) 388 mtspr SPRN_DAR, r3
391#endif 389#endif
392 EXCEPTION_PROLOG_0 390 EXCEPTION_PROLOG_0
393 mtspr SPRN_SPRG_SCRATCH2, r10 391 mtspr SPRN_SPRG_SCRATCH2, r10
394 mfspr r10, SPRN_M_TWB /* Get level 1 table entry address */ 392 mfspr r10, SPRN_MD_EPN
395 393
396 /* If we are faulting a kernel address, we have to use the 394 /* If we are faulting a kernel address, we have to use the
397 * kernel page tables. 395 * kernel page tables.
398 */ 396 */
399 andi. r11, r10, 0x0800 397 andis. r11, r10, 0x8000
398 mfspr r11, SPRN_M_TW /* Get level 1 table base address */
400 beq 3f 399 beq 3f
401 lis r11, swapper_pg_dir@h 400 lis r11, (swapper_pg_dir-PAGE_OFFSET)@h
402 ori r11, r11, swapper_pg_dir@l 401 ori r11, r11, (swapper_pg_dir-PAGE_OFFSET)@l
403 rlwimi r10, r11, 0, 2, 19
4043: 4023:
405 lwz r11, 0(r10) /* Get the level 1 entry */ 403 /* Extract level 1 index */
404 rlwinm r10, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
405 lwzx r11, r10, r11 /* Get the level 1 entry */
406 rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */ 406 rlwinm. r10, r11,0,0,19 /* Extract page descriptor page address */
407 beq 2f /* If zero, don't try to find a pte */ 407 beq 2f /* If zero, don't try to find a pte */
408 408
 409 /* We have a pte table, so fetch the pte from the table. 409
410 */ 410 */
411 ori r11, r11, 1 /* Set valid bit in physical L2 page */ 411 mfspr r10, SPRN_MD_EPN /* Get address of fault */
412 DO_8xx_CPU6(0x3b80, r3) 412 /* Extract level 2 index */
413 mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ 413 rlwinm r10, r10, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
414 mfspr r10, SPRN_MD_TWC /* ....and get the pte address */ 414 rlwimi r10, r11, 0, 0, 32 - PAGE_SHIFT - 1 /* Add level 2 base */
415 lwz r10, 0(r10) /* Get the pte */ 415 lwz r10, 0(r10) /* Get the pte */
416 416
417 /* Insert the Guarded flag into the TWC from the Linux PTE. 417 /* Insert the Guarded flag into the TWC from the Linux PTE.
@@ -425,8 +425,7 @@ DataStoreTLBMiss:
425 * It is bit 25 in the Linux PTE and bit 30 in the TWC 425 * It is bit 25 in the Linux PTE and bit 30 in the TWC
426 */ 426 */
427 rlwimi r11, r10, 32-5, 30, 30 427 rlwimi r11, r10, 32-5, 30, 30
428 DO_8xx_CPU6(0x3b80, r3) 428 MTSPR_CPU6(SPRN_MD_TWC, r11, r3)
429 mtspr SPRN_MD_TWC, r11
430 429
 431 /* Both _PAGE_ACCESSED and _PAGE_PRESENT have to be set. 430
432 * We also need to know if the insn is a load/store, so: 431 * We also need to know if the insn is a load/store, so:
@@ -442,14 +441,8 @@ DataStoreTLBMiss:
442 and r11, r11, r10 441 and r11, r11, r10
443 rlwimi r10, r11, 0, _PAGE_PRESENT 442 rlwimi r10, r11, 0, _PAGE_PRESENT
444#endif 443#endif
445 /* Honour kernel RO, User NA */ 444 /* invert RW */
446 /* 0x200 == Extended encoding, bit 22 */ 445 xori r10, r10, _PAGE_RW
447 rlwimi r10, r10, 32-2, 0x200 /* Copy USER to bit 22, 0x200 */
448 /* r11 = (r10 & _PAGE_RW) >> 1 */
449 rlwinm r11, r10, 32-1, 0x200
450 or r10, r11, r10
451 /* invert RW and 0x200 bits */
452 xori r10, r10, _PAGE_RW | 0x200
453 446
454 /* The Linux PTE won't go exactly into the MMU TLB. 447 /* The Linux PTE won't go exactly into the MMU TLB.
455 * Software indicator bits 22 and 28 must be clear. 448 * Software indicator bits 22 and 28 must be clear.
@@ -457,14 +450,13 @@ DataStoreTLBMiss:
457 * set. All other Linux PTE bits control the behavior 450 * set. All other Linux PTE bits control the behavior
458 * of the MMU. 451 * of the MMU.
459 */ 452 */
4602: li r11, 0x00f0 4532: li r11, RPN_PATTERN
461 rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */ 454 rlwimi r10, r11, 0, 24, 28 /* Set 24-27, clear 28 */
462 DO_8xx_CPU6(0x3d80, r3) 455 MTSPR_CPU6(SPRN_MD_RPN, r10, r3) /* Update TLB entry */
463 mtspr SPRN_MD_RPN, r10 /* Update TLB entry */
464 456
465 /* Restore registers */ 457 /* Restore registers */
466#ifdef CONFIG_8xx_CPU6 458#ifdef CONFIG_8xx_CPU6
467 lwz r3, 8(r0) 459 mfspr r3, SPRN_DAR
468#endif 460#endif
469 mtspr SPRN_DAR, r11 /* Tag DAR */ 461 mtspr SPRN_DAR, r11 /* Tag DAR */
470 mfspr r10, SPRN_SPRG_SCRATCH2 462 mfspr r10, SPRN_SPRG_SCRATCH2
@@ -477,7 +469,17 @@ DataStoreTLBMiss:
477 */ 469 */
478 . = 0x1300 470 . = 0x1300
479InstructionTLBError: 471InstructionTLBError:
480 b InstructionAccess 472 EXCEPTION_PROLOG_0
473InstructionTLBError1:
474 EXCEPTION_PROLOG_1
475 EXCEPTION_PROLOG_2
476 mr r4,r12
477 mr r5,r9
478 andis. r10,r5,0x4000
479 beq+ 1f
480 tlbie r4
481 /* 0x400 is InstructionAccess exception, needed by bad_page_fault() */
4821: EXC_XFER_LITE(0x400, handle_page_fault)
481 483
482/* This is the data TLB error on the MPC8xx. This could be due to 484/* This is the data TLB error on the MPC8xx. This could be due to
483 * many reasons, including a dirty update to a pte. We bail out to 485 * many reasons, including a dirty update to a pte. We bail out to
@@ -488,11 +490,21 @@ DataTLBError:
488 EXCEPTION_PROLOG_0 490 EXCEPTION_PROLOG_0
489 491
490 mfspr r11, SPRN_DAR 492 mfspr r11, SPRN_DAR
491 cmpwi cr0, r11, 0x00f0 493 cmpwi cr0, r11, RPN_PATTERN
492 beq- FixupDAR /* must be a buggy dcbX, icbi insn. */ 494 beq- FixupDAR /* must be a buggy dcbX, icbi insn. */
493DARFixed:/* Return from dcbx instruction bug workaround */ 495DARFixed:/* Return from dcbx instruction bug workaround */
494 EXCEPTION_EPILOG_0 496 EXCEPTION_PROLOG_1
495 b DataAccess 497 EXCEPTION_PROLOG_2
498 mfspr r5,SPRN_DSISR
499 stw r5,_DSISR(r11)
500 mfspr r4,SPRN_DAR
501 andis. r10,r5,0x4000
502 beq+ 1f
503 tlbie r4
5041: li r10,RPN_PATTERN
505 mtspr SPRN_DAR,r10 /* Tag DAR, to be used in DTLB Error */
506 /* 0x300 is DataAccess exception, needed by bad_page_fault() */
507 EXC_XFER_LITE(0x300, handle_page_fault)
496 508
497 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE) 509 EXCEPTION(0x1500, Trap_15, unknown_exception, EXC_XFER_EE)
498 EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE) 510 EXCEPTION(0x1600, Trap_16, unknown_exception, EXC_XFER_EE)
@@ -521,29 +533,30 @@ DARFixed:/* Return from dcbx instruction bug workaround */
521#define NO_SELF_MODIFYING_CODE 533#define NO_SELF_MODIFYING_CODE
522FixupDAR:/* Entry point for dcbx workaround. */ 534FixupDAR:/* Entry point for dcbx workaround. */
523#ifdef CONFIG_8xx_CPU6 535#ifdef CONFIG_8xx_CPU6
524 stw r3, 8(r0) 536 mtspr SPRN_DAR, r3
525#endif 537#endif
526 mtspr SPRN_SPRG_SCRATCH2, r10 538 mtspr SPRN_SPRG_SCRATCH2, r10
527 /* fetch instruction from memory. */ 539 /* fetch instruction from memory. */
528 mfspr r10, SPRN_SRR0 540 mfspr r10, SPRN_SRR0
529 andis. r11, r10, 0x8000 /* Address >= 0x80000000 */ 541 andis. r11, r10, 0x8000 /* Address >= 0x80000000 */
530 DO_8xx_CPU6(0x3780, r3) 542 mfspr r11, SPRN_M_TW /* Get level 1 table base address */
531 mtspr SPRN_MD_EPN, r10
532 mfspr r11, SPRN_M_TWB /* Get level 1 table entry address */
533 beq- 3f /* Branch if user space */ 543 beq- 3f /* Branch if user space */
534 lis r11, (swapper_pg_dir-PAGE_OFFSET)@h 544 lis r11, (swapper_pg_dir-PAGE_OFFSET)@h
535 ori r11, r11, (swapper_pg_dir-PAGE_OFFSET)@l 545 ori r11, r11, (swapper_pg_dir-PAGE_OFFSET)@l
536 rlwimi r11, r10, 32-20, 0xffc /* r11 = r11&~0xffc|(r10>>20)&0xffc */ 546 /* Extract level 1 index */
5373: lwz r11, 0(r11) /* Get the level 1 entry */ 5473: rlwinm r10, r10, 32 - ((PAGE_SHIFT - 2) << 1), (PAGE_SHIFT - 2) << 1, 29
538 DO_8xx_CPU6(0x3b80, r3) 548 lwzx r11, r10, r11 /* Get the level 1 entry */
539 mtspr SPRN_MD_TWC, r11 /* Load pte table base address */ 549 rlwinm r10, r11,0,0,19 /* Extract page descriptor page address */
540 mfspr r11, SPRN_MD_TWC /* ....and get the pte address */ 550 mfspr r11, SPRN_SRR0 /* Get effective address of fault */
541 lwz r11, 0(r11) /* Get the pte */ 551 /* Extract level 2 index */
552 rlwinm r11, r11, 32 - (PAGE_SHIFT - 2), 32 - PAGE_SHIFT, 29
553 lwzx r11, r10, r11 /* Get the pte */
542#ifdef CONFIG_8xx_CPU6 554#ifdef CONFIG_8xx_CPU6
543 lwz r3, 8(r0) /* restore r3 from memory */ 555 mfspr r3, SPRN_DAR
544#endif 556#endif
545 /* concat physical page address(r11) and page offset(r10) */ 557 /* concat physical page address(r11) and page offset(r10) */
546 rlwimi r11, r10, 0, 20, 31 558 mfspr r10, SPRN_SRR0
559 rlwimi r11, r10, 0, 32 - PAGE_SHIFT, 31
547 lwz r11,0(r11) 560 lwz r11,0(r11)
548/* Check if it really is a dcbx instruction. */ 561/* Check if it really is a dcbx instruction. */
549/* dcbt and dcbtst does not generate DTLB Misses/Errors, 562/* dcbt and dcbtst does not generate DTLB Misses/Errors,
@@ -698,11 +711,11 @@ start_here:
698#ifdef CONFIG_8xx_CPU6 711#ifdef CONFIG_8xx_CPU6
699 lis r4, cpu6_errata_word@h 712 lis r4, cpu6_errata_word@h
700 ori r4, r4, cpu6_errata_word@l 713 ori r4, r4, cpu6_errata_word@l
701 li r3, 0x3980 714 li r3, 0x3f80
702 stw r3, 12(r4) 715 stw r3, 12(r4)
703 lwz r3, 12(r4) 716 lwz r3, 12(r4)
704#endif 717#endif
705 mtspr SPRN_M_TWB, r6 718 mtspr SPRN_M_TW, r6
706 lis r4,2f@h 719 lis r4,2f@h
707 ori r4,r4,2f@l 720 ori r4,r4,2f@l
708 tophys(r4,r4) 721 tophys(r4,r4)
@@ -876,10 +889,10 @@ _GLOBAL(set_context)
876 lis r6, cpu6_errata_word@h 889 lis r6, cpu6_errata_word@h
877 ori r6, r6, cpu6_errata_word@l 890 ori r6, r6, cpu6_errata_word@l
878 tophys (r4, r4) 891 tophys (r4, r4)
879 li r7, 0x3980 892 li r7, 0x3f80
880 stw r7, 12(r6) 893 stw r7, 12(r6)
881 lwz r7, 12(r6) 894 lwz r7, 12(r6)
882 mtspr SPRN_M_TWB, r4 /* Update MMU base address */ 895 mtspr SPRN_M_TW, r4 /* Update MMU base address */
883 li r7, 0x3380 896 li r7, 0x3380
884 stw r7, 12(r6) 897 stw r7, 12(r6)
885 lwz r7, 12(r6) 898 lwz r7, 12(r6)
@@ -887,7 +900,7 @@ _GLOBAL(set_context)
887#else 900#else
888 mtspr SPRN_M_CASID,r3 /* Update context */ 901 mtspr SPRN_M_CASID,r3 /* Update context */
889 tophys (r4, r4) 902 tophys (r4, r4)
890 mtspr SPRN_M_TWB, r4 /* and pgd */ 903 mtspr SPRN_M_TW, r4 /* and pgd */
891#endif 904#endif
892 SYNC 905 SYNC
893 blr 906 blr
@@ -919,12 +932,13 @@ set_dec_cpu6:
919 .globl sdata 932 .globl sdata
920sdata: 933sdata:
921 .globl empty_zero_page 934 .globl empty_zero_page
935 .align PAGE_SHIFT
922empty_zero_page: 936empty_zero_page:
923 .space 4096 937 .space PAGE_SIZE
924 938
925 .globl swapper_pg_dir 939 .globl swapper_pg_dir
926swapper_pg_dir: 940swapper_pg_dir:
927 .space 4096 941 .space PGD_TABLE_SIZE
928 942
 929/* Room for two PTE table pointers, usually the kernel and current user 943
930 * pointer to their respective root page table (pgdir). 944 * pointer to their respective root page table (pgdir).
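
The rlwinm index extractions above are easier to check with concrete numbers.
For 4k pages (PAGE_SHIFT == 12), (PAGE_SHIFT - 2) << 1 == 20, so the level 1
form becomes "rlwinm rX, rX, 12, 20, 29": rotate the EA left by 12 (i.e. right
by 20) and keep bits 20-29, which is the top 10 bits of the EA scaled by 4 -- a
byte offset into the pgd that lwzx adds to the table base. The level 2 form
does the same on the middle 10 bits. C equivalents:

    /* for PAGE_SHIFT == 12 */
    l1_off = ((ea >> 22) & 0x3ff) << 2;   /* rlwinm rX, rX, 12, 20, 29 */
    l2_off = ((ea >> 12) & 0x3ff) << 2;   /* rlwinm rX, rX, 22, 20, 29 */
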
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 1f7d84e2e8b2..05e804cdecaa 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -63,7 +63,7 @@ int hw_breakpoint_slots(int type)
63int arch_install_hw_breakpoint(struct perf_event *bp) 63int arch_install_hw_breakpoint(struct perf_event *bp)
64{ 64{
65 struct arch_hw_breakpoint *info = counter_arch_bp(bp); 65 struct arch_hw_breakpoint *info = counter_arch_bp(bp);
66 struct perf_event **slot = &__get_cpu_var(bp_per_reg); 66 struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
67 67
68 *slot = bp; 68 *slot = bp;
69 69
@@ -88,7 +88,7 @@ int arch_install_hw_breakpoint(struct perf_event *bp)
88 */ 88 */
89void arch_uninstall_hw_breakpoint(struct perf_event *bp) 89void arch_uninstall_hw_breakpoint(struct perf_event *bp)
90{ 90{
91 struct perf_event **slot = &__get_cpu_var(bp_per_reg); 91 struct perf_event **slot = this_cpu_ptr(&bp_per_reg);
92 92
93 if (*slot != bp) { 93 if (*slot != bp) {
94 WARN_ONCE(1, "Can't find the breakpoint"); 94 WARN_ONCE(1, "Can't find the breakpoint");
@@ -226,7 +226,7 @@ int __kprobes hw_breakpoint_handler(struct die_args *args)
226 */ 226 */
227 rcu_read_lock(); 227 rcu_read_lock();
228 228
229 bp = __get_cpu_var(bp_per_reg); 229 bp = __this_cpu_read(bp_per_reg);
230 if (!bp) 230 if (!bp)
231 goto out; 231 goto out;
232 info = counter_arch_bp(bp); 232 info = counter_arch_bp(bp);
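
This file is one instance of a conversion repeated across the series: the
deprecated __get_cpu_var() accessors become the this_cpu API. The mapping, as
used in these hunks:

    struct perf_event **slot = this_cpu_ptr(&bp_per_reg); /* was &__get_cpu_var(bp_per_reg) */
    bp = __this_cpu_read(bp_per_reg);                      /* was __get_cpu_var(bp_per_reg) */
    __this_cpu_write(current_kprobe, p);                   /* was __get_cpu_var(current_kprobe) = p */
    __this_cpu_inc(irq_stat.doorbell_irqs);                /* was __get_cpu_var(irq_stat).doorbell_irqs++ */
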
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index c0754bbf8118..18c0687e5ab3 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -212,6 +212,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
212 mtspr SPRN_SRR0,r5 212 mtspr SPRN_SRR0,r5
213 rfid 213 rfid
214 214
215/*
216 * R3 here contains the value that will be returned to the caller
217 * of power7_nap.
218 */
215_GLOBAL(power7_wakeup_loss) 219_GLOBAL(power7_wakeup_loss)
216 ld r1,PACAR1(r13) 220 ld r1,PACAR1(r13)
217BEGIN_FTR_SECTION 221BEGIN_FTR_SECTION
@@ -219,15 +223,19 @@ BEGIN_FTR_SECTION
219END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) 223END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
220 REST_NVGPRS(r1) 224 REST_NVGPRS(r1)
221 REST_GPR(2, r1) 225 REST_GPR(2, r1)
222 ld r3,_CCR(r1) 226 ld r6,_CCR(r1)
223 ld r4,_MSR(r1) 227 ld r4,_MSR(r1)
224 ld r5,_NIP(r1) 228 ld r5,_NIP(r1)
225 addi r1,r1,INT_FRAME_SIZE 229 addi r1,r1,INT_FRAME_SIZE
226 mtcr r3 230 mtcr r6
227 mtspr SPRN_SRR1,r4 231 mtspr SPRN_SRR1,r4
228 mtspr SPRN_SRR0,r5 232 mtspr SPRN_SRR0,r5
229 rfid 233 rfid
230 234
235/*
236 * R3 here contains the value that will be returned to the caller
237 * of power7_nap.
238 */
231_GLOBAL(power7_wakeup_noloss) 239_GLOBAL(power7_wakeup_noloss)
232 lbz r0,PACA_NAPSTATELOST(r13) 240 lbz r0,PACA_NAPSTATELOST(r13)
233 cmpwi r0,0 241 cmpwi r0,0
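
The new comments above document an ABI-level convention: r3 is the return
register, so whatever the wakeup paths leave in r3 becomes the value returned
to the caller of power7_nap() -- and the exceptions-64s.S hunk earlier loads
SPRN_SRR1 into r3 for exactly that reason. From the caller's side it behaves
roughly like this (hypothetical prototype; the real declaration is elsewhere):

    unsigned long srr1 = power7_nap();  /* srr1 says why we woke up */
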
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c
index a83cf5ef6488..5d3968c4d799 100644
--- a/arch/powerpc/kernel/iommu.c
+++ b/arch/powerpc/kernel/iommu.c
@@ -208,7 +208,7 @@ static unsigned long iommu_range_alloc(struct device *dev,
208 * We don't need to disable preemption here because any CPU can 208 * We don't need to disable preemption here because any CPU can
209 * safely use any IOMMU pool. 209 * safely use any IOMMU pool.
210 */ 210 */
211 pool_nr = __raw_get_cpu_var(iommu_pool_hash) & (tbl->nr_pools - 1); 211 pool_nr = __this_cpu_read(iommu_pool_hash) & (tbl->nr_pools - 1);
212 212
213 if (largealloc) 213 if (largealloc)
214 pool = &(tbl->large_pool); 214 pool = &(tbl->large_pool);
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index c14383575fe8..45096033d37b 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -50,7 +50,6 @@
50#include <linux/list.h> 50#include <linux/list.h>
51#include <linux/radix-tree.h> 51#include <linux/radix-tree.h>
52#include <linux/mutex.h> 52#include <linux/mutex.h>
53#include <linux/bootmem.h>
54#include <linux/pci.h> 53#include <linux/pci.h>
55#include <linux/debugfs.h> 54#include <linux/debugfs.h>
56#include <linux/of.h> 55#include <linux/of.h>
@@ -114,7 +113,7 @@ static inline notrace void set_soft_enabled(unsigned long enable)
114static inline notrace int decrementer_check_overflow(void) 113static inline notrace int decrementer_check_overflow(void)
115{ 114{
116 u64 now = get_tb_or_rtc(); 115 u64 now = get_tb_or_rtc();
117 u64 *next_tb = &__get_cpu_var(decrementers_next_tb); 116 u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
118 117
119 return now >= *next_tb; 118 return now >= *next_tb;
120} 119}
@@ -499,7 +498,7 @@ void __do_irq(struct pt_regs *regs)
499 498
500 /* And finally process it */ 499 /* And finally process it */
501 if (unlikely(irq == NO_IRQ)) 500 if (unlikely(irq == NO_IRQ))
502 __get_cpu_var(irq_stat).spurious_irqs++; 501 __this_cpu_inc(irq_stat.spurious_irqs);
503 else 502 else
504 generic_handle_irq(irq); 503 generic_handle_irq(irq);
505 504
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c
index 8504657379f1..e77c3ccf8dcf 100644
--- a/arch/powerpc/kernel/kgdb.c
+++ b/arch/powerpc/kernel/kgdb.c
@@ -155,7 +155,7 @@ static int kgdb_singlestep(struct pt_regs *regs)
155{ 155{
156 struct thread_info *thread_info, *exception_thread_info; 156 struct thread_info *thread_info, *exception_thread_info;
157 struct thread_info *backup_current_thread_info = 157 struct thread_info *backup_current_thread_info =
158 &__get_cpu_var(kgdb_thread_info); 158 this_cpu_ptr(&kgdb_thread_info);
159 159
160 if (user_mode(regs)) 160 if (user_mode(regs))
161 return 0; 161 return 0;
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 2f72af82513c..7c053f281406 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -119,7 +119,7 @@ static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
119 119
120static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb) 120static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
121{ 121{
122 __get_cpu_var(current_kprobe) = kcb->prev_kprobe.kp; 122 __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
123 kcb->kprobe_status = kcb->prev_kprobe.status; 123 kcb->kprobe_status = kcb->prev_kprobe.status;
124 kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr; 124 kcb->kprobe_saved_msr = kcb->prev_kprobe.saved_msr;
125} 125}
@@ -127,7 +127,7 @@ static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
127static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs, 127static void __kprobes set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
128 struct kprobe_ctlblk *kcb) 128 struct kprobe_ctlblk *kcb)
129{ 129{
130 __get_cpu_var(current_kprobe) = p; 130 __this_cpu_write(current_kprobe, p);
131 kcb->kprobe_saved_msr = regs->msr; 131 kcb->kprobe_saved_msr = regs->msr;
132} 132}
133 133
@@ -192,7 +192,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
192 ret = 1; 192 ret = 1;
193 goto no_kprobe; 193 goto no_kprobe;
194 } 194 }
195 p = __get_cpu_var(current_kprobe); 195 p = __this_cpu_read(current_kprobe);
196 if (p->break_handler && p->break_handler(p, regs)) { 196 if (p->break_handler && p->break_handler(p, regs)) {
197 goto ss_probe; 197 goto ss_probe;
198 } 198 }
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index a7fd4cb78b78..15c99b649b04 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -73,8 +73,8 @@ void save_mce_event(struct pt_regs *regs, long handled,
73 uint64_t nip, uint64_t addr) 73 uint64_t nip, uint64_t addr)
74{ 74{
75 uint64_t srr1; 75 uint64_t srr1;
76 int index = __get_cpu_var(mce_nest_count)++; 76 int index = __this_cpu_inc_return(mce_nest_count);
77 struct machine_check_event *mce = &__get_cpu_var(mce_event[index]); 77 struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
78 78
79 /* 79 /*
80 * Return if we don't have enough space to log mce event. 80 * Return if we don't have enough space to log mce event.
@@ -143,7 +143,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
143 */ 143 */
144int get_mce_event(struct machine_check_event *mce, bool release) 144int get_mce_event(struct machine_check_event *mce, bool release)
145{ 145{
146 int index = __get_cpu_var(mce_nest_count) - 1; 146 int index = __this_cpu_read(mce_nest_count) - 1;
147 struct machine_check_event *mc_evt; 147 struct machine_check_event *mc_evt;
148 int ret = 0; 148 int ret = 0;
149 149
@@ -153,7 +153,7 @@ int get_mce_event(struct machine_check_event *mce, bool release)
153 153
154 /* Check if we have MCE info to process. */ 154 /* Check if we have MCE info to process. */
155 if (index < MAX_MC_EVT) { 155 if (index < MAX_MC_EVT) {
156 mc_evt = &__get_cpu_var(mce_event[index]); 156 mc_evt = this_cpu_ptr(&mce_event[index]);
157 /* Copy the event structure and release the original */ 157 /* Copy the event structure and release the original */
158 if (mce) 158 if (mce)
159 *mce = *mc_evt; 159 *mce = *mc_evt;
@@ -163,7 +163,7 @@ int get_mce_event(struct machine_check_event *mce, bool release)
163 } 163 }
164 /* Decrement the count to free the slot. */ 164 /* Decrement the count to free the slot. */
165 if (release) 165 if (release)
166 __get_cpu_var(mce_nest_count)--; 166 __this_cpu_dec(mce_nest_count);
167 167
168 return ret; 168 return ret;
169} 169}
@@ -184,13 +184,13 @@ void machine_check_queue_event(void)
184 if (!get_mce_event(&evt, MCE_EVENT_RELEASE)) 184 if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
185 return; 185 return;
186 186
187 index = __get_cpu_var(mce_queue_count)++; 187 index = __this_cpu_inc_return(mce_queue_count);
188 /* If queue is full, just return for now. */ 188 /* If queue is full, just return for now. */
189 if (index >= MAX_MC_EVT) { 189 if (index >= MAX_MC_EVT) {
190 __get_cpu_var(mce_queue_count)--; 190 __this_cpu_dec(mce_queue_count);
191 return; 191 return;
192 } 192 }
193 __get_cpu_var(mce_event_queue[index]) = evt; 193 memcpy(this_cpu_ptr(&mce_event_queue[index]), &evt, sizeof(evt));
194 194
195 /* Queue irq work to process this event later. */ 195 /* Queue irq work to process this event later. */
196 irq_work_queue(&mce_event_process_work); 196 irq_work_queue(&mce_event_process_work);
@@ -208,11 +208,11 @@ static void machine_check_process_queued_event(struct irq_work *work)
208 * For now just print it to console. 208 * For now just print it to console.
209 * TODO: log this error event to FSP or nvram. 209 * TODO: log this error event to FSP or nvram.
210 */ 210 */
211 while (__get_cpu_var(mce_queue_count) > 0) { 211 while (__this_cpu_read(mce_queue_count) > 0) {
212 index = __get_cpu_var(mce_queue_count) - 1; 212 index = __this_cpu_read(mce_queue_count) - 1;
213 machine_check_print_event_info( 213 machine_check_print_event_info(
214 &__get_cpu_var(mce_event_queue[index])); 214 this_cpu_ptr(&mce_event_queue[index]));
215 __get_cpu_var(mce_queue_count)--; 215 __this_cpu_dec(mce_queue_count);
216 } 216 }
217} 217}
218 218
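
One subtlety worth flagging in this hunk: __this_cpu_inc_return() yields the
value *after* the increment, whereas the old post-increment
"__get_cpu_var(mce_nest_count)++" yielded the value *before* it. Code that
uses the result as an array index therefore shifts by one across this kind of
conversion:

    int old_idx = counter++;                       /* pre-increment value  */
    int new_idx = __this_cpu_inc_return(counter);  /* post-increment value */
    /* new_idx == old_idx + 1 for the same starting state */
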
diff --git a/arch/powerpc/kernel/mce_power.c b/arch/powerpc/kernel/mce_power.c
index aa9aff3d6ad3..b6f123ab90ed 100644
--- a/arch/powerpc/kernel/mce_power.c
+++ b/arch/powerpc/kernel/mce_power.c
@@ -79,7 +79,7 @@ static long mce_handle_derror(uint64_t dsisr, uint64_t slb_error_bits)
79 } 79 }
80 if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) { 80 if (dsisr & P7_DSISR_MC_TLB_MULTIHIT_MFTLB) {
81 if (cur_cpu_spec && cur_cpu_spec->flush_tlb) 81 if (cur_cpu_spec && cur_cpu_spec->flush_tlb)
82 cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE); 82 cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET);
83 /* reset error bits */ 83 /* reset error bits */
84 dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB; 84 dsisr &= ~P7_DSISR_MC_TLB_MULTIHIT_MFTLB;
85 } 85 }
@@ -110,7 +110,7 @@ static long mce_handle_common_ierror(uint64_t srr1)
110 break; 110 break;
111 case P7_SRR1_MC_IFETCH_TLB_MULTIHIT: 111 case P7_SRR1_MC_IFETCH_TLB_MULTIHIT:
112 if (cur_cpu_spec && cur_cpu_spec->flush_tlb) { 112 if (cur_cpu_spec && cur_cpu_spec->flush_tlb) {
113 cur_cpu_spec->flush_tlb(TLBIEL_INVAL_PAGE); 113 cur_cpu_spec->flush_tlb(TLBIEL_INVAL_SET);
114 handled = 1; 114 handled = 1;
115 } 115 }
116 break; 116 break;
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c
index e5dad9a9edc0..37d512d35943 100644
--- a/arch/powerpc/kernel/pci-common.c
+++ b/arch/powerpc/kernel/pci-common.c
@@ -20,7 +20,6 @@
20#include <linux/pci.h> 20#include <linux/pci.h>
21#include <linux/string.h> 21#include <linux/string.h>
22#include <linux/init.h> 22#include <linux/init.h>
23#include <linux/bootmem.h>
24#include <linux/delay.h> 23#include <linux/delay.h>
25#include <linux/export.h> 24#include <linux/export.h>
26#include <linux/of_address.h> 25#include <linux/of_address.h>
@@ -1464,7 +1463,7 @@ static void pcibios_setup_phb_resources(struct pci_controller *hose,
1464 res = &hose->io_resource; 1463 res = &hose->io_resource;
1465 1464
1466 if (!res->flags) { 1465 if (!res->flags) {
1467 printk(KERN_WARNING "PCI: I/O resource not set for host" 1466 pr_info("PCI: I/O resource not set for host"
1468 " bridge %s (domain %d)\n", 1467 " bridge %s (domain %d)\n",
1469 hose->dn->full_name, hose->global_number); 1468 hose->dn->full_name, hose->global_number);
1470 } else { 1469 } else {
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c
index 432459c817fa..1f7930037cb7 100644
--- a/arch/powerpc/kernel/pci_32.c
+++ b/arch/powerpc/kernel/pci_32.c
@@ -199,9 +199,7 @@ pci_create_OF_bus_map(void)
199 struct property* of_prop; 199 struct property* of_prop;
200 struct device_node *dn; 200 struct device_node *dn;
201 201
202 of_prop = (struct property*) alloc_bootmem(sizeof(struct property) + 256); 202 of_prop = memblock_virt_alloc(sizeof(struct property) + 256, 0);
203 if (!of_prop)
204 return;
205 dn = of_find_node_by_path("/"); 203 dn = of_find_node_by_path("/");
206 if (dn) { 204 if (dn) {
207 memset(of_prop, -1, sizeof(struct property) + 256); 205 memset(of_prop, -1, sizeof(struct property) + 256);
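
The dropped NULL check is intentional: memblock_virt_alloc() panics on
allocation failure during early boot rather than returning NULL, so the error
path above was dead code:

    of_prop = memblock_virt_alloc(sizeof(struct property) + 256, 0);
    /* no NULL check needed -- failure panics inside the allocator */
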
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index b15194e2c5fc..60bb187cb46a 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -17,7 +17,6 @@
17#include <linux/pci.h> 17#include <linux/pci.h>
18#include <linux/string.h> 18#include <linux/string.h>
19#include <linux/init.h> 19#include <linux/init.h>
20#include <linux/bootmem.h>
21#include <linux/export.h> 20#include <linux/export.h>
22#include <linux/mm.h> 21#include <linux/mm.h>
23#include <linux/list.h> 22#include <linux/list.h>
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 923cd2daba89..b4cc7bef6b16 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -37,9 +37,9 @@
37#include <linux/personality.h> 37#include <linux/personality.h>
38#include <linux/random.h> 38#include <linux/random.h>
39#include <linux/hw_breakpoint.h> 39#include <linux/hw_breakpoint.h>
40#include <linux/uaccess.h>
40 41
41#include <asm/pgtable.h> 42#include <asm/pgtable.h>
42#include <asm/uaccess.h>
43#include <asm/io.h> 43#include <asm/io.h>
44#include <asm/processor.h> 44#include <asm/processor.h>
45#include <asm/mmu.h> 45#include <asm/mmu.h>
@@ -499,7 +499,7 @@ static inline int set_dawr(struct arch_hw_breakpoint *brk)
499 499
500void __set_breakpoint(struct arch_hw_breakpoint *brk) 500void __set_breakpoint(struct arch_hw_breakpoint *brk)
501{ 501{
502 __get_cpu_var(current_brk) = *brk; 502 memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));
503 503
504 if (cpu_has_feature(CPU_FTR_DAWR)) 504 if (cpu_has_feature(CPU_FTR_DAWR))
505 set_dawr(brk); 505 set_dawr(brk);
@@ -842,7 +842,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
842 * schedule DABR 842 * schedule DABR
843 */ 843 */
844#ifndef CONFIG_HAVE_HW_BREAKPOINT 844#ifndef CONFIG_HAVE_HW_BREAKPOINT
845 if (unlikely(!hw_brk_match(&__get_cpu_var(current_brk), &new->thread.hw_brk))) 845 if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
846 __set_breakpoint(&new->thread.hw_brk); 846 __set_breakpoint(&new->thread.hw_brk);
847#endif /* CONFIG_HAVE_HW_BREAKPOINT */ 847#endif /* CONFIG_HAVE_HW_BREAKPOINT */
848#endif 848#endif
@@ -856,7 +856,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
856 * Collect processor utilization data per process 856 * Collect processor utilization data per process
857 */ 857 */
858 if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 858 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
859 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); 859 struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
860 long unsigned start_tb, current_tb; 860 long unsigned start_tb, current_tb;
861 start_tb = old_thread->start_tb; 861 start_tb = old_thread->start_tb;
862 cu->current_tb = current_tb = mfspr(SPRN_PURR); 862 cu->current_tb = current_tb = mfspr(SPRN_PURR);
@@ -866,7 +866,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
866#endif /* CONFIG_PPC64 */ 866#endif /* CONFIG_PPC64 */
867 867
868#ifdef CONFIG_PPC_BOOK3S_64 868#ifdef CONFIG_PPC_BOOK3S_64
869 batch = &__get_cpu_var(ppc64_tlb_batch); 869 batch = this_cpu_ptr(&ppc64_tlb_batch);
870 if (batch->active) { 870 if (batch->active) {
871 current_thread_info()->local_flags |= _TLF_LAZY_MMU; 871 current_thread_info()->local_flags |= _TLF_LAZY_MMU;
872 if (batch->index) 872 if (batch->index)
@@ -889,7 +889,7 @@ struct task_struct *__switch_to(struct task_struct *prev,
889#ifdef CONFIG_PPC_BOOK3S_64 889#ifdef CONFIG_PPC_BOOK3S_64
890 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) { 890 if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
891 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU; 891 current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
892 batch = &__get_cpu_var(ppc64_tlb_batch); 892 batch = this_cpu_ptr(&ppc64_tlb_batch);
893 batch->active = 1; 893 batch->active = 1;
894 } 894 }
895#endif /* CONFIG_PPC_BOOK3S_64 */ 895#endif /* CONFIG_PPC_BOOK3S_64 */
@@ -921,12 +921,8 @@ static void show_instructions(struct pt_regs *regs)
921 pc = (unsigned long)phys_to_virt(pc); 921 pc = (unsigned long)phys_to_virt(pc);
922#endif 922#endif
923 923
924 /* We use __get_user here *only* to avoid an OOPS on a
925 * bad address because the pc *should* only be a
926 * kernel address.
927 */
928 if (!__kernel_text_address(pc) || 924 if (!__kernel_text_address(pc) ||
929 __get_user(instr, (unsigned int __user *)pc)) { 925 probe_kernel_address((unsigned int __user *)pc, instr)) {
930 printk(KERN_CONT "XXXXXXXX "); 926 printk(KERN_CONT "XXXXXXXX ");
931 } else { 927 } else {
932 if (regs->nip == pc) 928 if (regs->nip == pc)
@@ -1531,13 +1527,6 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
1531 int curr_frame = current->curr_ret_stack; 1527 int curr_frame = current->curr_ret_stack;
1532 extern void return_to_handler(void); 1528 extern void return_to_handler(void);
1533 unsigned long rth = (unsigned long)return_to_handler; 1529 unsigned long rth = (unsigned long)return_to_handler;
1534 unsigned long mrth = -1;
1535#ifdef CONFIG_PPC64
1536 extern void mod_return_to_handler(void);
1537 rth = *(unsigned long *)rth;
1538 mrth = (unsigned long)mod_return_to_handler;
1539 mrth = *(unsigned long *)mrth;
1540#endif
1541#endif 1530#endif
1542 1531
1543 sp = (unsigned long) stack; 1532 sp = (unsigned long) stack;
@@ -1562,7 +1551,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
1562 if (!firstframe || ip != lr) { 1551 if (!firstframe || ip != lr) {
1563 printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip); 1552 printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
1564#ifdef CONFIG_FUNCTION_GRAPH_TRACER 1553#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1565 if ((ip == rth || ip == mrth) && curr_frame >= 0) { 1554 if ((ip == rth) && curr_frame >= 0) {
1566 printk(" (%pS)", 1555 printk(" (%pS)",
1567 (void *)current->ret_stack[curr_frame].ret); 1556 (void *)current->ret_stack[curr_frame].ret);
1568 curr_frame--; 1557 curr_frame--;
@@ -1665,12 +1654,3 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
1665 return ret; 1654 return ret;
1666} 1655}
1667 1656
1668unsigned long randomize_et_dyn(unsigned long base)
1669{
1670 unsigned long ret = PAGE_ALIGN(base + brk_rnd());
1671
1672 if (ret < base)
1673 return base;
1674
1675 return ret;
1676}
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 099f27e6d1b0..6a799b3cc6b4 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -160,6 +160,12 @@ static struct ibm_pa_feature {
160 {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1}, 160 {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1},
161 {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0}, 161 {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0},
162 {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0}, 162 {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0},
163 /*
164 * If the kernel doesn't support TM (ie. CONFIG_PPC_TRANSACTIONAL_MEM=n),
165 * we don't want to turn on CPU_FTR_TM here, so we use CPU_FTR_TM_COMP
166 * which is 0 if the kernel doesn't support TM.
167 */
168 {CPU_FTR_TM_COMP, 0, 0, 22, 0, 0},
163}; 169};
164 170
165static void __init scan_features(unsigned long node, const unsigned char *ftrs, 171static void __init scan_features(unsigned long node, const unsigned char *ftrs,
@@ -696,10 +702,7 @@ void __init early_init_devtree(void *params)
696 reserve_crashkernel(); 702 reserve_crashkernel();
697 early_reserve_mem(); 703 early_reserve_mem();
698 704
699 /* 705 /* Ensure that total memory size is page-aligned. */
700 * Ensure that total memory size is page-aligned, because otherwise
701 * mark_bootmem() gets upset.
702 */
703 limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE); 706 limit = ALIGN(memory_limit ?: memblock_phys_mem_size(), PAGE_SIZE);
704 memblock_enforce_memory_limit(limit); 707 memblock_enforce_memory_limit(limit);
705 708
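
The CPU_FTR_TM_COMP trick the new comment describes follows the usual *_COMP
pattern in cputable.h -- a sketch of the shape (exact spelling may differ in
the real header):

    #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
    #define CPU_FTR_TM_COMP         CPU_FTR_TM
    #else
    #define CPU_FTR_TM_COMP         0
    #endif

so the pa-features entry turns TM on only when the kernel was built with TM
support.
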
diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c
index 8777fb02349f..fb2fb3ea85e5 100644
--- a/arch/powerpc/kernel/rtas-proc.c
+++ b/arch/powerpc/kernel/rtas-proc.c
@@ -113,17 +113,6 @@
113#define SENSOR_PREFIX "ibm,sensor-" 113#define SENSOR_PREFIX "ibm,sensor-"
114#define cel_to_fahr(x) ((x*9/5)+32) 114#define cel_to_fahr(x) ((x*9/5)+32)
115 115
116
117/* Globals */
118static struct rtas_sensors sensors;
119static struct device_node *rtas_node = NULL;
120static unsigned long power_on_time = 0; /* Save the time the user set */
121static char progress_led[MAX_LINELENGTH];
122
123static unsigned long rtas_tone_frequency = 1000;
124static unsigned long rtas_tone_volume = 0;
125
126/* ****************STRUCTS******************************************* */
127struct individual_sensor { 116struct individual_sensor {
128 unsigned int token; 117 unsigned int token;
129 unsigned int quant; 118 unsigned int quant;
@@ -134,6 +123,15 @@ struct rtas_sensors {
134 unsigned int quant; 123 unsigned int quant;
135}; 124};
136 125
126/* Globals */
127static struct rtas_sensors sensors;
128static struct device_node *rtas_node = NULL;
129static unsigned long power_on_time = 0; /* Save the time the user set */
130static char progress_led[MAX_LINELENGTH];
131
132static unsigned long rtas_tone_frequency = 1000;
133static unsigned long rtas_tone_volume = 0;
134
137/* ****************************************************************** */ 135/* ****************************************************************** */
138/* Declarations */ 136/* Declarations */
139static int ppc_rtas_sensors_show(struct seq_file *m, void *v); 137static int ppc_rtas_sensors_show(struct seq_file *m, void *v);
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 8b4c857c1421..4af905e81ab0 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -1091,8 +1091,8 @@ asmlinkage int ppc_rtas(struct rtas_args __user *uargs)
1091} 1091}
1092 1092
1093/* 1093/*
1094 * Call early during boot, before mem init or bootmem, to retrieve the RTAS 1094 * Call early during boot, before mem init, to retrieve the RTAS
1095 * informations from the device-tree and allocate the RMO buffer for userland 1095 * information from the device-tree and allocate the RMO buffer for userland
1096 * accesses. 1096 * accesses.
1097 */ 1097 */
1098void __init rtas_initialize(void) 1098void __init rtas_initialize(void)
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 7c55b86206b3..ce230da2c015 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -26,7 +26,6 @@
26#include <linux/pci.h> 26#include <linux/pci.h>
27#include <linux/string.h> 27#include <linux/string.h>
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/bootmem.h>
30 29
31#include <asm/io.h> 30#include <asm/io.h>
32#include <asm/pgtable.h> 31#include <asm/pgtable.h>
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 1362cd62b3fa..44c8d03558ac 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -139,8 +139,8 @@ void machine_restart(char *cmd)
139void machine_power_off(void) 139void machine_power_off(void)
140{ 140{
141 machine_shutdown(); 141 machine_shutdown();
142 if (ppc_md.power_off) 142 if (pm_power_off)
143 ppc_md.power_off(); 143 pm_power_off();
144#ifdef CONFIG_SMP 144#ifdef CONFIG_SMP
145 smp_send_stop(); 145 smp_send_stop();
146#endif 146#endif
@@ -151,7 +151,7 @@ void machine_power_off(void)
151/* Used by the G5 thermal driver */ 151/* Used by the G5 thermal driver */
152EXPORT_SYMBOL_GPL(machine_power_off); 152EXPORT_SYMBOL_GPL(machine_power_off);
153 153
154void (*pm_power_off)(void) = machine_power_off; 154void (*pm_power_off)(void);
155EXPORT_SYMBOL_GPL(pm_power_off); 155EXPORT_SYMBOL_GPL(pm_power_off);
156 156
157void machine_halt(void) 157void machine_halt(void)
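Note on the setup-common.c hunk above: it inverts the old arrangement. pm_power_off no longer comes pre-wired to machine_power_off(); it starts out NULL and platforms assign their handler to it directly instead of filling in ppc_md.power_off. A minimal sketch of platform code after this change (my_board_power_off and my_board_init are invented names):

static void my_board_power_off(void)
{
	/* poke the board's power controller; should not return */
}

static int __init my_board_init(void)
{
	pm_power_off = my_board_power_off;	/* previously: ppc_md.power_off = ... */
	return 0;
}
arch_initcall(my_board_init);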
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 07831ed0d9ef..bb02e9f6944e 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -11,7 +11,6 @@
11#include <linux/delay.h> 11#include <linux/delay.h>
12#include <linux/initrd.h> 12#include <linux/initrd.h>
13#include <linux/tty.h> 13#include <linux/tty.h>
14#include <linux/bootmem.h>
15#include <linux/seq_file.h> 14#include <linux/seq_file.h>
16#include <linux/root_dev.h> 15#include <linux/root_dev.h>
17#include <linux/cpu.h> 16#include <linux/cpu.h>
@@ -53,11 +52,6 @@ unsigned long ISA_DMA_THRESHOLD;
53unsigned int DMA_MODE_READ; 52unsigned int DMA_MODE_READ;
54unsigned int DMA_MODE_WRITE; 53unsigned int DMA_MODE_WRITE;
55 54
56#ifdef CONFIG_VGA_CONSOLE
57unsigned long vgacon_remap_base;
58EXPORT_SYMBOL(vgacon_remap_base);
59#endif
60
61/* 55/*
62 * These are used in binfmt_elf.c to put aux entries on the stack 56 * These are used in binfmt_elf.c to put aux entries on the stack
63 * for each elf executable being started. 57 * for each elf executable being started.
@@ -311,9 +305,8 @@ void __init setup_arch(char **cmdline_p)
311 305
312 irqstack_early_init(); 306 irqstack_early_init();
313 307
314 /* set up the bootmem stuff with available memory */ 308 initmem_init();
315 do_init_bootmem(); 309 if ( ppc_md.progress ) ppc_md.progress("setup_arch: initmem", 0x3eab);
316 if ( ppc_md.progress ) ppc_md.progress("setup_arch: bootmem", 0x3eab);
317 310
318#ifdef CONFIG_DUMMY_CONSOLE 311#ifdef CONFIG_DUMMY_CONSOLE
319 conswitchp = &dummy_con; 312 conswitchp = &dummy_con;
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 4f3cfe1b6a33..49f553bbb360 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -660,13 +660,11 @@ static void __init emergency_stack_init(void)
660} 660}
661 661
662/* 662/*
663 * Called into from start_kernel this initializes bootmem, which is used 663 * Called into from start_kernel this initializes memblock, which is used
664 * to manage page allocation until mem_init is called. 664 * to manage page allocation until mem_init is called.
665 */ 665 */
666void __init setup_arch(char **cmdline_p) 666void __init setup_arch(char **cmdline_p)
667{ 667{
668 ppc64_boot_msg(0x12, "Setup Arch");
669
670 *cmdline_p = boot_command_line; 668 *cmdline_p = boot_command_line;
671 669
672 /* 670 /*
@@ -691,9 +689,7 @@ void __init setup_arch(char **cmdline_p)
691 exc_lvl_early_init(); 689 exc_lvl_early_init();
692 emergency_stack_init(); 690 emergency_stack_init();
693 691
694 /* set up the bootmem stuff with available memory */ 692 initmem_init();
695 do_init_bootmem();
696 sparse_init();
697 693
698#ifdef CONFIG_DUMMY_CONSOLE 694#ifdef CONFIG_DUMMY_CONSOLE
699 conswitchp = &dummy_con; 695 conswitchp = &dummy_con;
@@ -711,33 +707,6 @@ void __init setup_arch(char **cmdline_p)
711 if ((unsigned long)_stext & 0xffff) 707 if ((unsigned long)_stext & 0xffff)
712 panic("Kernelbase not 64K-aligned (0x%lx)!\n", 708 panic("Kernelbase not 64K-aligned (0x%lx)!\n",
713 (unsigned long)_stext); 709 (unsigned long)_stext);
714
715 ppc64_boot_msg(0x15, "Setup Done");
716}
717
718
719/* ToDo: do something useful if ppc_md is not yet setup. */
720#define PPC64_LINUX_FUNCTION 0x0f000000
721#define PPC64_IPL_MESSAGE 0xc0000000
722#define PPC64_TERM_MESSAGE 0xb0000000
723
724static void ppc64_do_msg(unsigned int src, const char *msg)
725{
726 if (ppc_md.progress) {
727 char buf[128];
728
729 sprintf(buf, "%08X\n", src);
730 ppc_md.progress(buf, 0);
731 snprintf(buf, 128, "%s", msg);
732 ppc_md.progress(buf, 0);
733 }
734}
735
736/* Print a boot progress message. */
737void ppc64_boot_msg(unsigned int src, const char *msg)
738{
739 ppc64_do_msg(PPC64_LINUX_FUNCTION|PPC64_IPL_MESSAGE|src, msg);
740 printk("[boot]%04x %s\n", src, msg);
741} 710}
742 711
743#ifdef CONFIG_SMP 712#ifdef CONFIG_SMP
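With ppc64_boot_msg() and its ppc64_do_msg() helper removed, 64-bit boot progress reporting relies solely on the optional platform hook, the same pattern the setup_32.c hunk above uses. A sketch of the surviving idiom (the hex code is just an example value of the kind boards display):

	if (ppc_md.progress)
		ppc_md.progress("setup_arch: exit", 0x3eab);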
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 71e186d5f331..8b2d2dc8ef10 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -243,7 +243,7 @@ void smp_muxed_ipi_message_pass(int cpu, int msg)
243 243
244irqreturn_t smp_ipi_demux(void) 244irqreturn_t smp_ipi_demux(void)
245{ 245{
246 struct cpu_messages *info = &__get_cpu_var(ipi_message); 246 struct cpu_messages *info = this_cpu_ptr(&ipi_message);
247 unsigned int all; 247 unsigned int all;
248 248
249 mb(); /* order any irq clear */ 249 mb(); /* order any irq clear */
@@ -442,9 +442,9 @@ void generic_mach_cpu_die(void)
442 idle_task_exit(); 442 idle_task_exit();
443 cpu = smp_processor_id(); 443 cpu = smp_processor_id();
444 printk(KERN_DEBUG "CPU%d offline\n", cpu); 444 printk(KERN_DEBUG "CPU%d offline\n", cpu);
445 __get_cpu_var(cpu_state) = CPU_DEAD; 445 __this_cpu_write(cpu_state, CPU_DEAD);
446 smp_wmb(); 446 smp_wmb();
447 while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) 447 while (__this_cpu_read(cpu_state) != CPU_UP_PREPARE)
448 cpu_relax(); 448 cpu_relax();
449} 449}
450 450
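The smp.c hunks are part of the tree-wide replacement of the __get_cpu_var() lvalue macro with explicit this-CPU operations, which document intent and let the compiler emit single per-CPU instructions where the architecture supports them. A self-contained sketch of the correspondence (demo_state is an invented per-CPU variable; preemption is assumed disabled, as in the code above):

#include <linux/percpu.h>

static DEFINE_PER_CPU(int, demo_state);

static void demo(void)
{
	__this_cpu_write(demo_state, 1);	/* was: __get_cpu_var(demo_state) = 1; */
	if (__this_cpu_read(demo_state))	/* was: if (__get_cpu_var(demo_state)) */
		memset(this_cpu_ptr(&demo_state),	/* was: &__get_cpu_var(demo_state) */
		       0, sizeof(int));
}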
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index 67fd2fd2620a..fa1fd8a0c867 100644
--- a/arch/powerpc/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -394,10 +394,10 @@ void ppc_enable_pmcs(void)
394 ppc_set_pmu_inuse(1); 394 ppc_set_pmu_inuse(1);
395 395
396 /* Only need to enable them once */ 396 /* Only need to enable them once */
397 if (__get_cpu_var(pmcs_enabled)) 397 if (__this_cpu_read(pmcs_enabled))
398 return; 398 return;
399 399
400 __get_cpu_var(pmcs_enabled) = 1; 400 __this_cpu_write(pmcs_enabled, 1);
401 401
402 if (ppc_md.enable_pmcs) 402 if (ppc_md.enable_pmcs)
403 ppc_md.enable_pmcs(); 403 ppc_md.enable_pmcs();
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 7505599c2593..fa7c4f12104f 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -458,9 +458,9 @@ static inline void clear_irq_work_pending(void)
458 458
459DEFINE_PER_CPU(u8, irq_work_pending); 459DEFINE_PER_CPU(u8, irq_work_pending);
460 460
461#define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1 461#define set_irq_work_pending_flag() __this_cpu_write(irq_work_pending, 1)
462#define test_irq_work_pending() __get_cpu_var(irq_work_pending) 462#define test_irq_work_pending() __this_cpu_read(irq_work_pending)
463#define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0 463#define clear_irq_work_pending() __this_cpu_write(irq_work_pending, 0)
464 464
465#endif /* 32 vs 64 bit */ 465#endif /* 32 vs 64 bit */
466 466
@@ -482,8 +482,8 @@ void arch_irq_work_raise(void)
482static void __timer_interrupt(void) 482static void __timer_interrupt(void)
483{ 483{
484 struct pt_regs *regs = get_irq_regs(); 484 struct pt_regs *regs = get_irq_regs();
485 u64 *next_tb = &__get_cpu_var(decrementers_next_tb); 485 u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
486 struct clock_event_device *evt = &__get_cpu_var(decrementers); 486 struct clock_event_device *evt = this_cpu_ptr(&decrementers);
487 u64 now; 487 u64 now;
488 488
489 trace_timer_interrupt_entry(regs); 489 trace_timer_interrupt_entry(regs);
@@ -498,7 +498,7 @@ static void __timer_interrupt(void)
498 *next_tb = ~(u64)0; 498 *next_tb = ~(u64)0;
499 if (evt->event_handler) 499 if (evt->event_handler)
500 evt->event_handler(evt); 500 evt->event_handler(evt);
501 __get_cpu_var(irq_stat).timer_irqs_event++; 501 __this_cpu_inc(irq_stat.timer_irqs_event);
502 } else { 502 } else {
503 now = *next_tb - now; 503 now = *next_tb - now;
504 if (now <= DECREMENTER_MAX) 504 if (now <= DECREMENTER_MAX)
@@ -506,13 +506,13 @@ static void __timer_interrupt(void)
506 /* We may have raced with new irq work */ 506 /* We may have raced with new irq work */
507 if (test_irq_work_pending()) 507 if (test_irq_work_pending())
508 set_dec(1); 508 set_dec(1);
509 __get_cpu_var(irq_stat).timer_irqs_others++; 509 __this_cpu_inc(irq_stat.timer_irqs_others);
510 } 510 }
511 511
512#ifdef CONFIG_PPC64 512#ifdef CONFIG_PPC64
513 /* collect purr register values often, for accurate calculations */ 513 /* collect purr register values often, for accurate calculations */
514 if (firmware_has_feature(FW_FEATURE_SPLPAR)) { 514 if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
515 struct cpu_usage *cu = &__get_cpu_var(cpu_usage_array); 515 struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
516 cu->current_tb = mfspr(SPRN_PURR); 516 cu->current_tb = mfspr(SPRN_PURR);
517 } 517 }
518#endif 518#endif
@@ -527,7 +527,7 @@ static void __timer_interrupt(void)
527void timer_interrupt(struct pt_regs * regs) 527void timer_interrupt(struct pt_regs * regs)
528{ 528{
529 struct pt_regs *old_regs; 529 struct pt_regs *old_regs;
530 u64 *next_tb = &__get_cpu_var(decrementers_next_tb); 530 u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
531 531
532 /* Ensure a positive value is written to the decrementer, or else 532 /* Ensure a positive value is written to the decrementer, or else
533 * some CPUs will continue to take decrementer exceptions. 533 * some CPUs will continue to take decrementer exceptions.
@@ -813,7 +813,7 @@ static void __init clocksource_init(void)
813static int decrementer_set_next_event(unsigned long evt, 813static int decrementer_set_next_event(unsigned long evt,
814 struct clock_event_device *dev) 814 struct clock_event_device *dev)
815{ 815{
816 __get_cpu_var(decrementers_next_tb) = get_tb_or_rtc() + evt; 816 __this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt);
817 set_dec(evt); 817 set_dec(evt);
818 818
819 /* We may have raced with new irq work */ 819 /* We may have raced with new irq work */
@@ -833,7 +833,7 @@ static void decrementer_set_mode(enum clock_event_mode mode,
833/* Interrupt handler for the timer broadcast IPI */ 833/* Interrupt handler for the timer broadcast IPI */
834void tick_broadcast_ipi_handler(void) 834void tick_broadcast_ipi_handler(void)
835{ 835{
836 u64 *next_tb = &__get_cpu_var(decrementers_next_tb); 836 u64 *next_tb = this_cpu_ptr(&decrementers_next_tb);
837 837
838 *next_tb = get_tb_or_rtc(); 838 *next_tb = get_tb_or_rtc();
839 __timer_interrupt(); 839 __timer_interrupt();
@@ -989,6 +989,7 @@ void GregorianDay(struct rtc_time * tm)
989 989
990 tm->tm_wday = day % 7; 990 tm->tm_wday = day % 7;
991} 991}
992EXPORT_SYMBOL_GPL(GregorianDay);
992 993
993void to_tm(int tim, struct rtc_time * tm) 994void to_tm(int tim, struct rtc_time * tm)
994{ 995{
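One detail worth noting in the time.c conversion: decrementer_set_next_event() programs the hardware first and only then rechecks the per-CPU irq_work flag, forcing a near-immediate expiry if work was raised in the window. Restated as a sketch taken straight from the hunks above:

	__this_cpu_write(decrementers_next_tb, get_tb_or_rtc() + evt);
	set_dec(evt);
	/* irq work raised between the write and set_dec() would otherwise
	 * wait out the full period, so expire almost immediately instead */
	if (test_irq_work_pending())
		set_dec(1);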
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 0dc43f9932cf..e6595b72269b 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -295,7 +295,7 @@ long machine_check_early(struct pt_regs *regs)
295{ 295{
296 long handled = 0; 296 long handled = 0;
297 297
298 __get_cpu_var(irq_stat).mce_exceptions++; 298 __this_cpu_inc(irq_stat.mce_exceptions);
299 299
300 if (cur_cpu_spec && cur_cpu_spec->machine_check_early) 300 if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
301 handled = cur_cpu_spec->machine_check_early(regs); 301 handled = cur_cpu_spec->machine_check_early(regs);
@@ -304,7 +304,7 @@ long machine_check_early(struct pt_regs *regs)
304 304
305long hmi_exception_realmode(struct pt_regs *regs) 305long hmi_exception_realmode(struct pt_regs *regs)
306{ 306{
307 __get_cpu_var(irq_stat).hmi_exceptions++; 307 __this_cpu_inc(irq_stat.hmi_exceptions);
308 308
309 if (ppc_md.hmi_exception_early) 309 if (ppc_md.hmi_exception_early)
310 ppc_md.hmi_exception_early(regs); 310 ppc_md.hmi_exception_early(regs);
@@ -700,7 +700,7 @@ void machine_check_exception(struct pt_regs *regs)
700 enum ctx_state prev_state = exception_enter(); 700 enum ctx_state prev_state = exception_enter();
701 int recover = 0; 701 int recover = 0;
702 702
703 __get_cpu_var(irq_stat).mce_exceptions++; 703 __this_cpu_inc(irq_stat.mce_exceptions);
704 704
705 /* See if any machine dependent calls. In theory, we would want 705 /* See if any machine dependent calls. In theory, we would want
706 * to call the CPU first, and call the ppc_md. one if the CPU 706 * to call the CPU first, and call the ppc_md. one if the CPU
@@ -1519,7 +1519,7 @@ void vsx_unavailable_tm(struct pt_regs *regs)
1519 1519
1520void performance_monitor_exception(struct pt_regs *regs) 1520void performance_monitor_exception(struct pt_regs *regs)
1521{ 1521{
1522 __get_cpu_var(irq_stat).pmu_irqs++; 1522 __this_cpu_inc(irq_stat.pmu_irqs);
1523 1523
1524 perf_irq(regs); 1524 perf_irq(regs);
1525} 1525}
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 6e7c4923b5ea..411116c38da4 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -69,8 +69,12 @@ static void udbg_uart_putc(char c)
69 69
70static int udbg_uart_getc_poll(void) 70static int udbg_uart_getc_poll(void)
71{ 71{
72 if (!udbg_uart_in || !(udbg_uart_in(UART_LSR) & LSR_DR)) 72 if (!udbg_uart_in)
73 return -1;
74
 75 if (udbg_uart_in(UART_LSR) & LSR_DR)
73 return udbg_uart_in(UART_RBR); 76 return udbg_uart_in(UART_RBR);
77
74 return -1; 78 return -1;
75} 79}
76 80
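The udbg_16550.c change closes a genuine NULL-dereference path: in the old combined condition, a NULL udbg_uart_in made the test true, and the next statement then called the NULL hook to read UART_RBR. Guarding first and polling second removes that path. The corrected flow, sketched with read_hook standing in for udbg_uart_in:

	if (!read_hook)
		return -1;			/* no UART registered yet */
	if (read_hook(UART_LSR) & LSR_DR)	/* receiver data ready? */
		return read_hook(UART_RBR);
	return -1;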
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
index f174351842cf..305eb0d9b768 100644
--- a/arch/powerpc/kernel/vdso.c
+++ b/arch/powerpc/kernel/vdso.c
@@ -20,7 +20,6 @@
20#include <linux/user.h> 20#include <linux/user.h>
21#include <linux/elf.h> 21#include <linux/elf.h>
22#include <linux/security.h> 22#include <linux/security.h>
23#include <linux/bootmem.h>
24#include <linux/memblock.h> 23#include <linux/memblock.h>
25 24
26#include <asm/pgtable.h> 25#include <asm/pgtable.h>
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index 4fdc27c80f4c..3f1bb5a36c27 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -12,7 +12,6 @@
12#include <linux/export.h> 12#include <linux/export.h>
13#include <linux/sched.h> 13#include <linux/sched.h>
14#include <linux/spinlock.h> 14#include <linux/spinlock.h>
15#include <linux/bootmem.h>
16#include <linux/init.h> 15#include <linux/init.h>
17#include <linux/memblock.h> 16#include <linux/memblock.h>
18#include <linux/sizes.h> 17#include <linux/sizes.h>
@@ -154,7 +153,7 @@ EXPORT_SYMBOL_GPL(kvm_release_hpt);
154 * kvm_cma_reserve() - reserve area for kvm hash pagetable 153 * kvm_cma_reserve() - reserve area for kvm hash pagetable
155 * 154 *
156 * This function reserves memory from early allocator. It should be 155 * This function reserves memory from early allocator. It should be
157 * called by arch specific code once the early allocator (memblock or bootmem) 156 * called by arch specific code once the memblock allocator
158 * has been activated and all other subsystems have already allocated/reserved 157 * has been activated and all other subsystems have already allocated/reserved
159 * memory. 158 * memory.
160 */ 159 */
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index edb2ccdbb2ba..65c105b17a25 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -201,8 +201,6 @@ kvmppc_primary_no_guest:
201 bge kvm_novcpu_exit /* another thread already exiting */ 201 bge kvm_novcpu_exit /* another thread already exiting */
202 li r3, NAPPING_NOVCPU 202 li r3, NAPPING_NOVCPU
203 stb r3, HSTATE_NAPPING(r13) 203 stb r3, HSTATE_NAPPING(r13)
204 li r3, 1
205 stb r3, HSTATE_HWTHREAD_REQ(r13)
206 204
207 b kvm_do_nap 205 b kvm_do_nap
208 206
@@ -293,6 +291,8 @@ kvm_start_guest:
293 /* if we have no vcpu to run, go back to sleep */ 291 /* if we have no vcpu to run, go back to sleep */
294 beq kvm_no_guest 292 beq kvm_no_guest
295 293
294kvm_secondary_got_guest:
295
296 /* Set HSTATE_DSCR(r13) to something sensible */ 296 /* Set HSTATE_DSCR(r13) to something sensible */
297 ld r6, PACA_DSCR(r13) 297 ld r6, PACA_DSCR(r13)
298 std r6, HSTATE_DSCR(r13) 298 std r6, HSTATE_DSCR(r13)
@@ -318,27 +318,46 @@ kvm_start_guest:
318 stwcx. r3, 0, r4 318 stwcx. r3, 0, r4
319 bne 51b 319 bne 51b
320 320
321/*
322 * At this point we have finished executing in the guest.
323 * We need to wait for hwthread_req to become zero, since
324 * we may not turn on the MMU while hwthread_req is non-zero.
325 * While waiting we also need to check if we get given a vcpu to run.
326 */
321kvm_no_guest: 327kvm_no_guest:
322 li r0, KVM_HWTHREAD_IN_NAP 328 lbz r3, HSTATE_HWTHREAD_REQ(r13)
329 cmpwi r3, 0
330 bne 53f
331 HMT_MEDIUM
332 li r0, KVM_HWTHREAD_IN_KERNEL
323 stb r0, HSTATE_HWTHREAD_STATE(r13) 333 stb r0, HSTATE_HWTHREAD_STATE(r13)
324kvm_do_nap: 334 /* need to recheck hwthread_req after a barrier, to avoid race */
325 /* Clear the runlatch bit before napping */ 335 sync
326 mfspr r2, SPRN_CTRLF 336 lbz r3, HSTATE_HWTHREAD_REQ(r13)
327 clrrdi r2, r2, 1 337 cmpwi r3, 0
328 mtspr SPRN_CTRLT, r2 338 bne 54f
329 339/*
340 * We jump to power7_wakeup_loss, which will return to the caller
341 * of power7_nap in the powernv cpu offline loop. The value we
342 * put in r3 becomes the return value for power7_nap.
343 */
330 li r3, LPCR_PECE0 344 li r3, LPCR_PECE0
331 mfspr r4, SPRN_LPCR 345 mfspr r4, SPRN_LPCR
332 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 346 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
333 mtspr SPRN_LPCR, r4 347 mtspr SPRN_LPCR, r4
334 isync 348 li r3, 0
335 std r0, HSTATE_SCRATCH0(r13) 349 b power7_wakeup_loss
336 ptesync 350
337 ld r0, HSTATE_SCRATCH0(r13) 35153: HMT_LOW
3381: cmpd r0, r0 352 ld r4, HSTATE_KVM_VCPU(r13)
339 bne 1b 353 cmpdi r4, 0
340 nap 354 beq kvm_no_guest
341 b . 355 HMT_MEDIUM
356 b kvm_secondary_got_guest
357
35854: li r0, KVM_HWTHREAD_IN_KVM
359 stb r0, HSTATE_HWTHREAD_STATE(r13)
360 b kvm_no_guest
342 361
343/****************************************************************************** 362/******************************************************************************
344 * * 363 * *
@@ -2172,6 +2191,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
2172 * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the 2191 * occurs, with PECE1, PECE0 and PECEDP set in LPCR. Also clear the
2173 * runlatch bit before napping. 2192 * runlatch bit before napping.
2174 */ 2193 */
2194kvm_do_nap:
2175 mfspr r2, SPRN_CTRLF 2195 mfspr r2, SPRN_CTRLF
2176 clrrdi r2, r2, 1 2196 clrrdi r2, r2, 1
2177 mtspr SPRN_CTRLT, r2 2197 mtspr SPRN_CTRLT, r2
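The reworked kvm_no_guest path implements a small handshake: an offline secondary thread may not turn the MMU back on while the host's hwthread_req flag is set, and after publishing its own state it must recheck the flag across a barrier to close the race. A rough C rendering of the loop (a sketch of the assembly above, with the vcpu-dispatch branch at label 53 omitted; hstate mirrors the HSTATE fields):

	for (;;) {
		if (hstate->hwthread_req) {		/* label 54 */
			hstate->hwthread_state = KVM_HWTHREAD_IN_KVM;
			continue;			/* keep polling */
		}
		hstate->hwthread_state = KVM_HWTHREAD_IN_KERNEL;
		smp_mb();				/* the "sync" above */
		if (hstate->hwthread_req)		/* recheck: lost the race */
			continue;
		return 0;	/* via power7_wakeup_loss: power7_nap() returns 0 */
	}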
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 2e02ed849f36..16095841afe1 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -76,11 +76,11 @@ static inline int local_sid_setup_one(struct id *entry)
76 unsigned long sid; 76 unsigned long sid;
77 int ret = -1; 77 int ret = -1;
78 78
79 sid = ++(__get_cpu_var(pcpu_last_used_sid)); 79 sid = __this_cpu_inc_return(pcpu_last_used_sid);
80 if (sid < NUM_TIDS) { 80 if (sid < NUM_TIDS) {
 81 __get_cpu_var(pcpu_sids).entry[sid] = entry; 81 __this_cpu_write(pcpu_sids.entry[sid], entry);
82 entry->val = sid; 82 entry->val = sid;
83 entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid]; 83 entry->pentry = this_cpu_ptr(&pcpu_sids.entry[sid]);
84 ret = sid; 84 ret = sid;
85 } 85 }
86 86
@@ -108,8 +108,8 @@ static inline int local_sid_setup_one(struct id *entry)
108static inline int local_sid_lookup(struct id *entry) 108static inline int local_sid_lookup(struct id *entry)
109{ 109{
110 if (entry && entry->val != 0 && 110 if (entry && entry->val != 0 &&
111 __get_cpu_var(pcpu_sids).entry[entry->val] == entry && 111 __this_cpu_read(pcpu_sids.entry[entry->val]) == entry &&
112 entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val]) 112 entry->pentry == this_cpu_ptr(&pcpu_sids.entry[entry->val]))
113 return entry->val; 113 return entry->val;
114 return -1; 114 return -1;
115} 115}
@@ -117,8 +117,8 @@ static inline int local_sid_lookup(struct id *entry)
117/* Invalidate all id mappings on local core -- call with preempt disabled */ 117/* Invalidate all id mappings on local core -- call with preempt disabled */
118static inline void local_sid_destroy_all(void) 118static inline void local_sid_destroy_all(void)
119{ 119{
120 __get_cpu_var(pcpu_last_used_sid) = 0; 120 __this_cpu_write(pcpu_last_used_sid, 0);
121 memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids))); 121 memset(this_cpu_ptr(&pcpu_sids), 0, sizeof(pcpu_sids));
122} 122}
123 123
124static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500) 124static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
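Note the accessor chosen for SID allocation above: __this_cpu_inc_return() increments the per-CPU counter and yields the new value, exactly what the old ++(__get_cpu_var(...)) expression produced. Minimal illustration (demo_sid is invented; preemption assumed disabled):

static DEFINE_PER_CPU(unsigned long, demo_sid);

static unsigned long next_sid(void)
{
	return __this_cpu_inc_return(demo_sid);	/* same as ++counter on this CPU */
}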
diff --git a/arch/powerpc/kvm/e500_mmu_host.c b/arch/powerpc/kvm/e500_mmu_host.c
index 769778f855b0..cc536d4a75ef 100644
--- a/arch/powerpc/kvm/e500_mmu_host.c
+++ b/arch/powerpc/kvm/e500_mmu_host.c
@@ -661,7 +661,7 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
661 if (unlikely((pr && !(mas3 & MAS3_UX)) || 661 if (unlikely((pr && !(mas3 & MAS3_UX)) ||
662 (!pr && !(mas3 & MAS3_SX)))) { 662 (!pr && !(mas3 & MAS3_SX)))) {
663 pr_err_ratelimited( 663 pr_err_ratelimited(
664 "%s: Instuction emulation from guest addres %08lx without execute permission\n", 664 "%s: Instruction emulation from guest address %08lx without execute permission\n",
665 __func__, geaddr); 665 __func__, geaddr);
666 return EMULATE_AGAIN; 666 return EMULATE_AGAIN;
667 } 667 }
@@ -673,7 +673,7 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
673 if (has_feature(vcpu, VCPU_FTR_MMU_V2) && 673 if (has_feature(vcpu, VCPU_FTR_MMU_V2) &&
674 unlikely((mas2 & MAS2_I) || (mas2 & MAS2_W) || !(mas2 & MAS2_M))) { 674 unlikely((mas2 & MAS2_I) || (mas2 & MAS2_W) || !(mas2 & MAS2_M))) {
675 pr_err_ratelimited( 675 pr_err_ratelimited(
676 "%s: Instuction emulation from guest addres %08lx mismatches storage attributes\n", 676 "%s: Instruction emulation from guest address %08lx mismatches storage attributes\n",
677 __func__, geaddr); 677 __func__, geaddr);
678 return EMULATE_AGAIN; 678 return EMULATE_AGAIN;
679 } 679 }
@@ -686,7 +686,7 @@ int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
686 686
687 /* Guard against emulation from devices area */ 687 /* Guard against emulation from devices area */
688 if (unlikely(!page_is_ram(pfn))) { 688 if (unlikely(!page_is_ram(pfn))) {
689 pr_err_ratelimited("%s: Instruction emulation from non-RAM host addres %08llx is not supported\n", 689 pr_err_ratelimited("%s: Instruction emulation from non-RAM host address %08llx is not supported\n",
690 __func__, addr); 690 __func__, addr);
691 return EMULATE_AGAIN; 691 return EMULATE_AGAIN;
692 } 692 }
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
index 2fdc8722e324..cda695de8aa7 100644
--- a/arch/powerpc/kvm/e500mc.c
+++ b/arch/powerpc/kvm/e500mc.c
@@ -144,9 +144,9 @@ static void kvmppc_core_vcpu_load_e500mc(struct kvm_vcpu *vcpu, int cpu)
144 mtspr(SPRN_GESR, vcpu->arch.shared->esr); 144 mtspr(SPRN_GESR, vcpu->arch.shared->esr);
145 145
146 if (vcpu->arch.oldpir != mfspr(SPRN_PIR) || 146 if (vcpu->arch.oldpir != mfspr(SPRN_PIR) ||
147 __get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] != vcpu) { 147 __this_cpu_read(last_vcpu_of_lpid[get_lpid(vcpu)]) != vcpu) {
148 kvmppc_e500_tlbil_all(vcpu_e500); 148 kvmppc_e500_tlbil_all(vcpu_e500);
149 __get_cpu_var(last_vcpu_of_lpid)[get_lpid(vcpu)] = vcpu; 149 __this_cpu_write(last_vcpu_of_lpid[get_lpid(vcpu)], vcpu);
150 } 150 }
151} 151}
152 152
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile
index 9f342f134ae4..597562f69b2d 100644
--- a/arch/powerpc/lib/Makefile
+++ b/arch/powerpc/lib/Makefile
@@ -12,7 +12,6 @@ CFLAGS_REMOVE_feature-fixups.o = -pg
12obj-y := string.o alloc.o \ 12obj-y := string.o alloc.o \
13 crtsavres.o ppc_ksyms.o 13 crtsavres.o ppc_ksyms.o
14obj-$(CONFIG_PPC32) += div64.o copy_32.o 14obj-$(CONFIG_PPC32) += div64.o copy_32.o
15obj-$(CONFIG_HAS_IOMEM) += devres.o
16 15
17obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \ 16obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \
18 usercopy_64.o mem_64.o string.o \ 17 usercopy_64.o mem_64.o string.o \
diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c
index da22c84a8fed..4a6c2cf890d9 100644
--- a/arch/powerpc/lib/alloc.c
+++ b/arch/powerpc/lib/alloc.c
@@ -13,9 +13,7 @@ void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
13 if (mem_init_done) 13 if (mem_init_done)
14 p = kzalloc(size, mask); 14 p = kzalloc(size, mask);
15 else { 15 else {
16 p = alloc_bootmem(size); 16 p = memblock_virt_alloc(size, 0);
17 if (p)
18 memset(p, 0, size);
19 } 17 }
20 return p; 18 return p;
21} 19}
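The alloc.c conversion leans on the semantics of the replacement API: memblock_virt_alloc() returns zeroed memory and panics on failure, so the bootmem path's explicit NULL check and memset are no longer needed. An early-boot allocation now reduces to a sketch like:

#include <linux/bootmem.h>	/* declares memblock_virt_alloc() */

	p = memblock_virt_alloc(size, 0);	/* align 0 = default cache-line alignment */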
diff --git a/arch/powerpc/lib/copyuser_power7.S b/arch/powerpc/lib/copyuser_power7.S
index c46c876ac96a..92ee840529bc 100644
--- a/arch/powerpc/lib/copyuser_power7.S
+++ b/arch/powerpc/lib/copyuser_power7.S
@@ -718,4 +718,4 @@ err3; stb r0,0(r3)
718 718
71915: addi r1,r1,STACKFRAMESIZE 71915: addi r1,r1,STACKFRAMESIZE
720 b exit_vmx_usercopy /* tail call optimise */ 720 b exit_vmx_usercopy /* tail call optimise */
721#endif /* CONFiG_ALTIVEC */ 721#endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/lib/devres.c b/arch/powerpc/lib/devres.c
deleted file mode 100644
index 8df55fc3aad6..000000000000
--- a/arch/powerpc/lib/devres.c
+++ /dev/null
@@ -1,43 +0,0 @@
1/*
2 * Copyright (C) 2008 Freescale Semiconductor, Inc.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/device.h> /* devres_*(), devm_ioremap_release() */
11#include <linux/gfp.h>
12#include <linux/io.h> /* ioremap_prot() */
13#include <linux/export.h> /* EXPORT_SYMBOL() */
14
15/**
16 * devm_ioremap_prot - Managed ioremap_prot()
17 * @dev: Generic device to remap IO address for
18 * @offset: BUS offset to map
19 * @size: Size of map
20 * @flags: Page flags
21 *
22 * Managed ioremap_prot(). Map is automatically unmapped on driver
23 * detach.
24 */
25void __iomem *devm_ioremap_prot(struct device *dev, resource_size_t offset,
26 size_t size, unsigned long flags)
27{
28 void __iomem **ptr, *addr;
29
30 ptr = devres_alloc(devm_ioremap_release, sizeof(*ptr), GFP_KERNEL);
31 if (!ptr)
32 return NULL;
33
34 addr = ioremap_prot(offset, size, flags);
35 if (addr) {
36 *ptr = addr;
37 devres_add(dev, ptr);
38 } else
39 devres_free(ptr);
40
41 return addr;
42}
43EXPORT_SYMBOL(devm_ioremap_prot);
diff --git a/arch/powerpc/lib/memcpy_power7.S b/arch/powerpc/lib/memcpy_power7.S
index 2ff5c142f87b..0830587df16e 100644
--- a/arch/powerpc/lib/memcpy_power7.S
+++ b/arch/powerpc/lib/memcpy_power7.S
@@ -653,4 +653,4 @@ _GLOBAL(memcpy_power7)
65315: addi r1,r1,STACKFRAMESIZE 65315: addi r1,r1,STACKFRAMESIZE
654 ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1) 654 ld r3,-STACKFRAMESIZE+STK_REG(R31)(r1)
655 b exit_vmx_copy /* tail call optimise */ 655 b exit_vmx_copy /* tail call optimise */
656#endif /* CONFiG_ALTIVEC */ 656#endif /* CONFIG_ALTIVEC */
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index 54651fc2d412..dc885b30f7a6 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1865,6 +1865,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1865 } 1865 }
1866 goto ldst_done; 1866 goto ldst_done;
1867 1867
1868#ifdef CONFIG_PPC_FPU
1868 case LOAD_FP: 1869 case LOAD_FP:
1869 if (regs->msr & MSR_LE) 1870 if (regs->msr & MSR_LE)
1870 return 0; 1871 return 0;
@@ -1873,7 +1874,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1873 else 1874 else
1874 err = do_fp_load(op.reg, do_lfd, op.ea, size, regs); 1875 err = do_fp_load(op.reg, do_lfd, op.ea, size, regs);
1875 goto ldst_done; 1876 goto ldst_done;
1876 1877#endif
1877#ifdef CONFIG_ALTIVEC 1878#ifdef CONFIG_ALTIVEC
1878 case LOAD_VMX: 1879 case LOAD_VMX:
1879 if (regs->msr & MSR_LE) 1880 if (regs->msr & MSR_LE)
@@ -1919,6 +1920,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1919 err = write_mem(op.val, op.ea, size, regs); 1920 err = write_mem(op.val, op.ea, size, regs);
1920 goto ldst_done; 1921 goto ldst_done;
1921 1922
1923#ifdef CONFIG_PPC_FPU
1922 case STORE_FP: 1924 case STORE_FP:
1923 if (regs->msr & MSR_LE) 1925 if (regs->msr & MSR_LE)
1924 return 0; 1926 return 0;
@@ -1927,7 +1929,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1927 else 1929 else
1928 err = do_fp_store(op.reg, do_stfd, op.ea, size, regs); 1930 err = do_fp_store(op.reg, do_stfd, op.ea, size, regs);
1929 goto ldst_done; 1931 goto ldst_done;
1930 1932#endif
1931#ifdef CONFIG_ALTIVEC 1933#ifdef CONFIG_ALTIVEC
1932 case STORE_VMX: 1934 case STORE_VMX:
1933 if (regs->msr & MSR_LE) 1935 if (regs->msr & MSR_LE)
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile
index 325e861616a1..438dcd3fd0d1 100644
--- a/arch/powerpc/mm/Makefile
+++ b/arch/powerpc/mm/Makefile
@@ -6,7 +6,7 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror
6 6
7ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC) 7ccflags-$(CONFIG_PPC64) := $(NO_MINIMAL_TOC)
8 8
9obj-y := fault.o mem.o pgtable.o gup.o mmap.o \ 9obj-y := fault.o mem.o pgtable.o mmap.o \
10 init_$(CONFIG_WORD_SIZE).o \ 10 init_$(CONFIG_WORD_SIZE).o \
11 pgtable_$(CONFIG_WORD_SIZE).o 11 pgtable_$(CONFIG_WORD_SIZE).o
12obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \ 12obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
index 08d659a9fcdb..eb79907f34fa 100644
--- a/arch/powerpc/mm/fault.c
+++ b/arch/powerpc/mm/fault.c
@@ -43,7 +43,6 @@
43#include <asm/tlbflush.h> 43#include <asm/tlbflush.h>
44#include <asm/siginfo.h> 44#include <asm/siginfo.h>
45#include <asm/debug.h> 45#include <asm/debug.h>
46#include <mm/mmu_decl.h>
47 46
48#include "icswx.h" 47#include "icswx.h"
49 48
@@ -380,12 +379,6 @@ good_area:
380 goto bad_area; 379 goto bad_area;
381#endif /* CONFIG_6xx */ 380#endif /* CONFIG_6xx */
382#if defined(CONFIG_8xx) 381#if defined(CONFIG_8xx)
383 /* 8xx sometimes need to load a invalid/non-present TLBs.
384 * These must be invalidated separately as linux mm don't.
385 */
386 if (error_code & 0x40000000) /* no translation? */
387 _tlbil_va(address, 0, 0, 0);
388
389 /* The MPC8xx seems to always set 0x80000000, which is 382 /* The MPC8xx seems to always set 0x80000000, which is
390 * "undefined". Of those that can be set, this is the only 383 * "undefined". Of those that can be set, this is the only
391 * one which seems bad. 384 * one which seems bad.
diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c
deleted file mode 100644
index d8746684f606..000000000000
--- a/arch/powerpc/mm/gup.c
+++ /dev/null
@@ -1,235 +0,0 @@
1/*
2 * Lockless get_user_pages_fast for powerpc
3 *
4 * Copyright (C) 2008 Nick Piggin
5 * Copyright (C) 2008 Novell Inc.
6 */
7#undef DEBUG
8
9#include <linux/sched.h>
10#include <linux/mm.h>
11#include <linux/hugetlb.h>
12#include <linux/vmstat.h>
13#include <linux/pagemap.h>
14#include <linux/rwsem.h>
15#include <asm/pgtable.h>
16
17#ifdef __HAVE_ARCH_PTE_SPECIAL
18
19/*
20 * The performance critical leaf functions are made noinline otherwise gcc
21 * inlines everything into a single function which results in too much
22 * register pressure.
23 */
24static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
25 unsigned long end, int write, struct page **pages, int *nr)
26{
27 unsigned long mask, result;
28 pte_t *ptep;
29
30 result = _PAGE_PRESENT|_PAGE_USER;
31 if (write)
32 result |= _PAGE_RW;
33 mask = result | _PAGE_SPECIAL;
34
35 ptep = pte_offset_kernel(&pmd, addr);
36 do {
37 pte_t pte = ACCESS_ONCE(*ptep);
38 struct page *page;
39 /*
40 * Similar to the PMD case, NUMA hinting must take slow path
41 */
42 if (pte_numa(pte))
43 return 0;
44
45 if ((pte_val(pte) & mask) != result)
46 return 0;
47 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
48 page = pte_page(pte);
49 if (!page_cache_get_speculative(page))
50 return 0;
51 if (unlikely(pte_val(pte) != pte_val(*ptep))) {
52 put_page(page);
53 return 0;
54 }
55 pages[*nr] = page;
56 (*nr)++;
57
58 } while (ptep++, addr += PAGE_SIZE, addr != end);
59
60 return 1;
61}
62
63static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
64 int write, struct page **pages, int *nr)
65{
66 unsigned long next;
67 pmd_t *pmdp;
68
69 pmdp = pmd_offset(&pud, addr);
70 do {
71 pmd_t pmd = ACCESS_ONCE(*pmdp);
72
73 next = pmd_addr_end(addr, end);
74 /*
75 * If we find a splitting transparent hugepage we
76 * return zero. That will result in taking the slow
77 * path which will call wait_split_huge_page()
78 * if the pmd is still in splitting state
79 */
80 if (pmd_none(pmd) || pmd_trans_splitting(pmd))
81 return 0;
82 if (pmd_huge(pmd) || pmd_large(pmd)) {
83 /*
84 * NUMA hinting faults need to be handled in the GUP
85 * slowpath for accounting purposes and so that they
86 * can be serialised against THP migration.
87 */
88 if (pmd_numa(pmd))
89 return 0;
90
91 if (!gup_hugepte((pte_t *)pmdp, PMD_SIZE, addr, next,
92 write, pages, nr))
93 return 0;
94 } else if (is_hugepd(pmdp)) {
95 if (!gup_hugepd((hugepd_t *)pmdp, PMD_SHIFT,
96 addr, next, write, pages, nr))
97 return 0;
98 } else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
99 return 0;
100 } while (pmdp++, addr = next, addr != end);
101
102 return 1;
103}
104
105static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
106 int write, struct page **pages, int *nr)
107{
108 unsigned long next;
109 pud_t *pudp;
110
111 pudp = pud_offset(&pgd, addr);
112 do {
113 pud_t pud = ACCESS_ONCE(*pudp);
114
115 next = pud_addr_end(addr, end);
116 if (pud_none(pud))
117 return 0;
118 if (pud_huge(pud)) {
119 if (!gup_hugepte((pte_t *)pudp, PUD_SIZE, addr, next,
120 write, pages, nr))
121 return 0;
122 } else if (is_hugepd(pudp)) {
123 if (!gup_hugepd((hugepd_t *)pudp, PUD_SHIFT,
124 addr, next, write, pages, nr))
125 return 0;
126 } else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
127 return 0;
128 } while (pudp++, addr = next, addr != end);
129
130 return 1;
131}
132
133int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
134 struct page **pages)
135{
136 struct mm_struct *mm = current->mm;
137 unsigned long addr, len, end;
138 unsigned long next;
139 unsigned long flags;
140 pgd_t *pgdp;
141 int nr = 0;
142
143 pr_devel("%s(%lx,%x,%s)\n", __func__, start, nr_pages, write ? "write" : "read");
144
145 start &= PAGE_MASK;
146 addr = start;
147 len = (unsigned long) nr_pages << PAGE_SHIFT;
148 end = start + len;
149
150 if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
151 start, len)))
152 return 0;
153
154 pr_devel(" aligned: %lx .. %lx\n", start, end);
155
156 /*
157 * XXX: batch / limit 'nr', to avoid large irq off latency
158 * needs some instrumenting to determine the common sizes used by
159 * important workloads (eg. DB2), and whether limiting the batch size
160 * will decrease performance.
161 *
162 * It seems like we're in the clear for the moment. Direct-IO is
163 * the main guy that batches up lots of get_user_pages, and even
164 * they are limited to 64-at-a-time which is not so many.
165 */
166 /*
167 * This doesn't prevent pagetable teardown, but does prevent
168 * the pagetables from being freed on powerpc.
169 *
170 * So long as we atomically load page table pointers versus teardown,
171 * we can follow the address down to the the page and take a ref on it.
172 */
173 local_irq_save(flags);
174
175 pgdp = pgd_offset(mm, addr);
176 do {
177 pgd_t pgd = ACCESS_ONCE(*pgdp);
178
179 pr_devel(" %016lx: normal pgd %p\n", addr,
180 (void *)pgd_val(pgd));
181 next = pgd_addr_end(addr, end);
182 if (pgd_none(pgd))
183 break;
184 if (pgd_huge(pgd)) {
185 if (!gup_hugepte((pte_t *)pgdp, PGDIR_SIZE, addr, next,
186 write, pages, &nr))
187 break;
188 } else if (is_hugepd(pgdp)) {
189 if (!gup_hugepd((hugepd_t *)pgdp, PGDIR_SHIFT,
190 addr, next, write, pages, &nr))
191 break;
192 } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
193 break;
194 } while (pgdp++, addr = next, addr != end);
195
196 local_irq_restore(flags);
197
198 return nr;
199}
200
201int get_user_pages_fast(unsigned long start, int nr_pages, int write,
202 struct page **pages)
203{
204 struct mm_struct *mm = current->mm;
205 int nr, ret;
206
207 start &= PAGE_MASK;
208 nr = __get_user_pages_fast(start, nr_pages, write, pages);
209 ret = nr;
210
211 if (nr < nr_pages) {
212 pr_devel(" slow path ! nr = %d\n", nr);
213
214 /* Try to get the remaining pages with get_user_pages */
215 start += nr << PAGE_SHIFT;
216 pages += nr;
217
218 down_read(&mm->mmap_sem);
219 ret = get_user_pages(current, mm, start,
220 nr_pages - nr, write, 0, pages, NULL);
221 up_read(&mm->mmap_sem);
222
223 /* Have to be a bit careful with return values */
224 if (nr > 0) {
225 if (ret < 0)
226 ret = nr;
227 else
228 ret += nr;
229 }
230 }
231
232 return ret;
233}
234
235#endif /* __HAVE_ARCH_PTE_SPECIAL */
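Removing the arch-specific fast-GUP file does not change the interface callers see; powerpc presumably falls back to the generic lockless implementation selected elsewhere in this series, so existing users keep working unmodified. A usage sketch with the era's signature:

	struct page *pages[4];
	int got;

	got = get_user_pages_fast(addr, 4, 1 /* write */, pages);
	/* got = pages pinned; the slow-path fallback happens internally */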
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S
index 057cbbb4c576..463174a4a647 100644
--- a/arch/powerpc/mm/hash_low_64.S
+++ b/arch/powerpc/mm/hash_low_64.S
@@ -46,7 +46,8 @@
46 46
47/* 47/*
48 * _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, 48 * _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
49 * pte_t *ptep, unsigned long trap, int local, int ssize) 49 * pte_t *ptep, unsigned long trap, unsigned long flags,
50 * int ssize)
50 * 51 *
51 * Adds a 4K page to the hash table in a segment of 4K pages only 52 * Adds a 4K page to the hash table in a segment of 4K pages only
52 */ 53 */
@@ -298,7 +299,7 @@ htab_modify_pte:
298 li r6,MMU_PAGE_4K /* base page size */ 299 li r6,MMU_PAGE_4K /* base page size */
299 li r7,MMU_PAGE_4K /* actual page size */ 300 li r7,MMU_PAGE_4K /* actual page size */
300 ld r8,STK_PARAM(R9)(r1) /* segment size */ 301 ld r8,STK_PARAM(R9)(r1) /* segment size */
301 ld r9,STK_PARAM(R8)(r1) /* get "local" param */ 302 ld r9,STK_PARAM(R8)(r1) /* get "flags" param */
302.globl htab_call_hpte_updatepp 303.globl htab_call_hpte_updatepp
303htab_call_hpte_updatepp: 304htab_call_hpte_updatepp:
304 bl . /* Patched by htab_finish_init() */ 305 bl . /* Patched by htab_finish_init() */
@@ -338,8 +339,8 @@ htab_pte_insert_failure:
338 *****************************************************************************/ 339 *****************************************************************************/
339 340
340/* _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid, 341/* _hash_page_4K(unsigned long ea, unsigned long access, unsigned long vsid,
 341 * pte_t *ptep, unsigned long trap, int local, int ssize, 342 * pte_t *ptep, unsigned long trap, unsigned long flags,
342 * int subpg_prot) 343 * int ssize, int subpg_prot)
343 */ 344 */
344 345
345/* 346/*
@@ -514,7 +515,7 @@ htab_insert_pte:
514 andis. r0,r31,_PAGE_4K_PFN@h 515 andis. r0,r31,_PAGE_4K_PFN@h
515 srdi r5,r31,PTE_RPN_SHIFT 516 srdi r5,r31,PTE_RPN_SHIFT
516 bne- htab_special_pfn 517 bne- htab_special_pfn
517 sldi r5,r5,PAGE_SHIFT-HW_PAGE_SHIFT 518 sldi r5,r5,PAGE_FACTOR
518 add r5,r5,r25 519 add r5,r5,r25
519htab_special_pfn: 520htab_special_pfn:
520 sldi r5,r5,HW_PAGE_SHIFT 521 sldi r5,r5,HW_PAGE_SHIFT
@@ -544,7 +545,7 @@ htab_call_hpte_insert1:
544 andis. r0,r31,_PAGE_4K_PFN@h 545 andis. r0,r31,_PAGE_4K_PFN@h
545 srdi r5,r31,PTE_RPN_SHIFT 546 srdi r5,r31,PTE_RPN_SHIFT
546 bne- 3f 547 bne- 3f
547 sldi r5,r5,PAGE_SHIFT-HW_PAGE_SHIFT 548 sldi r5,r5,PAGE_FACTOR
548 add r5,r5,r25 549 add r5,r5,r25
5493: sldi r5,r5,HW_PAGE_SHIFT 5503: sldi r5,r5,HW_PAGE_SHIFT
550 551
@@ -594,7 +595,7 @@ htab_inval_old_hpte:
594 li r5,0 /* PTE.hidx */ 595 li r5,0 /* PTE.hidx */
595 li r6,MMU_PAGE_64K /* psize */ 596 li r6,MMU_PAGE_64K /* psize */
596 ld r7,STK_PARAM(R9)(r1) /* ssize */ 597 ld r7,STK_PARAM(R9)(r1) /* ssize */
597 ld r8,STK_PARAM(R8)(r1) /* local */ 598 ld r8,STK_PARAM(R8)(r1) /* flags */
598 bl flush_hash_page 599 bl flush_hash_page
599 /* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */ 600 /* Clear out _PAGE_HPTE_SUB bits in the new linux PTE */
600 lis r0,_PAGE_HPTE_SUB@h 601 lis r0,_PAGE_HPTE_SUB@h
@@ -666,7 +667,7 @@ htab_modify_pte:
666 li r6,MMU_PAGE_4K /* base page size */ 667 li r6,MMU_PAGE_4K /* base page size */
667 li r7,MMU_PAGE_4K /* actual page size */ 668 li r7,MMU_PAGE_4K /* actual page size */
668 ld r8,STK_PARAM(R9)(r1) /* segment size */ 669 ld r8,STK_PARAM(R9)(r1) /* segment size */
669 ld r9,STK_PARAM(R8)(r1) /* get "local" param */ 670 ld r9,STK_PARAM(R8)(r1) /* get "flags" param */
670.globl htab_call_hpte_updatepp 671.globl htab_call_hpte_updatepp
671htab_call_hpte_updatepp: 672htab_call_hpte_updatepp:
672 bl . /* patched by htab_finish_init() */ 673 bl . /* patched by htab_finish_init() */
@@ -962,7 +963,7 @@ ht64_modify_pte:
962 li r6,MMU_PAGE_64K /* base page size */ 963 li r6,MMU_PAGE_64K /* base page size */
963 li r7,MMU_PAGE_64K /* actual page size */ 964 li r7,MMU_PAGE_64K /* actual page size */
964 ld r8,STK_PARAM(R9)(r1) /* segment size */ 965 ld r8,STK_PARAM(R9)(r1) /* segment size */
965 ld r9,STK_PARAM(R8)(r1) /* get "local" param */ 966 ld r9,STK_PARAM(R8)(r1) /* get "flags" param */
966.globl ht64_call_hpte_updatepp 967.globl ht64_call_hpte_updatepp
967ht64_call_hpte_updatepp: 968ht64_call_hpte_updatepp:
968 bl . /* patched by htab_finish_init() */ 969 bl . /* patched by htab_finish_init() */
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index ae4962a06476..9c4880ddecd6 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -283,19 +283,17 @@ static long native_hpte_remove(unsigned long hpte_group)
283 283
284static long native_hpte_updatepp(unsigned long slot, unsigned long newpp, 284static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
285 unsigned long vpn, int bpsize, 285 unsigned long vpn, int bpsize,
286 int apsize, int ssize, int local) 286 int apsize, int ssize, unsigned long flags)
287{ 287{
288 struct hash_pte *hptep = htab_address + slot; 288 struct hash_pte *hptep = htab_address + slot;
289 unsigned long hpte_v, want_v; 289 unsigned long hpte_v, want_v;
290 int ret = 0; 290 int ret = 0, local = 0;
291 291
292 want_v = hpte_encode_avpn(vpn, bpsize, ssize); 292 want_v = hpte_encode_avpn(vpn, bpsize, ssize);
293 293
294 DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)", 294 DBG_LOW(" update(vpn=%016lx, avpnv=%016lx, group=%lx, newpp=%lx)",
295 vpn, want_v & HPTE_V_AVPN, slot, newpp); 295 vpn, want_v & HPTE_V_AVPN, slot, newpp);
296 296
297 native_lock_hpte(hptep);
298
299 hpte_v = be64_to_cpu(hptep->v); 297 hpte_v = be64_to_cpu(hptep->v);
300 /* 298 /*
301 * We need to invalidate the TLB always because hpte_remove doesn't do 299 * We need to invalidate the TLB always because hpte_remove doesn't do
@@ -308,15 +306,30 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
308 DBG_LOW(" -> miss\n"); 306 DBG_LOW(" -> miss\n");
309 ret = -1; 307 ret = -1;
310 } else { 308 } else {
311 DBG_LOW(" -> hit\n"); 309 native_lock_hpte(hptep);
312 /* Update the HPTE */ 310 /* recheck with locks held */
313 hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) & ~(HPTE_R_PP | HPTE_R_N)) | 311 hpte_v = be64_to_cpu(hptep->v);
314 (newpp & (HPTE_R_PP | HPTE_R_N | HPTE_R_C))); 312 if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
313 !(hpte_v & HPTE_V_VALID))) {
314 ret = -1;
315 } else {
316 DBG_LOW(" -> hit\n");
317 /* Update the HPTE */
318 hptep->r = cpu_to_be64((be64_to_cpu(hptep->r) &
319 ~(HPTE_R_PP | HPTE_R_N)) |
320 (newpp & (HPTE_R_PP | HPTE_R_N |
321 HPTE_R_C)));
322 }
323 native_unlock_hpte(hptep);
315 } 324 }
316 native_unlock_hpte(hptep);
317 325
318 /* Ensure it is out of the tlb too. */ 326 if (flags & HPTE_LOCAL_UPDATE)
319 tlbie(vpn, bpsize, apsize, ssize, local); 327 local = 1;
328 /*
329 * Ensure it is out of the tlb too if it is not a nohpte fault
330 */
331 if (!(flags & HPTE_NOHPTE_UPDATE))
332 tlbie(vpn, bpsize, apsize, ssize, local);
320 333
321 return ret; 334 return ret;
322} 335}
@@ -419,7 +432,7 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
419static void native_hugepage_invalidate(unsigned long vsid, 432static void native_hugepage_invalidate(unsigned long vsid,
420 unsigned long addr, 433 unsigned long addr,
421 unsigned char *hpte_slot_array, 434 unsigned char *hpte_slot_array,
422 int psize, int ssize) 435 int psize, int ssize, int local)
423{ 436{
424 int i; 437 int i;
425 struct hash_pte *hptep; 438 struct hash_pte *hptep;
@@ -465,7 +478,7 @@ static void native_hugepage_invalidate(unsigned long vsid,
465 * instruction compares entry_VA in tlb with the VA specified 478 * instruction compares entry_VA in tlb with the VA specified
466 * here 479 * here
467 */ 480 */
468 tlbie(vpn, psize, actual_psize, ssize, 0); 481 tlbie(vpn, psize, actual_psize, ssize, local);
469 } 482 }
470 local_irq_restore(flags); 483 local_irq_restore(flags);
471} 484}
@@ -629,7 +642,7 @@ static void native_flush_hash_range(unsigned long number, int local)
629 unsigned long want_v; 642 unsigned long want_v;
630 unsigned long flags; 643 unsigned long flags;
631 real_pte_t pte; 644 real_pte_t pte;
632 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 645 struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
633 unsigned long psize = batch->psize; 646 unsigned long psize = batch->psize;
634 int ssize = batch->ssize; 647 int ssize = batch->ssize;
635 int i; 648 int i;
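The native_hpte_updatepp() rework above is a classic lock-avoidance shape: peek at the entry without the lock, bail out on the common miss, and lock-and-recheck only on an apparent hit, since the entry may have changed before the lock was taken. Reduced to a sketch (match() and update() are stand-ins for the HPTE_V_COMPARE/valid test and the PP-bit update):

	v = be64_to_cpu(hptep->v);		/* unlocked peek */
	if (!match(v, want_v))
		return -1;			/* miss: lock never taken */

	native_lock_hpte(hptep);
	v = be64_to_cpu(hptep->v);		/* recheck under the lock */
	if (match(v, want_v))
		update(hptep);			/* hit confirmed */
	else
		ret = -1;			/* raced with an invalidate */
	native_unlock_hpte(hptep);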
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index d5339a3b9945..e56a307bc676 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -989,7 +989,9 @@ static void check_paca_psize(unsigned long ea, struct mm_struct *mm,
989 * -1 - critical hash insertion error 989 * -1 - critical hash insertion error
990 * -2 - access not permitted by subpage protection mechanism 990 * -2 - access not permitted by subpage protection mechanism
991 */ 991 */
992int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, unsigned long trap) 992int hash_page_mm(struct mm_struct *mm, unsigned long ea,
993 unsigned long access, unsigned long trap,
994 unsigned long flags)
993{ 995{
994 enum ctx_state prev_state = exception_enter(); 996 enum ctx_state prev_state = exception_enter();
995 pgd_t *pgdir; 997 pgd_t *pgdir;
@@ -997,7 +999,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, u
997 pte_t *ptep; 999 pte_t *ptep;
998 unsigned hugeshift; 1000 unsigned hugeshift;
999 const struct cpumask *tmp; 1001 const struct cpumask *tmp;
1000 int rc, user_region = 0, local = 0; 1002 int rc, user_region = 0;
1001 int psize, ssize; 1003 int psize, ssize;
1002 1004
1003 DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n", 1005 DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
@@ -1049,7 +1051,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, u
1049 /* Check CPU locality */ 1051 /* Check CPU locality */
1050 tmp = cpumask_of(smp_processor_id()); 1052 tmp = cpumask_of(smp_processor_id());
1051 if (user_region && cpumask_equal(mm_cpumask(mm), tmp)) 1053 if (user_region && cpumask_equal(mm_cpumask(mm), tmp))
1052 local = 1; 1054 flags |= HPTE_LOCAL_UPDATE;
1053 1055
1054#ifndef CONFIG_PPC_64K_PAGES 1056#ifndef CONFIG_PPC_64K_PAGES
1055 /* If we use 4K pages and our psize is not 4K, then we might 1057 /* If we use 4K pages and our psize is not 4K, then we might
@@ -1086,11 +1088,11 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, u
1086 if (hugeshift) { 1088 if (hugeshift) {
1087 if (pmd_trans_huge(*(pmd_t *)ptep)) 1089 if (pmd_trans_huge(*(pmd_t *)ptep))
1088 rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep, 1090 rc = __hash_page_thp(ea, access, vsid, (pmd_t *)ptep,
1089 trap, local, ssize, psize); 1091 trap, flags, ssize, psize);
1090#ifdef CONFIG_HUGETLB_PAGE 1092#ifdef CONFIG_HUGETLB_PAGE
1091 else 1093 else
1092 rc = __hash_page_huge(ea, access, vsid, ptep, trap, 1094 rc = __hash_page_huge(ea, access, vsid, ptep, trap,
1093 local, ssize, hugeshift, psize); 1095 flags, ssize, hugeshift, psize);
1094#else 1096#else
1095 else { 1097 else {
1096 /* 1098 /*
@@ -1149,7 +1151,8 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, u
1149 1151
1150#ifdef CONFIG_PPC_HAS_HASH_64K 1152#ifdef CONFIG_PPC_HAS_HASH_64K
1151 if (psize == MMU_PAGE_64K) 1153 if (psize == MMU_PAGE_64K)
1152 rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize); 1154 rc = __hash_page_64K(ea, access, vsid, ptep, trap,
1155 flags, ssize);
1153 else 1156 else
1154#endif /* CONFIG_PPC_HAS_HASH_64K */ 1157#endif /* CONFIG_PPC_HAS_HASH_64K */
1155 { 1158 {
@@ -1158,7 +1161,7 @@ int hash_page_mm(struct mm_struct *mm, unsigned long ea, unsigned long access, u
1158 rc = -2; 1161 rc = -2;
1159 else 1162 else
1160 rc = __hash_page_4K(ea, access, vsid, ptep, trap, 1163 rc = __hash_page_4K(ea, access, vsid, ptep, trap,
1161 local, ssize, spp); 1164 flags, ssize, spp);
1162 } 1165 }
1163 1166
1164 /* Dump some info in case of hash insertion failure, they should 1167 /* Dump some info in case of hash insertion failure, they should
@@ -1181,14 +1184,19 @@ bail:
1181} 1184}
1182EXPORT_SYMBOL_GPL(hash_page_mm); 1185EXPORT_SYMBOL_GPL(hash_page_mm);
1183 1186
1184int hash_page(unsigned long ea, unsigned long access, unsigned long trap) 1187int hash_page(unsigned long ea, unsigned long access, unsigned long trap,
1188 unsigned long dsisr)
1185{ 1189{
1190 unsigned long flags = 0;
1186 struct mm_struct *mm = current->mm; 1191 struct mm_struct *mm = current->mm;
1187 1192
1188 if (REGION_ID(ea) == VMALLOC_REGION_ID) 1193 if (REGION_ID(ea) == VMALLOC_REGION_ID)
1189 mm = &init_mm; 1194 mm = &init_mm;
1190 1195
1191 return hash_page_mm(mm, ea, access, trap); 1196 if (dsisr & DSISR_NOHPTE)
1197 flags |= HPTE_NOHPTE_UPDATE;
1198
1199 return hash_page_mm(mm, ea, access, trap, flags);
1192} 1200}
1193EXPORT_SYMBOL_GPL(hash_page); 1201EXPORT_SYMBOL_GPL(hash_page);
1194 1202
@@ -1200,7 +1208,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
1200 pgd_t *pgdir; 1208 pgd_t *pgdir;
1201 pte_t *ptep; 1209 pte_t *ptep;
1202 unsigned long flags; 1210 unsigned long flags;
1203 int rc, ssize, local = 0; 1211 int rc, ssize, update_flags = 0;
1204 1212
1205 BUG_ON(REGION_ID(ea) != USER_REGION_ID); 1213 BUG_ON(REGION_ID(ea) != USER_REGION_ID);
1206 1214
@@ -1251,16 +1259,17 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
1251 1259
1252 /* Is that local to this CPU ? */ 1260 /* Is that local to this CPU ? */
1253 if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id()))) 1261 if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
1254 local = 1; 1262 update_flags |= HPTE_LOCAL_UPDATE;
1255 1263
1256 /* Hash it in */ 1264 /* Hash it in */
1257#ifdef CONFIG_PPC_HAS_HASH_64K 1265#ifdef CONFIG_PPC_HAS_HASH_64K
1258 if (mm->context.user_psize == MMU_PAGE_64K) 1266 if (mm->context.user_psize == MMU_PAGE_64K)
1259 rc = __hash_page_64K(ea, access, vsid, ptep, trap, local, ssize); 1267 rc = __hash_page_64K(ea, access, vsid, ptep, trap,
1268 update_flags, ssize);
1260 else 1269 else
1261#endif /* CONFIG_PPC_HAS_HASH_64K */ 1270#endif /* CONFIG_PPC_HAS_HASH_64K */
1262 rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize, 1271 rc = __hash_page_4K(ea, access, vsid, ptep, trap, update_flags,
1263 subpage_protection(mm, ea)); 1272 ssize, subpage_protection(mm, ea));
1264 1273
1265 /* Dump some info in case of hash insertion failure, they should 1274 /* Dump some info in case of hash insertion failure, they should
1266 * never happen so it is really useful to know if/when they do 1275 * never happen so it is really useful to know if/when they do
@@ -1278,9 +1287,10 @@ out_exit:
1278 * do not forget to update the assembly call site ! 1287 * do not forget to update the assembly call site !
1279 */ 1288 */
1280void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize, 1289void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
1281 int local) 1290 unsigned long flags)
1282{ 1291{
1283 unsigned long hash, index, shift, hidx, slot; 1292 unsigned long hash, index, shift, hidx, slot;
1293 int local = flags & HPTE_LOCAL_UPDATE;
1284 1294
1285 DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn); 1295 DBG_LOW("flush_hash_page(vpn=%016lx)\n", vpn);
1286 pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) { 1296 pte_iterate_hashed_subpages(pte, psize, vpn, index, shift) {
@@ -1315,6 +1325,78 @@ void flush_hash_page(unsigned long vpn, real_pte_t pte, int psize, int ssize,
1315#endif 1325#endif
1316} 1326}
1317 1327
1328#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1329void flush_hash_hugepage(unsigned long vsid, unsigned long addr,
1330 pmd_t *pmdp, unsigned int psize, int ssize,
1331 unsigned long flags)
1332{
1333 int i, max_hpte_count, valid;
1334 unsigned long s_addr;
1335 unsigned char *hpte_slot_array;
1336 unsigned long hidx, shift, vpn, hash, slot;
1337 int local = flags & HPTE_LOCAL_UPDATE;
1338
1339 s_addr = addr & HPAGE_PMD_MASK;
1340 hpte_slot_array = get_hpte_slot_array(pmdp);
1341 /*
1342 * IF we try to do a HUGE PTE update after a withdraw is done.
1343 * we will find the below NULL. This happens when we do
1344 * split_huge_page_pmd
1345 */
1346 if (!hpte_slot_array)
1347 return;
1348
1349 if (ppc_md.hugepage_invalidate) {
1350 ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
1351 psize, ssize, local);
1352 goto tm_abort;
1353 }
1354 /*
1355 * No bulk hpte removal support, invalidate each entry
1356 */
1357 shift = mmu_psize_defs[psize].shift;
1358 max_hpte_count = HPAGE_PMD_SIZE >> shift;
1359 for (i = 0; i < max_hpte_count; i++) {
1360 /*
1361 * 8 bits per hpte entry
1362 * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
1363 */
1364 valid = hpte_valid(hpte_slot_array, i);
1365 if (!valid)
1366 continue;
1367 hidx = hpte_hash_index(hpte_slot_array, i);
1368
1369 /* get the vpn */
1370 addr = s_addr + (i * (1ul << shift));
1371 vpn = hpt_vpn(addr, vsid, ssize);
1372 hash = hpt_hash(vpn, shift, ssize);
1373 if (hidx & _PTEIDX_SECONDARY)
1374 hash = ~hash;
1375
1376 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
1377 slot += hidx & _PTEIDX_GROUP_IX;
1378 ppc_md.hpte_invalidate(slot, vpn, psize,
1379 MMU_PAGE_16M, ssize, local);
1380 }
1381tm_abort:
1382#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1383 /* Transactions are not aborted by tlbiel, only tlbie.
1384 * Without one, syncing a page back to a block device w/ PIO could pick up
1385 * transactional data (bad!) so we force an abort here. Before the
1386 * sync the page will be made read-only, which will flush_hash_page.
1387 * BIG ISSUE here: if the kernel uses a page from userspace without
1388 * unmapping it first, it may see the speculated version.
1389 */
1390 if (local && cpu_has_feature(CPU_FTR_TM) &&
1391 current->thread.regs &&
1392 MSR_TM_ACTIVE(current->thread.regs->msr)) {
1393 tm_enable();
1394 tm_abort(TM_CAUSE_TLBI);
1395 }
1396#endif
1397}
1398#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
1399
1318void flush_hash_range(unsigned long number, int local) 1400void flush_hash_range(unsigned long number, int local)
1319{ 1401{
1320 if (ppc_md.flush_hash_range) 1402 if (ppc_md.flush_hash_range)
@@ -1322,7 +1404,7 @@ void flush_hash_range(unsigned long number, int local)
1322 else { 1404 else {
1323 int i; 1405 int i;
1324 struct ppc64_tlb_batch *batch = 1406 struct ppc64_tlb_batch *batch =
1325 &__get_cpu_var(ppc64_tlb_batch); 1407 this_cpu_ptr(&ppc64_tlb_batch);
1326 1408
1327 for (i = 0; i < number; i++) 1409 for (i = 0; i < number; i++)
1328 flush_hash_page(batch->vpn[i], batch->pte[i], 1410 flush_hash_page(batch->vpn[i], batch->pte[i],
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 5f5e6328c21c..86686514ae13 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -18,60 +18,9 @@
18#include <linux/mm.h> 18#include <linux/mm.h>
19#include <asm/machdep.h> 19#include <asm/machdep.h>
20 20
21static void invalidate_old_hpte(unsigned long vsid, unsigned long addr,
22 pmd_t *pmdp, unsigned int psize, int ssize)
23{
24 int i, max_hpte_count, valid;
25 unsigned long s_addr;
26 unsigned char *hpte_slot_array;
27 unsigned long hidx, shift, vpn, hash, slot;
28
29 s_addr = addr & HPAGE_PMD_MASK;
30 hpte_slot_array = get_hpte_slot_array(pmdp);
31 /*
 32 * If we try to do a HUGE PTE update after a withdraw is done,
 33 * we will find the below NULL. This happens when we do
 34 * split_huge_page_pmd.
35 */
36 if (!hpte_slot_array)
37 return;
38
39 if (ppc_md.hugepage_invalidate)
40 return ppc_md.hugepage_invalidate(vsid, s_addr, hpte_slot_array,
41 psize, ssize);
42 /*
 43 * No bulk hpte removal support; invalidate each entry
44 */
45 shift = mmu_psize_defs[psize].shift;
46 max_hpte_count = HPAGE_PMD_SIZE >> shift;
47 for (i = 0; i < max_hpte_count; i++) {
48 /*
 49 * 8 bits per hpte entry
50 * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
51 */
52 valid = hpte_valid(hpte_slot_array, i);
53 if (!valid)
54 continue;
55 hidx = hpte_hash_index(hpte_slot_array, i);
56
57 /* get the vpn */
58 addr = s_addr + (i * (1ul << shift));
59 vpn = hpt_vpn(addr, vsid, ssize);
60 hash = hpt_hash(vpn, shift, ssize);
61 if (hidx & _PTEIDX_SECONDARY)
62 hash = ~hash;
63
64 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
65 slot += hidx & _PTEIDX_GROUP_IX;
66 ppc_md.hpte_invalidate(slot, vpn, psize,
67 MMU_PAGE_16M, ssize, 0);
68 }
69}
70
71
72int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid, 21int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
73 pmd_t *pmdp, unsigned long trap, int local, int ssize, 22 pmd_t *pmdp, unsigned long trap, unsigned long flags,
74 unsigned int psize) 23 int ssize, unsigned int psize)
75{ 24{
76 unsigned int index, valid; 25 unsigned int index, valid;
77 unsigned char *hpte_slot_array; 26 unsigned char *hpte_slot_array;
@@ -145,7 +94,8 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
145 * hash page table entries. 94 * hash page table entries.
146 */ 95 */
147 if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO)) 96 if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO))
148 invalidate_old_hpte(vsid, ea, pmdp, MMU_PAGE_64K, ssize); 97 flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K,
98 ssize, flags);
149 } 99 }
150 100
151 valid = hpte_valid(hpte_slot_array, index); 101 valid = hpte_valid(hpte_slot_array, index);
@@ -158,7 +108,7 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
158 slot += hidx & _PTEIDX_GROUP_IX; 108 slot += hidx & _PTEIDX_GROUP_IX;
159 109
160 ret = ppc_md.hpte_updatepp(slot, rflags, vpn, 110 ret = ppc_md.hpte_updatepp(slot, rflags, vpn,
161 psize, lpsize, ssize, local); 111 psize, lpsize, ssize, flags);
162 /* 112 /*
163 * We failed to update, try to insert a new entry. 113 * We failed to update, try to insert a new entry.
164 */ 114 */
diff --git a/arch/powerpc/mm/hugetlbpage-book3e.c b/arch/powerpc/mm/hugetlbpage-book3e.c
index 5e4ee2573903..ba47aaf33a4b 100644
--- a/arch/powerpc/mm/hugetlbpage-book3e.c
+++ b/arch/powerpc/mm/hugetlbpage-book3e.c
@@ -33,13 +33,13 @@ static inline int tlb1_next(void)
33 33
34 ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY; 34 ncams = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;
35 35
36 index = __get_cpu_var(next_tlbcam_idx); 36 index = this_cpu_read(next_tlbcam_idx);
37 37
38 /* Just round-robin the entries and wrap when we hit the end */ 38 /* Just round-robin the entries and wrap when we hit the end */
39 if (unlikely(index == ncams - 1)) 39 if (unlikely(index == ncams - 1))
40 __get_cpu_var(next_tlbcam_idx) = tlbcam_index; 40 __this_cpu_write(next_tlbcam_idx, tlbcam_index);
41 else 41 else
42 __get_cpu_var(next_tlbcam_idx)++; 42 __this_cpu_inc(next_tlbcam_idx);
43 43
44 return index; 44 return index;
45} 45}
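This hunk is part of the tree-wide conversion from the address-forming __get_cpu_var() to the this_cpu_* accessors, which operate on the per-cpu variable directly and let the architecture emit a single per-cpu-addressed instruction. A hedged sketch of the round-robin pattern shown above, with demo_idx standing in for next_tlbcam_idx:

#include <linux/percpu.h>

static DEFINE_PER_CPU(int, demo_idx);	/* stand-in for next_tlbcam_idx */

/* Caller must have preemption disabled, as the TLB miss path here does. */
static int demo_next_index(int ncams, int first)
{
	int index = this_cpu_read(demo_idx);		/* was __get_cpu_var(demo_idx) */

	if (index == ncams - 1)
		this_cpu_write(demo_idx, first);	/* was __get_cpu_var(...) = first */
	else
		this_cpu_inc(demo_idx);			/* was __get_cpu_var(...)++ */

	return index;
}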
diff --git a/arch/powerpc/mm/hugetlbpage-hash64.c b/arch/powerpc/mm/hugetlbpage-hash64.c
index a5bcf9301196..d94b1af53a93 100644
--- a/arch/powerpc/mm/hugetlbpage-hash64.c
+++ b/arch/powerpc/mm/hugetlbpage-hash64.c
@@ -19,8 +19,8 @@ extern long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
19 unsigned long vflags, int psize, int ssize); 19 unsigned long vflags, int psize, int ssize);
20 20
21int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid, 21int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
22 pte_t *ptep, unsigned long trap, int local, int ssize, 22 pte_t *ptep, unsigned long trap, unsigned long flags,
23 unsigned int shift, unsigned int mmu_psize) 23 int ssize, unsigned int shift, unsigned int mmu_psize)
24{ 24{
25 unsigned long vpn; 25 unsigned long vpn;
26 unsigned long old_pte, new_pte; 26 unsigned long old_pte, new_pte;
@@ -81,7 +81,7 @@ int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
81 slot += (old_pte & _PAGE_F_GIX) >> 12; 81 slot += (old_pte & _PAGE_F_GIX) >> 12;
82 82
83 if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize, 83 if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize,
84 mmu_psize, ssize, local) == -1) 84 mmu_psize, ssize, flags) == -1)
85 old_pte &= ~_PAGE_HPTEFLAGS; 85 old_pte &= ~_PAGE_HPTEFLAGS;
86 } 86 }
87 87
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 6a4a5fcb9730..5ff4e07d920a 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -62,6 +62,9 @@ static unsigned nr_gpages;
62/* 62/*
 63 * We have PGD_INDEX_SIZE = 12 and PTE_INDEX_SIZE = 8, so that we can have 63 * We have PGD_INDEX_SIZE = 12 and PTE_INDEX_SIZE = 8, so that we can have
64 * 16GB hugepage pte in PGD and 16MB hugepage pte at PMD; 64 * 16GB hugepage pte in PGD and 16MB hugepage pte at PMD;
65 *
66 * Defined in such a way that we can optimize away code block at build time
67 * if CONFIG_HUGETLB_PAGE=n.
65 */ 68 */
66int pmd_huge(pmd_t pmd) 69int pmd_huge(pmd_t pmd)
67{ 70{
@@ -230,7 +233,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
230 if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift)) 233 if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
231 return NULL; 234 return NULL;
232 235
233 return hugepte_offset(hpdp, addr, pdshift); 236 return hugepte_offset(*hpdp, addr, pdshift);
234} 237}
235 238
236#else 239#else
@@ -270,13 +273,13 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr, unsigned long sz
270 if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift)) 273 if (hugepd_none(*hpdp) && __hugepte_alloc(mm, hpdp, addr, pdshift, pshift))
271 return NULL; 274 return NULL;
272 275
273 return hugepte_offset(hpdp, addr, pdshift); 276 return hugepte_offset(*hpdp, addr, pdshift);
274} 277}
275#endif 278#endif
276 279
277#ifdef CONFIG_PPC_FSL_BOOK3E 280#ifdef CONFIG_PPC_FSL_BOOK3E
278/* Build list of addresses of gigantic pages. This function is used in early 281/* Build list of addresses of gigantic pages. This function is used in early
279 * boot before the buddy or bootmem allocator is setup. 282 * boot before the buddy allocator is setup.
280 */ 283 */
281void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages) 284void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
282{ 285{
@@ -312,7 +315,7 @@ int alloc_bootmem_huge_page(struct hstate *hstate)
312 * If gpages can be in highmem we can't use the trick of storing the 315 * If gpages can be in highmem we can't use the trick of storing the
313 * data structure in the page; allocate space for this 316 * data structure in the page; allocate space for this
314 */ 317 */
315 m = alloc_bootmem(sizeof(struct huge_bootmem_page)); 318 m = memblock_virt_alloc(sizeof(struct huge_bootmem_page), 0);
316 m->phys = gpage_freearray[idx].gpage_list[--nr_gpages]; 319 m->phys = gpage_freearray[idx].gpage_list[--nr_gpages];
317#else 320#else
318 m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]); 321 m = phys_to_virt(gpage_freearray[idx].gpage_list[--nr_gpages]);
@@ -352,6 +355,13 @@ static int __init do_gpage_early_setup(char *param, char *val,
352 if (size != 0) { 355 if (size != 0) {
353 if (sscanf(val, "%lu", &npages) <= 0) 356 if (sscanf(val, "%lu", &npages) <= 0)
354 npages = 0; 357 npages = 0;
358 if (npages > MAX_NUMBER_GPAGES) {
359 pr_warn("MMU: %lu pages requested for page "
360 "size %llu KB, limiting to "
361 __stringify(MAX_NUMBER_GPAGES) "\n",
362 npages, size / 1024);
363 npages = MAX_NUMBER_GPAGES;
364 }
355 gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages; 365 gpage_npages[shift_to_mmu_psize(__ffs(size))] = npages;
356 size = 0; 366 size = 0;
357 } 367 }
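The added clamp folds the limit into the format string with __stringify(), so the constant is printed without a runtime argument. A small sketch of the macro idiom, using a hypothetical DEMO_MAX in place of MAX_NUMBER_GPAGES:

#include <linux/kernel.h>
#include <linux/stringify.h>

#define DEMO_MAX 128	/* hypothetical limit standing in for MAX_NUMBER_GPAGES */

static void demo_clamp(unsigned long *npages)
{
	if (*npages > DEMO_MAX) {
		/* __stringify(DEMO_MAX) becomes the string literal "128" at compile time */
		pr_warn("requested %lu pages, limiting to " __stringify(DEMO_MAX) "\n",
			*npages);
		*npages = DEMO_MAX;
	}
}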
@@ -399,7 +409,7 @@ void __init reserve_hugetlb_gpages(void)
399#else /* !PPC_FSL_BOOK3E */ 409#else /* !PPC_FSL_BOOK3E */
400 410
401/* Build list of addresses of gigantic pages. This function is used in early 411/* Build list of addresses of gigantic pages. This function is used in early
402 * boot before the buddy or bootmem allocator is setup. 412 * boot before the buddy allocator is setup.
403 */ 413 */
404void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages) 414void add_gpage(u64 addr, u64 page_size, unsigned long number_of_pages)
405{ 415{
@@ -462,7 +472,7 @@ static void hugepd_free(struct mmu_gather *tlb, void *hugepte)
462{ 472{
463 struct hugepd_freelist **batchp; 473 struct hugepd_freelist **batchp;
464 474
465 batchp = &get_cpu_var(hugepd_freelist_cur); 475 batchp = this_cpu_ptr(&hugepd_freelist_cur);
466 476
467 if (atomic_read(&tlb->mm->mm_users) < 2 || 477 if (atomic_read(&tlb->mm->mm_users) < 2 ||
468 cpumask_equal(mm_cpumask(tlb->mm), 478 cpumask_equal(mm_cpumask(tlb->mm),
@@ -536,7 +546,7 @@ static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
536 do { 546 do {
537 pmd = pmd_offset(pud, addr); 547 pmd = pmd_offset(pud, addr);
538 next = pmd_addr_end(addr, end); 548 next = pmd_addr_end(addr, end);
539 if (!is_hugepd(pmd)) { 549 if (!is_hugepd(__hugepd(pmd_val(*pmd)))) {
540 /* 550 /*
 541 * if it is not a hugepd pointer, we should already find 551 * if it is not a hugepd pointer, we should already find
 542 * it cleared. 552 * it cleared.
@@ -585,7 +595,7 @@ static void hugetlb_free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
585 do { 595 do {
586 pud = pud_offset(pgd, addr); 596 pud = pud_offset(pgd, addr);
587 next = pud_addr_end(addr, end); 597 next = pud_addr_end(addr, end);
588 if (!is_hugepd(pud)) { 598 if (!is_hugepd(__hugepd(pud_val(*pud)))) {
589 if (pud_none_or_clear_bad(pud)) 599 if (pud_none_or_clear_bad(pud))
590 continue; 600 continue;
591 hugetlb_free_pmd_range(tlb, pud, addr, next, floor, 601 hugetlb_free_pmd_range(tlb, pud, addr, next, floor,
@@ -651,7 +661,7 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
651 do { 661 do {
652 next = pgd_addr_end(addr, end); 662 next = pgd_addr_end(addr, end);
653 pgd = pgd_offset(tlb->mm, addr); 663 pgd = pgd_offset(tlb->mm, addr);
654 if (!is_hugepd(pgd)) { 664 if (!is_hugepd(__hugepd(pgd_val(*pgd)))) {
655 if (pgd_none_or_clear_bad(pgd)) 665 if (pgd_none_or_clear_bad(pgd))
656 continue; 666 continue;
657 hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling); 667 hugetlb_free_pud_range(tlb, pgd, addr, next, floor, ceiling);
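These hunks all wrap the raw table entry in __hugepd() before calling is_hugepd(), which now takes a hugepd_t by value instead of a bare pointer. A sketch of the single-member-struct idiom behind that, with definitions assumed to mirror the kernel's:

/* Assumed shape of the strongly typed page-table value (sketch only). */
typedef struct { unsigned long pd; } sketch_hugepd_t;

#define __sketch_hugepd(x)	((sketch_hugepd_t) { (x) })	/* constructor from a raw value */
#define sketch_hugepd_val(x)	((x).pd)

static inline int sketch_is_hugepd(sketch_hugepd_t hpd)
{
	/* Hypothetical tag bit; the real test is architecture-specific. */
	return sketch_hugepd_val(hpd) & 0x1;
}

Because the argument is a distinct struct type, accidentally passing a pmd_t or a bare pointer no longer compiles; the caller must convert explicitly, as in is_hugepd(__hugepd(pmd_val(*pmd))) above.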
@@ -711,12 +721,11 @@ static unsigned long hugepte_addr_end(unsigned long addr, unsigned long end,
711 return (__boundary - 1 < end - 1) ? __boundary : end; 721 return (__boundary - 1 < end - 1) ? __boundary : end;
712} 722}
713 723
714int gup_hugepd(hugepd_t *hugepd, unsigned pdshift, 724int gup_huge_pd(hugepd_t hugepd, unsigned long addr, unsigned pdshift,
715 unsigned long addr, unsigned long end, 725 unsigned long end, int write, struct page **pages, int *nr)
716 int write, struct page **pages, int *nr)
717{ 726{
718 pte_t *ptep; 727 pte_t *ptep;
719 unsigned long sz = 1UL << hugepd_shift(*hugepd); 728 unsigned long sz = 1UL << hugepd_shift(hugepd);
720 unsigned long next; 729 unsigned long next;
721 730
722 ptep = hugepte_offset(hugepd, addr, pdshift); 731 ptep = hugepte_offset(hugepd, addr, pdshift);
@@ -959,7 +968,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
959 else if (pgd_huge(pgd)) { 968 else if (pgd_huge(pgd)) {
960 ret_pte = (pte_t *) pgdp; 969 ret_pte = (pte_t *) pgdp;
961 goto out; 970 goto out;
962 } else if (is_hugepd(&pgd)) 971 } else if (is_hugepd(__hugepd(pgd_val(pgd))))
963 hpdp = (hugepd_t *)&pgd; 972 hpdp = (hugepd_t *)&pgd;
964 else { 973 else {
965 /* 974 /*
@@ -976,7 +985,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
976 else if (pud_huge(pud)) { 985 else if (pud_huge(pud)) {
977 ret_pte = (pte_t *) pudp; 986 ret_pte = (pte_t *) pudp;
978 goto out; 987 goto out;
979 } else if (is_hugepd(&pud)) 988 } else if (is_hugepd(__hugepd(pud_val(pud))))
980 hpdp = (hugepd_t *)&pud; 989 hpdp = (hugepd_t *)&pud;
981 else { 990 else {
982 pdshift = PMD_SHIFT; 991 pdshift = PMD_SHIFT;
@@ -997,7 +1006,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
997 if (pmd_huge(pmd) || pmd_large(pmd)) { 1006 if (pmd_huge(pmd) || pmd_large(pmd)) {
998 ret_pte = (pte_t *) pmdp; 1007 ret_pte = (pte_t *) pmdp;
999 goto out; 1008 goto out;
1000 } else if (is_hugepd(&pmd)) 1009 } else if (is_hugepd(__hugepd(pmd_val(pmd))))
1001 hpdp = (hugepd_t *)&pmd; 1010 hpdp = (hugepd_t *)&pmd;
1002 else 1011 else
1003 return pte_offset_kernel(&pmd, ea); 1012 return pte_offset_kernel(&pmd, ea);
@@ -1006,7 +1015,7 @@ pte_t *find_linux_pte_or_hugepte(pgd_t *pgdir, unsigned long ea, unsigned *shift
1006 if (!hpdp) 1015 if (!hpdp)
1007 return NULL; 1016 return NULL;
1008 1017
1009 ret_pte = hugepte_offset(hpdp, ea, pdshift); 1018 ret_pte = hugepte_offset(*hpdp, ea, pdshift);
1010 pdshift = hugepd_shift(*hpdp); 1019 pdshift = hugepd_shift(*hpdp);
1011out: 1020out:
1012 if (shift) 1021 if (shift)
@@ -1036,14 +1045,6 @@ int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
1036 if ((pte_val(pte) & mask) != mask) 1045 if ((pte_val(pte) & mask) != mask)
1037 return 0; 1046 return 0;
1038 1047
1039#ifdef CONFIG_TRANSPARENT_HUGEPAGE
1040 /*
1041 * check for splitting here
1042 */
1043 if (pmd_trans_splitting(pte_pmd(pte)))
1044 return 0;
1045#endif
1046
1047 /* hugepages are never "special" */ 1048 /* hugepages are never "special" */
1048 VM_BUG_ON(!pfn_valid(pte_pfn(pte))); 1049 VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
1049 1050
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 415a51b028b9..a10be665b645 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -26,7 +26,6 @@
26#include <linux/mm.h> 26#include <linux/mm.h>
27#include <linux/stddef.h> 27#include <linux/stddef.h>
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/bootmem.h>
30#include <linux/highmem.h> 29#include <linux/highmem.h>
31#include <linux/initrd.h> 30#include <linux/initrd.h>
32#include <linux/pagemap.h> 31#include <linux/pagemap.h>
@@ -195,15 +194,6 @@ void __init MMU_init(void)
195 memblock_set_current_limit(lowmem_end_addr); 194 memblock_set_current_limit(lowmem_end_addr);
196} 195}
197 196
198/* This is only called until mem_init is done. */
199void __init *early_get_page(void)
200{
201 if (init_bootmem_done)
202 return alloc_bootmem_pages(PAGE_SIZE);
203 else
204 return __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
205}
206
207#ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */ 197#ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */
208void setup_initial_memory_limit(phys_addr_t first_memblock_base, 198void setup_initial_memory_limit(phys_addr_t first_memblock_base,
209 phys_addr_t first_memblock_size) 199 phys_addr_t first_memblock_size)
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index 3481556a1880..10471f9bb63f 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -34,7 +34,6 @@
34#include <linux/vmalloc.h> 34#include <linux/vmalloc.h>
35#include <linux/init.h> 35#include <linux/init.h>
36#include <linux/delay.h> 36#include <linux/delay.h>
37#include <linux/bootmem.h>
38#include <linux/highmem.h> 37#include <linux/highmem.h>
39#include <linux/idr.h> 38#include <linux/idr.h>
40#include <linux/nodemask.h> 39#include <linux/nodemask.h>
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 8ebaac75c940..b7285a5870f8 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -35,6 +35,7 @@
35#include <linux/memblock.h> 35#include <linux/memblock.h>
36#include <linux/hugetlb.h> 36#include <linux/hugetlb.h>
37#include <linux/slab.h> 37#include <linux/slab.h>
38#include <linux/vmalloc.h>
38 39
39#include <asm/pgalloc.h> 40#include <asm/pgalloc.h>
40#include <asm/prom.h> 41#include <asm/prom.h>
@@ -60,7 +61,6 @@
60#define CPU_FTR_NOEXECUTE 0 61#define CPU_FTR_NOEXECUTE 0
61#endif 62#endif
62 63
63int init_bootmem_done;
64int mem_init_done; 64int mem_init_done;
65unsigned long long memory_limit; 65unsigned long long memory_limit;
66 66
@@ -144,8 +144,17 @@ int arch_remove_memory(u64 start, u64 size)
144 144
145 zone = page_zone(pfn_to_page(start_pfn)); 145 zone = page_zone(pfn_to_page(start_pfn));
146 ret = __remove_pages(zone, start_pfn, nr_pages); 146 ret = __remove_pages(zone, start_pfn, nr_pages);
147 if (!ret && (ppc_md.remove_memory)) 147 if (ret)
148 ret = ppc_md.remove_memory(start, size); 148 return ret;
149
150 /* Remove htab bolted mappings for this section of memory */
151 start = (unsigned long)__va(start);
152 ret = remove_section_mapping(start, start + size);
153
154 /* Ensure all vmalloc mappings are flushed in case they also
155 * hit that section of memory
156 */
157 vm_unmap_aliases();
149 158
150 return ret; 159 return ret;
151} 160}
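arch_remove_memory() now tears down the bolted hash-table mappings itself and flushes lazy vmalloc aliases, instead of deferring to an optional platform hook. A condensed sketch of the new flow, using the helpers named in the hunk above:

static int sketch_remove_memory(u64 start, u64 size)
{
	unsigned long start_pfn = start >> PAGE_SHIFT;
	unsigned long nr_pages = size >> PAGE_SHIFT;
	struct zone *zone = page_zone(pfn_to_page(start_pfn));
	int ret;

	ret = __remove_pages(zone, start_pfn, nr_pages);
	if (ret)
		return ret;

	/* Drop the htab bolted mappings covering this section of the linear map. */
	start = (unsigned long)__va(start);
	ret = remove_section_mapping(start, start + size);

	/* Lazily unmapped vmalloc areas may still alias this range; flush them. */
	vm_unmap_aliases();

	return ret;
}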
@@ -180,70 +189,23 @@ walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,
180} 189}
181EXPORT_SYMBOL_GPL(walk_system_ram_range); 190EXPORT_SYMBOL_GPL(walk_system_ram_range);
182 191
183/*
184 * Initialize the bootmem system and give it all the memory we
185 * have available. If we are using highmem, we only put the
186 * lowmem into the bootmem system.
187 */
188#ifndef CONFIG_NEED_MULTIPLE_NODES 192#ifndef CONFIG_NEED_MULTIPLE_NODES
189void __init do_init_bootmem(void) 193void __init initmem_init(void)
190{ 194{
191 unsigned long start, bootmap_pages;
192 unsigned long total_pages;
193 struct memblock_region *reg;
194 int boot_mapsize;
195
196 max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; 195 max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
197 total_pages = (memblock_end_of_DRAM() - memstart_addr) >> PAGE_SHIFT; 196 min_low_pfn = MEMORY_START >> PAGE_SHIFT;
198#ifdef CONFIG_HIGHMEM 197#ifdef CONFIG_HIGHMEM
199 total_pages = total_lowmem >> PAGE_SHIFT;
200 max_low_pfn = lowmem_end_addr >> PAGE_SHIFT; 198 max_low_pfn = lowmem_end_addr >> PAGE_SHIFT;
201#endif 199#endif
202 200
203 /*
204 * Find an area to use for the bootmem bitmap. Calculate the size of
205 * bitmap required as (Total Memory) / PAGE_SIZE / BITS_PER_BYTE.
206 * Add 1 additional page in case the address isn't page-aligned.
207 */
208 bootmap_pages = bootmem_bootmap_pages(total_pages);
209
210 start = memblock_alloc(bootmap_pages << PAGE_SHIFT, PAGE_SIZE);
211
212 min_low_pfn = MEMORY_START >> PAGE_SHIFT;
213 boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn);
214
215 /* Place all memblock_regions in the same node and merge contiguous 201 /* Place all memblock_regions in the same node and merge contiguous
216 * memblock_regions 202 * memblock_regions
217 */ 203 */
218 memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0); 204 memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);
219 205
220 /* Add all physical memory to the bootmem map, mark each area
221 * present.
222 */
223#ifdef CONFIG_HIGHMEM
224 free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT);
225
226 /* reserve the sections we're already using */
227 for_each_memblock(reserved, reg) {
228 unsigned long top = reg->base + reg->size - 1;
229 if (top < lowmem_end_addr)
230 reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
231 else if (reg->base < lowmem_end_addr) {
232 unsigned long trunc_size = lowmem_end_addr - reg->base;
233 reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT);
234 }
235 }
236#else
237 free_bootmem_with_active_regions(0, max_pfn);
238
239 /* reserve the sections we're already using */
240 for_each_memblock(reserved, reg)
241 reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
242#endif
243 /* XXX need to clip this if using highmem? */ 206 /* XXX need to clip this if using highmem? */
244 sparse_memory_present_with_active_regions(0); 207 sparse_memory_present_with_active_regions(0);
245 208 sparse_init();
246 init_bootmem_done = 1;
247} 209}
248 210
249/* mark pages that don't exist as nosave */ 211/* mark pages that don't exist as nosave */
@@ -359,14 +321,6 @@ void __init paging_init(void)
359 mark_nonram_nosave(); 321 mark_nonram_nosave();
360} 322}
361 323
362static void __init register_page_bootmem_info(void)
363{
364 int i;
365
366 for_each_online_node(i)
367 register_page_bootmem_info_node(NODE_DATA(i));
368}
369
370void __init mem_init(void) 324void __init mem_init(void)
371{ 325{
372 /* 326 /*
@@ -379,7 +333,6 @@ void __init mem_init(void)
379 swiotlb_init(0); 333 swiotlb_init(0);
380#endif 334#endif
381 335
382 register_page_bootmem_info();
383 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); 336 high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);
384 set_max_mapnr(max_pfn); 337 set_max_mapnr(max_pfn);
385 free_all_bootmem(); 338 free_all_bootmem();
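With bootmem gone there is no second allocator to hand off to: memblock already tracks both available and reserved ranges, so the non-NUMA initmem_init() reduces to publishing the pfn limits, binding all memory to node 0 and letting sparsemem take over. A condensed sketch of the path above:

void __init sketch_initmem_init(void)
{
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = MEMORY_START >> PAGE_SHIFT;

	/* Single node: associate every memblock region with node 0. */
	memblock_set_node(0, (phys_addr_t)ULLONG_MAX, &memblock.memory, 0);

	sparse_memory_present_with_active_regions(0);
	sparse_init();
}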
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 928ebe79668b..9cba6cba2e50 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -421,12 +421,12 @@ void __init mmu_context_init(void)
421 /* 421 /*
422 * Allocate the maps used by context management 422 * Allocate the maps used by context management
423 */ 423 */
424 context_map = alloc_bootmem(CTX_MAP_SIZE); 424 context_map = memblock_virt_alloc(CTX_MAP_SIZE, 0);
425 context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1)); 425 context_mm = memblock_virt_alloc(sizeof(void *) * (last_context + 1), 0);
426#ifndef CONFIG_SMP 426#ifndef CONFIG_SMP
427 stale_map[0] = alloc_bootmem(CTX_MAP_SIZE); 427 stale_map[0] = memblock_virt_alloc(CTX_MAP_SIZE, 0);
428#else 428#else
429 stale_map[boot_cpuid] = alloc_bootmem(CTX_MAP_SIZE); 429 stale_map[boot_cpuid] = memblock_virt_alloc(CTX_MAP_SIZE, 0);
430 430
431 register_cpu_notifier(&mmu_context_cpu_nb); 431 register_cpu_notifier(&mmu_context_cpu_nb);
432#endif 432#endif
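memblock_virt_alloc() is the drop-in replacement used throughout this series for alloc_bootmem(): it allocates from memblock, returns zeroed memory, and panics on failure just as the bootmem variants did. A brief sketch of the call as assumed here, where an alignment of 0 selects the SMP_CACHE_BYTES default:

#include <linux/bootmem.h>	/* memblock_virt_alloc() lives here in this era */

static void __init sketch_alloc_maps(unsigned long map_size, unsigned int nr)
{
	unsigned char *map = memblock_virt_alloc(map_size, 0);
	void **owners = memblock_virt_alloc(sizeof(void *) * nr, 0);

	/* Both buffers arrive zeroed, so no explicit memset() is needed. */
	(void)map;
	(void)owners;
}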
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 9615d82919b8..78c45f392f5b 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -67,7 +67,7 @@ static inline void _tlbil_va(unsigned long address, unsigned int pid,
67{ 67{
68 __tlbil_va(address, pid); 68 __tlbil_va(address, pid);
69} 69}
70#endif /* CONIFG_8xx */ 70#endif /* CONFIG_8xx */
71 71
72#if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_PPC_47x) 72#if defined(CONFIG_PPC_BOOK3E) || defined(CONFIG_PPC_47x)
73extern void _tlbivax_bcast(unsigned long address, unsigned int pid, 73extern void _tlbivax_bcast(unsigned long address, unsigned int pid,
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 9fe6002c1d5a..0257a7d659ef 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -134,28 +134,6 @@ static int __init fake_numa_create_new_node(unsigned long end_pfn,
134 return 0; 134 return 0;
135} 135}
136 136
137/*
138 * get_node_active_region - Return active region containing pfn
139 * Active range returned is empty if none found.
140 * @pfn: The page to return the region for
141 * @node_ar: Returned set to the active region containing @pfn
142 */
143static void __init get_node_active_region(unsigned long pfn,
144 struct node_active_region *node_ar)
145{
146 unsigned long start_pfn, end_pfn;
147 int i, nid;
148
149 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
150 if (pfn >= start_pfn && pfn < end_pfn) {
151 node_ar->nid = nid;
152 node_ar->start_pfn = start_pfn;
153 node_ar->end_pfn = end_pfn;
154 break;
155 }
156 }
157}
158
159static void reset_numa_cpu_lookup_table(void) 137static void reset_numa_cpu_lookup_table(void)
160{ 138{
161 unsigned int cpu; 139 unsigned int cpu;
@@ -928,134 +906,48 @@ static void __init dump_numa_memory_topology(void)
928 } 906 }
929} 907}
930 908
931/*
932 * Allocate some memory, satisfying the memblock or bootmem allocator where
933 * required. nid is the preferred node and end is the physical address of
934 * the highest address in the node.
935 *
936 * Returns the virtual address of the memory.
937 */
938static void __init *careful_zallocation(int nid, unsigned long size,
939 unsigned long align,
940 unsigned long end_pfn)
941{
942 void *ret;
943 int new_nid;
944 unsigned long ret_paddr;
945
946 ret_paddr = __memblock_alloc_base(size, align, end_pfn << PAGE_SHIFT);
947
948 /* retry over all memory */
949 if (!ret_paddr)
950 ret_paddr = __memblock_alloc_base(size, align, memblock_end_of_DRAM());
951
952 if (!ret_paddr)
953 panic("numa.c: cannot allocate %lu bytes for node %d",
954 size, nid);
955
956 ret = __va(ret_paddr);
957
958 /*
959 * We initialize the nodes in numeric order: 0, 1, 2...
960 * and hand over control from the MEMBLOCK allocator to the
961 * bootmem allocator. If this function is called for
962 * node 5, then we know that all nodes <5 are using the
963 * bootmem allocator instead of the MEMBLOCK allocator.
964 *
965 * So, check the nid from which this allocation came
966 * and double check to see if we need to use bootmem
967 * instead of the MEMBLOCK. We don't free the MEMBLOCK memory
968 * since it would be useless.
969 */
970 new_nid = early_pfn_to_nid(ret_paddr >> PAGE_SHIFT);
971 if (new_nid < nid) {
972 ret = __alloc_bootmem_node(NODE_DATA(new_nid),
973 size, align, 0);
974
975 dbg("alloc_bootmem %p %lx\n", ret, size);
976 }
977
978 memset(ret, 0, size);
979 return ret;
980}
981
982static struct notifier_block ppc64_numa_nb = { 909static struct notifier_block ppc64_numa_nb = {
983 .notifier_call = cpu_numa_callback, 910 .notifier_call = cpu_numa_callback,
984 .priority = 1 /* Must run before sched domains notifier. */ 911 .priority = 1 /* Must run before sched domains notifier. */
985}; 912};
986 913
987static void __init mark_reserved_regions_for_nid(int nid) 914/* Initialize NODE_DATA for a node on the local memory */
915static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
988{ 916{
989 struct pglist_data *node = NODE_DATA(nid); 917 u64 spanned_pages = end_pfn - start_pfn;
990 struct memblock_region *reg; 918 const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
991 919 u64 nd_pa;
992 for_each_memblock(reserved, reg) { 920 void *nd;
993 unsigned long physbase = reg->base; 921 int tnid;
994 unsigned long size = reg->size; 922
995 unsigned long start_pfn = physbase >> PAGE_SHIFT; 923 if (spanned_pages)
996 unsigned long end_pfn = PFN_UP(physbase + size); 924 pr_info("Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
997 struct node_active_region node_ar; 925 nid, start_pfn << PAGE_SHIFT,
998 unsigned long node_end_pfn = pgdat_end_pfn(node); 926 (end_pfn << PAGE_SHIFT) - 1);
999 927 else
1000 /* 928 pr_info("Initmem setup node %d\n", nid);
1001 * Check to make sure that this memblock.reserved area is 929
1002 * within the bounds of the node that we care about. 930 nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);
1003 * Checking the nid of the start and end points is not 931 nd = __va(nd_pa);
1004 * sufficient because the reserved area could span the 932
1005 * entire node. 933 /* report and initialize */
1006 */ 934 pr_info(" NODE_DATA [mem %#010Lx-%#010Lx]\n",
1007 if (end_pfn <= node->node_start_pfn || 935 nd_pa, nd_pa + nd_size - 1);
1008 start_pfn >= node_end_pfn) 936 tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
1009 continue; 937 if (tnid != nid)
1010 938 pr_info(" NODE_DATA(%d) on node %d\n", nid, tnid);
1011 get_node_active_region(start_pfn, &node_ar); 939
1012 while (start_pfn < end_pfn && 940 node_data[nid] = nd;
1013 node_ar.start_pfn < node_ar.end_pfn) { 941 memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
1014 unsigned long reserve_size = size; 942 NODE_DATA(nid)->node_id = nid;
1015 /* 943 NODE_DATA(nid)->node_start_pfn = start_pfn;
1016 * if reserved region extends past active region 944 NODE_DATA(nid)->node_spanned_pages = spanned_pages;
1017 * then trim size to active region
1018 */
1019 if (end_pfn > node_ar.end_pfn)
1020 reserve_size = (node_ar.end_pfn << PAGE_SHIFT)
1021 - physbase;
1022 /*
1023 * Only worry about *this* node, others may not
1024 * yet have valid NODE_DATA().
1025 */
1026 if (node_ar.nid == nid) {
1027 dbg("reserve_bootmem %lx %lx nid=%d\n",
1028 physbase, reserve_size, node_ar.nid);
1029 reserve_bootmem_node(NODE_DATA(node_ar.nid),
1030 physbase, reserve_size,
1031 BOOTMEM_DEFAULT);
1032 }
1033 /*
1034 * if reserved region is contained in the active region
1035 * then done.
1036 */
1037 if (end_pfn <= node_ar.end_pfn)
1038 break;
1039
1040 /*
1041 * reserved region extends past the active region
1042 * get next active region that contains this
1043 * reserved region
1044 */
1045 start_pfn = node_ar.end_pfn;
1046 physbase = start_pfn << PAGE_SHIFT;
1047 size = size - reserve_size;
1048 get_node_active_region(start_pfn, &node_ar);
1049 }
1050 }
1051} 945}
1052 946
1053 947void __init initmem_init(void)
1054void __init do_init_bootmem(void)
1055{ 948{
1056 int nid, cpu; 949 int nid, cpu;
1057 950
1058 min_low_pfn = 0;
1059 max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; 951 max_low_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
1060 max_pfn = max_low_pfn; 952 max_pfn = max_low_pfn;
1061 953
@@ -1064,64 +956,18 @@ void __init do_init_bootmem(void)
1064 else 956 else
1065 dump_numa_memory_topology(); 957 dump_numa_memory_topology();
1066 958
959 memblock_dump_all();
960
1067 for_each_online_node(nid) { 961 for_each_online_node(nid) {
1068 unsigned long start_pfn, end_pfn; 962 unsigned long start_pfn, end_pfn;
1069 void *bootmem_vaddr;
1070 unsigned long bootmap_pages;
1071 963
1072 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); 964 get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);
1073 965 setup_node_data(nid, start_pfn, end_pfn);
1074 /*
1075 * Allocate the node structure node local if possible
1076 *
1077 * Be careful moving this around, as it relies on all
1078 * previous nodes' bootmem to be initialized and have
1079 * all reserved areas marked.
1080 */
1081 NODE_DATA(nid) = careful_zallocation(nid,
1082 sizeof(struct pglist_data),
1083 SMP_CACHE_BYTES, end_pfn);
1084
1085 dbg("node %d\n", nid);
1086 dbg("NODE_DATA() = %p\n", NODE_DATA(nid));
1087
1088 NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
1089 NODE_DATA(nid)->node_start_pfn = start_pfn;
1090 NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
1091
1092 if (NODE_DATA(nid)->node_spanned_pages == 0)
1093 continue;
1094
1095 dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
1096 dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);
1097
1098 bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
1099 bootmem_vaddr = careful_zallocation(nid,
1100 bootmap_pages << PAGE_SHIFT,
1101 PAGE_SIZE, end_pfn);
1102
1103 dbg("bootmap_vaddr = %p\n", bootmem_vaddr);
1104
1105 init_bootmem_node(NODE_DATA(nid),
1106 __pa(bootmem_vaddr) >> PAGE_SHIFT,
1107 start_pfn, end_pfn);
1108
1109 free_bootmem_with_active_regions(nid, end_pfn);
1110 /*
1111 * Be very careful about moving this around. Future
1112 * calls to careful_zallocation() depend on this getting
1113 * done correctly.
1114 */
1115 mark_reserved_regions_for_nid(nid);
1116 sparse_memory_present_with_active_regions(nid); 966 sparse_memory_present_with_active_regions(nid);
1117 } 967 }
1118 968
1119 init_bootmem_done = 1; 969 sparse_init();
1120 970
1121 /*
1122 * Now bootmem is initialised we can create the node to cpumask
1123 * lookup tables and setup the cpu callback to populate them.
1124 */
1125 setup_node_to_cpumask_map(); 971 setup_node_to_cpumask_map();
1126 972
1127 reset_numa_cpu_lookup_table(); 973 reset_numa_cpu_lookup_table();
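setup_node_data() above leans on memblock_alloc_try_nid(), which prefers node-local memory and transparently falls back to any node when the preferred one has nothing usable; the code re-derives the actual node with early_pfn_to_nid() only for the log line. A trimmed sketch of that allocation step:

static void __init sketch_alloc_node_data(int nid)
{
	const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES);
	u64 nd_pa;
	int tnid;

	/* Prefer memory on nid; memblock falls back to other nodes by itself. */
	nd_pa = memblock_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid);

	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		pr_info("NODE_DATA(%d) placed on node %d\n", nid, tnid);

	node_data[nid] = __va(nd_pa);
	memset(node_data[nid], 0, sizeof(pg_data_t));
}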
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index cf11342bf519..d545b1231594 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -100,12 +100,11 @@ __init_refok pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long add
100{ 100{
101 pte_t *pte; 101 pte_t *pte;
102 extern int mem_init_done; 102 extern int mem_init_done;
103 extern void *early_get_page(void);
104 103
105 if (mem_init_done) { 104 if (mem_init_done) {
106 pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO); 105 pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
107 } else { 106 } else {
108 pte = (pte_t *)early_get_page(); 107 pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
109 if (pte) 108 if (pte)
110 clear_page(pte); 109 clear_page(pte);
111 } 110 }
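pte_alloc_one_kernel() keeps its two-phase shape, but the early branch now calls memblock directly: before mem_init_done the buddy allocator is unusable, so the page is carved out of memblock and cleared by hand. A condensed sketch of the branch above:

static pte_t *sketch_pte_alloc_kernel(void)
{
	pte_t *pte;

	if (mem_init_done) {
		/* Page allocator is up: get a zeroed page the normal way. */
		pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
	} else {
		/* Early boot: take a physical page straight from memblock. */
		pte = __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE));
		if (pte)
			clear_page(pte);
	}
	return pte;
}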
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index c8d709ab489d..4fe5f64cc179 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -33,9 +33,9 @@
33#include <linux/swap.h> 33#include <linux/swap.h>
34#include <linux/stddef.h> 34#include <linux/stddef.h>
35#include <linux/vmalloc.h> 35#include <linux/vmalloc.h>
36#include <linux/bootmem.h>
37#include <linux/memblock.h> 36#include <linux/memblock.h>
38#include <linux/slab.h> 37#include <linux/slab.h>
38#include <linux/hugetlb.h>
39 39
40#include <asm/pgalloc.h> 40#include <asm/pgalloc.h>
41#include <asm/page.h> 41#include <asm/page.h>
@@ -51,6 +51,7 @@
51#include <asm/cputable.h> 51#include <asm/cputable.h>
52#include <asm/sections.h> 52#include <asm/sections.h>
53#include <asm/firmware.h> 53#include <asm/firmware.h>
54#include <asm/dma.h>
54 55
55#include "mmu_decl.h" 56#include "mmu_decl.h"
56 57
@@ -75,11 +76,7 @@ static __ref void *early_alloc_pgtable(unsigned long size)
75{ 76{
76 void *pt; 77 void *pt;
77 78
78 if (init_bootmem_done) 79 pt = __va(memblock_alloc_base(size, size, __pa(MAX_DMA_ADDRESS)));
79 pt = __alloc_bootmem(size, size, __pa(MAX_DMA_ADDRESS));
80 else
81 pt = __va(memblock_alloc_base(size, size,
82 __pa(MAX_DMA_ADDRESS)));
83 memset(pt, 0, size); 80 memset(pt, 0, size);
84 81
85 return pt; 82 return pt;
@@ -113,10 +110,6 @@ int map_kernel_page(unsigned long ea, unsigned long pa, int flags)
113 __pgprot(flags))); 110 __pgprot(flags)));
114 } else { 111 } else {
115#ifdef CONFIG_PPC_MMU_NOHASH 112#ifdef CONFIG_PPC_MMU_NOHASH
 116 /* Warning! This will blow up if bootmem is not initialized,
 117 * which our ppc64 code is keen to do; we'll need to
 118 * fix it and/or be more careful
119 */
120 pgdp = pgd_offset_k(ea); 113 pgdp = pgd_offset_k(ea);
121#ifdef PUD_TABLE_SIZE 114#ifdef PUD_TABLE_SIZE
122 if (pgd_none(*pgdp)) { 115 if (pgd_none(*pgdp)) {
@@ -352,16 +345,31 @@ EXPORT_SYMBOL(iounmap);
352EXPORT_SYMBOL(__iounmap); 345EXPORT_SYMBOL(__iounmap);
353EXPORT_SYMBOL(__iounmap_at); 346EXPORT_SYMBOL(__iounmap_at);
354 347
348#ifndef __PAGETABLE_PUD_FOLDED
349/* 4 level page table */
350struct page *pgd_page(pgd_t pgd)
351{
352 if (pgd_huge(pgd))
353 return pte_page(pgd_pte(pgd));
354 return virt_to_page(pgd_page_vaddr(pgd));
355}
356#endif
357
358struct page *pud_page(pud_t pud)
359{
360 if (pud_huge(pud))
361 return pte_page(pud_pte(pud));
362 return virt_to_page(pud_page_vaddr(pud));
363}
364
355/* 365/*
356 * For hugepage we have pfn in the pmd, we use PTE_RPN_SHIFT bits for flags 366 * For hugepage we have pfn in the pmd, we use PTE_RPN_SHIFT bits for flags
357 * For PTE page, we have a PTE_FRAG_SIZE (4K) aligned virtual address. 367 * For PTE page, we have a PTE_FRAG_SIZE (4K) aligned virtual address.
358 */ 368 */
359struct page *pmd_page(pmd_t pmd) 369struct page *pmd_page(pmd_t pmd)
360{ 370{
361#ifdef CONFIG_TRANSPARENT_HUGEPAGE 371 if (pmd_trans_huge(pmd) || pmd_huge(pmd))
362 if (pmd_trans_huge(pmd))
363 return pfn_to_page(pmd_pfn(pmd)); 372 return pfn_to_page(pmd_pfn(pmd));
364#endif
365 return virt_to_page(pmd_page_vaddr(pmd)); 373 return virt_to_page(pmd_page_vaddr(pmd));
366} 374}
367 375
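The unified pgd_page()/pud_page()/pmd_page() helpers all follow one rule: a leaf (huge) entry encodes the target pfn directly, so its struct page comes from the pfn, while a table entry points at a lower-level table whose struct page is recovered from the table's kernel virtual address. A sketch of the shared shape, using the helpers named above:

struct page *sketch_pmd_page(pmd_t pmd)
{
	/* Leaf entry (THP or hugepage): the pmd itself carries the pfn. */
	if (pmd_trans_huge(pmd) || pmd_huge(pmd))
		return pfn_to_page(pmd_pfn(pmd));

	/* Table entry: map the PTE page's virtual address back to its page. */
	return virt_to_page(pmd_page_vaddr(pmd));
}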
@@ -731,29 +739,15 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
731void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr, 739void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
732 pmd_t *pmdp, unsigned long old_pmd) 740 pmd_t *pmdp, unsigned long old_pmd)
733{ 741{
734 int ssize, i; 742 int ssize;
735 unsigned long s_addr; 743 unsigned int psize;
736 int max_hpte_count; 744 unsigned long vsid;
737 unsigned int psize, valid; 745 unsigned long flags = 0;
738 unsigned char *hpte_slot_array; 746 const struct cpumask *tmp;
739 unsigned long hidx, vpn, vsid, hash, shift, slot;
740
741 /*
742 * Flush all the hptes mapping this hugepage
743 */
744 s_addr = addr & HPAGE_PMD_MASK;
745 hpte_slot_array = get_hpte_slot_array(pmdp);
746 /*
 747 * If we try to do a HUGE PTE update after a withdraw is done,
 748 * we will find the below NULL. This happens when we do
 749 * split_huge_page_pmd.
750 */
751 if (!hpte_slot_array)
752 return;
753 747
 754 /* get the base page size, vsid and segment size */ 748 /* get the base page size, vsid and segment size */
755#ifdef CONFIG_DEBUG_VM 749#ifdef CONFIG_DEBUG_VM
756 psize = get_slice_psize(mm, s_addr); 750 psize = get_slice_psize(mm, addr);
757 BUG_ON(psize == MMU_PAGE_16M); 751 BUG_ON(psize == MMU_PAGE_16M);
758#endif 752#endif
759 if (old_pmd & _PAGE_COMBO) 753 if (old_pmd & _PAGE_COMBO)
@@ -761,46 +755,20 @@ void hpte_do_hugepage_flush(struct mm_struct *mm, unsigned long addr,
761 else 755 else
762 psize = MMU_PAGE_64K; 756 psize = MMU_PAGE_64K;
763 757
764 if (!is_kernel_addr(s_addr)) { 758 if (!is_kernel_addr(addr)) {
765 ssize = user_segment_size(s_addr); 759 ssize = user_segment_size(addr);
766 vsid = get_vsid(mm->context.id, s_addr, ssize); 760 vsid = get_vsid(mm->context.id, addr, ssize);
767 WARN_ON(vsid == 0); 761 WARN_ON(vsid == 0);
768 } else { 762 } else {
769 vsid = get_kernel_vsid(s_addr, mmu_kernel_ssize); 763 vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
770 ssize = mmu_kernel_ssize; 764 ssize = mmu_kernel_ssize;
771 } 765 }
772 766
773 if (ppc_md.hugepage_invalidate) 767 tmp = cpumask_of(smp_processor_id());
774 return ppc_md.hugepage_invalidate(vsid, s_addr, 768 if (cpumask_equal(mm_cpumask(mm), tmp))
775 hpte_slot_array, 769 flags |= HPTE_LOCAL_UPDATE;
776 psize, ssize); 770
777 /* 771 return flush_hash_hugepage(vsid, addr, pmdp, psize, ssize, flags);
 778 * No bulk hpte removal support; invalidate each entry
779 */
780 shift = mmu_psize_defs[psize].shift;
781 max_hpte_count = HPAGE_PMD_SIZE >> shift;
782 for (i = 0; i < max_hpte_count; i++) {
783 /*
 784 * 8 bits per hpte entry
785 * 000| [ secondary group (one bit) | hidx (3 bits) | valid bit]
786 */
787 valid = hpte_valid(hpte_slot_array, i);
788 if (!valid)
789 continue;
790 hidx = hpte_hash_index(hpte_slot_array, i);
791
792 /* get the vpn */
793 addr = s_addr + (i * (1ul << shift));
794 vpn = hpt_vpn(addr, vsid, ssize);
795 hash = hpt_hash(vpn, shift, ssize);
796 if (hidx & _PTEIDX_SECONDARY)
797 hash = ~hash;
798
799 slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
800 slot += hidx & _PTEIDX_GROUP_IX;
801 ppc_md.hpte_invalidate(slot, vpn, psize,
802 MMU_PAGE_16M, ssize, 0);
803 }
804} 772}
805 773
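The rewritten hpte_do_hugepage_flush() no longer walks the slots itself; it derives psize/vsid/ssize, decides whether a CPU-local invalidate (tlbiel) is safe, and delegates to the new flush_hash_hugepage(). Local is safe only when the mm has never been active on another CPU. A sketch of that decision as used above:

static unsigned long sketch_flush_flags(struct mm_struct *mm)
{
	unsigned long flags = 0;

	/* tlbiel suffices only if no other CPU can hold stale translations. */
	if (cpumask_equal(mm_cpumask(mm), cpumask_of(smp_processor_id())))
		flags |= HPTE_LOCAL_UPDATE;

	return flags;
}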
806static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot) 774static pmd_t pmd_set_protbits(pmd_t pmd, pgprot_t pgprot)
diff --git a/arch/powerpc/oprofile/backtrace.c b/arch/powerpc/oprofile/backtrace.c
index 6adf55fa5d88..ecc66d5f02c9 100644
--- a/arch/powerpc/oprofile/backtrace.c
+++ b/arch/powerpc/oprofile/backtrace.c
@@ -10,7 +10,7 @@
10#include <linux/oprofile.h> 10#include <linux/oprofile.h>
11#include <linux/sched.h> 11#include <linux/sched.h>
12#include <asm/processor.h> 12#include <asm/processor.h>
13#include <asm/uaccess.h> 13#include <linux/uaccess.h>
14#include <asm/compat.h> 14#include <asm/compat.h>
15#include <asm/oprofile_impl.h> 15#include <asm/oprofile_impl.h>
16 16
@@ -105,6 +105,7 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
105 first_frame = 0; 105 first_frame = 0;
106 } 106 }
107 } else { 107 } else {
108 pagefault_disable();
108#ifdef CONFIG_PPC64 109#ifdef CONFIG_PPC64
109 if (!is_32bit_task()) { 110 if (!is_32bit_task()) {
110 while (depth--) { 111 while (depth--) {
@@ -113,7 +114,7 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
113 break; 114 break;
114 first_frame = 0; 115 first_frame = 0;
115 } 116 }
116 117 pagefault_enable();
117 return; 118 return;
118 } 119 }
119#endif 120#endif
@@ -124,5 +125,6 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth)
124 break; 125 break;
125 first_frame = 0; 126 first_frame = 0;
126 } 127 }
128 pagefault_enable();
127 } 129 }
128} 130}
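The backtrace walker reads user stack frames from a context that must not sleep, so it now brackets the walk with pagefault_disable()/pagefault_enable(): a fault on the user address then fails fast instead of entering the full fault handler. A sketch of the pattern around a typical inatomic copy (sketch_read_user_word is illustrative):

#include <linux/uaccess.h>

static int sketch_read_user_word(unsigned long __user *uaddr, unsigned long *val)
{
	unsigned long left;

	pagefault_disable();
	/* Returns bytes not copied; fails instead of faulting in pages while atomic. */
	left = __copy_from_user_inatomic(val, uaddr, sizeof(*val));
	pagefault_enable();

	return left ? -EFAULT : 0;
}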
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index a6995d4e93d4..7c4f6690533a 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -339,7 +339,7 @@ static void power_pmu_bhrb_reset(void)
339 339
340static void power_pmu_bhrb_enable(struct perf_event *event) 340static void power_pmu_bhrb_enable(struct perf_event *event)
341{ 341{
342 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); 342 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
343 343
344 if (!ppmu->bhrb_nr) 344 if (!ppmu->bhrb_nr)
345 return; 345 return;
@@ -354,7 +354,7 @@ static void power_pmu_bhrb_enable(struct perf_event *event)
354 354
355static void power_pmu_bhrb_disable(struct perf_event *event) 355static void power_pmu_bhrb_disable(struct perf_event *event)
356{ 356{
357 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); 357 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
358 358
359 if (!ppmu->bhrb_nr) 359 if (!ppmu->bhrb_nr)
360 return; 360 return;
@@ -1144,7 +1144,7 @@ static void power_pmu_disable(struct pmu *pmu)
1144 if (!ppmu) 1144 if (!ppmu)
1145 return; 1145 return;
1146 local_irq_save(flags); 1146 local_irq_save(flags);
1147 cpuhw = &__get_cpu_var(cpu_hw_events); 1147 cpuhw = this_cpu_ptr(&cpu_hw_events);
1148 1148
1149 if (!cpuhw->disabled) { 1149 if (!cpuhw->disabled) {
1150 /* 1150 /*
@@ -1211,7 +1211,7 @@ static void power_pmu_enable(struct pmu *pmu)
1211 return; 1211 return;
1212 local_irq_save(flags); 1212 local_irq_save(flags);
1213 1213
1214 cpuhw = &__get_cpu_var(cpu_hw_events); 1214 cpuhw = this_cpu_ptr(&cpu_hw_events);
1215 if (!cpuhw->disabled) 1215 if (!cpuhw->disabled)
1216 goto out; 1216 goto out;
1217 1217
@@ -1403,7 +1403,7 @@ static int power_pmu_add(struct perf_event *event, int ef_flags)
1403 * Add the event to the list (if there is room) 1403 * Add the event to the list (if there is room)
1404 * and check whether the total set is still feasible. 1404 * and check whether the total set is still feasible.
1405 */ 1405 */
1406 cpuhw = &__get_cpu_var(cpu_hw_events); 1406 cpuhw = this_cpu_ptr(&cpu_hw_events);
1407 n0 = cpuhw->n_events; 1407 n0 = cpuhw->n_events;
1408 if (n0 >= ppmu->n_counter) 1408 if (n0 >= ppmu->n_counter)
1409 goto out; 1409 goto out;
@@ -1469,7 +1469,7 @@ static void power_pmu_del(struct perf_event *event, int ef_flags)
1469 1469
1470 power_pmu_read(event); 1470 power_pmu_read(event);
1471 1471
1472 cpuhw = &__get_cpu_var(cpu_hw_events); 1472 cpuhw = this_cpu_ptr(&cpu_hw_events);
1473 for (i = 0; i < cpuhw->n_events; ++i) { 1473 for (i = 0; i < cpuhw->n_events; ++i) {
1474 if (event == cpuhw->event[i]) { 1474 if (event == cpuhw->event[i]) {
1475 while (++i < cpuhw->n_events) { 1475 while (++i < cpuhw->n_events) {
@@ -1575,7 +1575,7 @@ static void power_pmu_stop(struct perf_event *event, int ef_flags)
1575 */ 1575 */
1576static void power_pmu_start_txn(struct pmu *pmu) 1576static void power_pmu_start_txn(struct pmu *pmu)
1577{ 1577{
1578 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); 1578 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
1579 1579
1580 perf_pmu_disable(pmu); 1580 perf_pmu_disable(pmu);
1581 cpuhw->group_flag |= PERF_EVENT_TXN; 1581 cpuhw->group_flag |= PERF_EVENT_TXN;
@@ -1589,7 +1589,7 @@ static void power_pmu_start_txn(struct pmu *pmu)
1589 */ 1589 */
1590static void power_pmu_cancel_txn(struct pmu *pmu) 1590static void power_pmu_cancel_txn(struct pmu *pmu)
1591{ 1591{
1592 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); 1592 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
1593 1593
1594 cpuhw->group_flag &= ~PERF_EVENT_TXN; 1594 cpuhw->group_flag &= ~PERF_EVENT_TXN;
1595 perf_pmu_enable(pmu); 1595 perf_pmu_enable(pmu);
@@ -1607,7 +1607,7 @@ static int power_pmu_commit_txn(struct pmu *pmu)
1607 1607
1608 if (!ppmu) 1608 if (!ppmu)
1609 return -EAGAIN; 1609 return -EAGAIN;
1610 cpuhw = &__get_cpu_var(cpu_hw_events); 1610 cpuhw = this_cpu_ptr(&cpu_hw_events);
1611 n = cpuhw->n_events; 1611 n = cpuhw->n_events;
1612 if (check_excludes(cpuhw->event, cpuhw->flags, 0, n)) 1612 if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
1613 return -EAGAIN; 1613 return -EAGAIN;
@@ -1964,7 +1964,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
1964 1964
1965 if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) { 1965 if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
1966 struct cpu_hw_events *cpuhw; 1966 struct cpu_hw_events *cpuhw;
1967 cpuhw = &__get_cpu_var(cpu_hw_events); 1967 cpuhw = this_cpu_ptr(&cpu_hw_events);
1968 power_pmu_bhrb_read(cpuhw); 1968 power_pmu_bhrb_read(cpuhw);
1969 data.br_stack = &cpuhw->bhrb_stack; 1969 data.br_stack = &cpuhw->bhrb_stack;
1970 } 1970 }
@@ -2037,7 +2037,7 @@ static bool pmc_overflow(unsigned long val)
2037static void perf_event_interrupt(struct pt_regs *regs) 2037static void perf_event_interrupt(struct pt_regs *regs)
2038{ 2038{
2039 int i, j; 2039 int i, j;
2040 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); 2040 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
2041 struct perf_event *event; 2041 struct perf_event *event;
2042 unsigned long val[8]; 2042 unsigned long val[8];
2043 int found, active; 2043 int found, active;
diff --git a/arch/powerpc/perf/core-fsl-emb.c b/arch/powerpc/perf/core-fsl-emb.c
index d35ae52c69dc..4acaea01fe03 100644
--- a/arch/powerpc/perf/core-fsl-emb.c
+++ b/arch/powerpc/perf/core-fsl-emb.c
@@ -210,7 +210,7 @@ static void fsl_emb_pmu_disable(struct pmu *pmu)
210 unsigned long flags; 210 unsigned long flags;
211 211
212 local_irq_save(flags); 212 local_irq_save(flags);
213 cpuhw = &__get_cpu_var(cpu_hw_events); 213 cpuhw = this_cpu_ptr(&cpu_hw_events);
214 214
215 if (!cpuhw->disabled) { 215 if (!cpuhw->disabled) {
216 cpuhw->disabled = 1; 216 cpuhw->disabled = 1;
@@ -249,7 +249,7 @@ static void fsl_emb_pmu_enable(struct pmu *pmu)
249 unsigned long flags; 249 unsigned long flags;
250 250
251 local_irq_save(flags); 251 local_irq_save(flags);
252 cpuhw = &__get_cpu_var(cpu_hw_events); 252 cpuhw = this_cpu_ptr(&cpu_hw_events);
253 if (!cpuhw->disabled) 253 if (!cpuhw->disabled)
254 goto out; 254 goto out;
255 255
@@ -653,7 +653,7 @@ static void record_and_restart(struct perf_event *event, unsigned long val,
653static void perf_event_interrupt(struct pt_regs *regs) 653static void perf_event_interrupt(struct pt_regs *regs)
654{ 654{
655 int i; 655 int i;
656 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); 656 struct cpu_hw_events *cpuhw = this_cpu_ptr(&cpu_hw_events);
657 struct perf_event *event; 657 struct perf_event *event;
658 unsigned long val; 658 unsigned long val;
659 int found = 0; 659 int found = 0;
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig
index 82f2da28cd27..d2ac1c116454 100644
--- a/arch/powerpc/platforms/44x/Kconfig
+++ b/arch/powerpc/platforms/44x/Kconfig
@@ -220,7 +220,6 @@ config AKEBONO
220 select USB_EHCI_HCD_PLATFORM if USB_EHCI_HCD 220 select USB_EHCI_HCD_PLATFORM if USB_EHCI_HCD
221 select MMC_SDHCI 221 select MMC_SDHCI
222 select MMC_SDHCI_PLTFM 222 select MMC_SDHCI_PLTFM
223 select MMC_SDHCI_OF_476GTR
224 select ATA 223 select ATA
225 select SATA_AHCI_PLATFORM 224 select SATA_AHCI_PLATFORM
226 help 225 help
diff --git a/arch/powerpc/platforms/44x/ppc476.c b/arch/powerpc/platforms/44x/ppc476.c
index 58db9d083969..c11ce6516c8f 100644
--- a/arch/powerpc/platforms/44x/ppc476.c
+++ b/arch/powerpc/platforms/44x/ppc476.c
@@ -94,7 +94,7 @@ static int avr_probe(struct i2c_client *client,
94{ 94{
95 avr_i2c_client = client; 95 avr_i2c_client = client;
96 ppc_md.restart = avr_reset_system; 96 ppc_md.restart = avr_reset_system;
97 ppc_md.power_off = avr_power_off_system; 97 pm_power_off = avr_power_off_system;
98 return 0; 98 return 0;
99} 99}
100 100
diff --git a/arch/powerpc/platforms/512x/mpc512x_shared.c b/arch/powerpc/platforms/512x/mpc512x_shared.c
index e996e007bc44..711f3d352af7 100644
--- a/arch/powerpc/platforms/512x/mpc512x_shared.c
+++ b/arch/powerpc/platforms/512x/mpc512x_shared.c
@@ -18,7 +18,7 @@
18#include <linux/irq.h> 18#include <linux/irq.h>
19#include <linux/of_platform.h> 19#include <linux/of_platform.h>
20#include <linux/fsl-diu-fb.h> 20#include <linux/fsl-diu-fb.h>
21#include <linux/bootmem.h> 21#include <linux/memblock.h>
22#include <sysdev/fsl_soc.h> 22#include <sysdev/fsl_soc.h>
23 23
24#include <asm/cacheflush.h> 24#include <asm/cacheflush.h>
@@ -297,14 +297,13 @@ static void __init mpc512x_setup_diu(void)
297 * and so negatively affect boot time. Instead we reserve the 297 * and so negatively affect boot time. Instead we reserve the
298 * already configured frame buffer area so that it won't be 298 * already configured frame buffer area so that it won't be
299 * destroyed. The starting address of the area to reserve and 299 * destroyed. The starting address of the area to reserve and
300 * also it's length is passed to reserve_bootmem(). It will be 300 * also it's length is passed to memblock_reserve(). It will be
301 * freed later on first open of fbdev, when splash image is not 301 * freed later on first open of fbdev, when splash image is not
302 * needed any more. 302 * needed any more.
303 */ 303 */
304 if (diu_shared_fb.in_use) { 304 if (diu_shared_fb.in_use) {
305 ret = reserve_bootmem(diu_shared_fb.fb_phys, 305 ret = memblock_reserve(diu_shared_fb.fb_phys,
306 diu_shared_fb.fb_len, 306 diu_shared_fb.fb_len);
307 BOOTMEM_EXCLUSIVE);
308 if (ret) { 307 if (ret) {
309 pr_err("%s: reserve bootmem failed\n", __func__); 308 pr_err("%s: reserve bootmem failed\n", __func__);
310 diu_shared_fb.in_use = false; 309 diu_shared_fb.in_use = false;
diff --git a/arch/powerpc/platforms/52xx/efika.c b/arch/powerpc/platforms/52xx/efika.c
index 3feffde9128d..6af651e69129 100644
--- a/arch/powerpc/platforms/52xx/efika.c
+++ b/arch/powerpc/platforms/52xx/efika.c
@@ -212,6 +212,8 @@ static int __init efika_probe(void)
212 DMA_MODE_READ = 0x44; 212 DMA_MODE_READ = 0x44;
213 DMA_MODE_WRITE = 0x48; 213 DMA_MODE_WRITE = 0x48;
214 214
215 pm_power_off = rtas_power_off;
216
215 return 1; 217 return 1;
216} 218}
217 219
@@ -225,7 +227,6 @@ define_machine(efika)
225 .init_IRQ = mpc52xx_init_irq, 227 .init_IRQ = mpc52xx_init_irq,
226 .get_irq = mpc52xx_get_irq, 228 .get_irq = mpc52xx_get_irq,
227 .restart = rtas_restart, 229 .restart = rtas_restart,
228 .power_off = rtas_power_off,
229 .halt = rtas_halt, 230 .halt = rtas_halt,
230 .set_rtc_time = rtas_set_rtc_time, 231 .set_rtc_time = rtas_set_rtc_time,
231 .get_rtc_time = rtas_get_rtc_time, 232 .get_rtc_time = rtas_get_rtc_time,
diff --git a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
index 463fa91ee5b6..15e8021ddef9 100644
--- a/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
+++ b/arch/powerpc/platforms/83xx/mcu_mpc8349emitx.c
@@ -167,10 +167,10 @@ static int mcu_probe(struct i2c_client *client, const struct i2c_device_id *id)
167 if (ret) 167 if (ret)
168 goto err; 168 goto err;
169 169
170 /* XXX: this is potentially racy, but there is no lock for ppc_md */ 170 /* XXX: this is potentially racy, but there is no lock for pm_power_off */
171 if (!ppc_md.power_off) { 171 if (!pm_power_off) {
172 glob_mcu = mcu; 172 glob_mcu = mcu;
173 ppc_md.power_off = mcu_power_off; 173 pm_power_off = mcu_power_off;
174 dev_info(&client->dev, "will provide power-off service\n"); 174 dev_info(&client->dev, "will provide power-off service\n");
175 } 175 }
176 176
@@ -197,7 +197,7 @@ static int mcu_remove(struct i2c_client *client)
197 device_remove_file(&client->dev, &dev_attr_status); 197 device_remove_file(&client->dev, &dev_attr_status);
198 198
199 if (glob_mcu == mcu) { 199 if (glob_mcu == mcu) {
200 ppc_md.power_off = NULL; 200 pm_power_off = NULL;
201 glob_mcu = NULL; 201 glob_mcu = NULL;
202 } 202 }
203 203
diff --git a/arch/powerpc/platforms/85xx/corenet_generic.c b/arch/powerpc/platforms/85xx/corenet_generic.c
index e56b89a792ed..1f309ccb096e 100644
--- a/arch/powerpc/platforms/85xx/corenet_generic.c
+++ b/arch/powerpc/platforms/85xx/corenet_generic.c
@@ -170,7 +170,7 @@ static int __init corenet_generic_probe(void)
170 170
171 ppc_md.get_irq = ehv_pic_get_irq; 171 ppc_md.get_irq = ehv_pic_get_irq;
172 ppc_md.restart = fsl_hv_restart; 172 ppc_md.restart = fsl_hv_restart;
173 ppc_md.power_off = fsl_hv_halt; 173 pm_power_off = fsl_hv_halt;
174 ppc_md.halt = fsl_hv_halt; 174 ppc_md.halt = fsl_hv_halt;
175#ifdef CONFIG_SMP 175#ifdef CONFIG_SMP
176 /* 176 /*
diff --git a/arch/powerpc/platforms/85xx/sgy_cts1000.c b/arch/powerpc/platforms/85xx/sgy_cts1000.c
index 8162b0412117..e149c9ec26ae 100644
--- a/arch/powerpc/platforms/85xx/sgy_cts1000.c
+++ b/arch/powerpc/platforms/85xx/sgy_cts1000.c
@@ -120,7 +120,7 @@ static int gpio_halt_probe(struct platform_device *pdev)
120 120
121 /* Register our halt function */ 121 /* Register our halt function */
122 ppc_md.halt = gpio_halt_cb; 122 ppc_md.halt = gpio_halt_cb;
123 ppc_md.power_off = gpio_halt_cb; 123 pm_power_off = gpio_halt_cb;
124 124
125 printk(KERN_INFO "gpio-halt: registered GPIO %d (%d trigger, %d" 125 printk(KERN_INFO "gpio-halt: registered GPIO %d (%d trigger, %d"
126 " irq).\n", gpio, trigger, irq); 126 " irq).\n", gpio, trigger, irq);
@@ -137,7 +137,7 @@ static int gpio_halt_remove(struct platform_device *pdev)
137 free_irq(irq, halt_node); 137 free_irq(irq, halt_node);
138 138
139 ppc_md.halt = NULL; 139 ppc_md.halt = NULL;
140 ppc_md.power_off = NULL; 140 pm_power_off = NULL;
141 141
142 gpio_free(gpio); 142 gpio_free(gpio);
143 143
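These platform hunks replace the machine-descriptor hook ppc_md.power_off with the generic pm_power_off function pointer that the core kernel consults on shutdown. The mcu and gpio-halt drivers show the driver-side half of the pattern; a sketch with a hypothetical handler:

#include <linux/pm.h>

static void demo_power_off(void)	/* hypothetical board-specific handler */
{
	/* trip the board's power controller here */
}

static int demo_probe(void)
{
	/* Claim the hook only if it is free; as noted above, there is no lock. */
	if (!pm_power_off)
		pm_power_off = demo_power_off;
	return 0;
}

static void demo_remove(void)
{
	if (pm_power_off == demo_power_off)
		pm_power_off = NULL;
}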
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig
index bd6f1a1cf922..157250426b56 100644
--- a/arch/powerpc/platforms/8xx/Kconfig
+++ b/arch/powerpc/platforms/8xx/Kconfig
@@ -1,6 +1,3 @@
1config FADS
2 bool
3
4config CPM1 1config CPM1
5 bool 2 bool
6 select CPM 3 select CPM
@@ -13,7 +10,6 @@ choice
13 10
14config MPC8XXFADS 11config MPC8XXFADS
15 bool "FADS" 12 bool "FADS"
16 select FADS
17 13
18config MPC86XADS 14config MPC86XADS
19 bool "MPC86XADS" 15 bool "MPC86XADS"
diff --git a/arch/powerpc/platforms/cell/beat_htab.c b/arch/powerpc/platforms/cell/beat_htab.c
index d4d245c0d787..bee9232fe619 100644
--- a/arch/powerpc/platforms/cell/beat_htab.c
+++ b/arch/powerpc/platforms/cell/beat_htab.c
@@ -186,7 +186,7 @@ static long beat_lpar_hpte_updatepp(unsigned long slot,
186 unsigned long newpp, 186 unsigned long newpp,
187 unsigned long vpn, 187 unsigned long vpn,
188 int psize, int apsize, 188 int psize, int apsize,
189 int ssize, int local) 189 int ssize, unsigned long flags)
190{ 190{
191 unsigned long lpar_rc; 191 unsigned long lpar_rc;
192 u64 dummy0, dummy1; 192 u64 dummy0, dummy1;
@@ -369,7 +369,7 @@ static long beat_lpar_hpte_updatepp_v3(unsigned long slot,
369 unsigned long newpp, 369 unsigned long newpp,
370 unsigned long vpn, 370 unsigned long vpn,
371 int psize, int apsize, 371 int psize, int apsize,
372 int ssize, int local) 372 int ssize, unsigned long flags)
373{ 373{
374 unsigned long lpar_rc; 374 unsigned long lpar_rc;
375 unsigned long want_v; 375 unsigned long want_v;
diff --git a/arch/powerpc/platforms/cell/celleb_pci.c b/arch/powerpc/platforms/cell/celleb_pci.c
index 2b98a36ef8fb..3ce70ded2d6a 100644
--- a/arch/powerpc/platforms/cell/celleb_pci.c
+++ b/arch/powerpc/platforms/cell/celleb_pci.c
@@ -29,7 +29,7 @@
 #include <linux/pci.h>
 #include <linux/string.h>
 #include <linux/init.h>
-#include <linux/bootmem.h>
+#include <linux/memblock.h>
 #include <linux/pci_regs.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
@@ -401,11 +401,11 @@ error:
 	} else {
 		if (config && *config) {
 			size = 256;
-			free_bootmem(__pa(*config), size);
+			memblock_free(__pa(*config), size);
 		}
 		if (res && *res) {
 			size = sizeof(struct celleb_pci_resource);
-			free_bootmem(__pa(*res), size);
+			memblock_free(__pa(*res), size);
 		}
 	}
 
diff --git a/arch/powerpc/platforms/cell/celleb_scc_epci.c b/arch/powerpc/platforms/cell/celleb_scc_epci.c
index 844c0facb4f7..9438bbed402f 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_epci.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_epci.c
@@ -25,7 +25,6 @@
 #include <linux/pci.h>
 #include <linux/init.h>
 #include <linux/pci_regs.h>
-#include <linux/bootmem.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
diff --git a/arch/powerpc/platforms/cell/celleb_scc_pciex.c b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
index 4278acfa2ede..f22387598040 100644
--- a/arch/powerpc/platforms/cell/celleb_scc_pciex.c
+++ b/arch/powerpc/platforms/cell/celleb_scc_pciex.c
@@ -25,7 +25,6 @@
 #include <linux/string.h>
 #include <linux/slab.h>
 #include <linux/init.h>
-#include <linux/bootmem.h>
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 
diff --git a/arch/powerpc/platforms/cell/celleb_setup.c b/arch/powerpc/platforms/cell/celleb_setup.c
index 34e8ce2976aa..90be8ec51686 100644
--- a/arch/powerpc/platforms/cell/celleb_setup.c
+++ b/arch/powerpc/platforms/cell/celleb_setup.c
@@ -142,6 +142,7 @@ static int __init celleb_probe_beat(void)
 	powerpc_firmware_features |= FW_FEATURE_CELLEB_ALWAYS
 		| FW_FEATURE_BEAT | FW_FEATURE_LPAR;
 	hpte_init_beat_v3();
+	pm_power_off = beat_power_off;
 
 	return 1;
 }
@@ -190,6 +191,7 @@ static int __init celleb_probe_native(void)
 
 	powerpc_firmware_features |= FW_FEATURE_CELLEB_ALWAYS;
 	hpte_init_native();
+	pm_power_off = rtas_power_off;
 
 	return 1;
 }
@@ -204,7 +206,6 @@ define_machine(celleb_beat) {
 	.setup_arch		= celleb_setup_arch_beat,
 	.show_cpuinfo		= celleb_show_cpuinfo,
 	.restart		= beat_restart,
-	.power_off		= beat_power_off,
 	.halt			= beat_halt,
 	.get_rtc_time		= beat_get_rtc_time,
 	.set_rtc_time		= beat_set_rtc_time,
@@ -230,7 +231,6 @@ define_machine(celleb_native) {
 	.setup_arch		= celleb_setup_arch_native,
 	.show_cpuinfo		= celleb_show_cpuinfo,
 	.restart		= rtas_restart,
-	.power_off		= rtas_power_off,
 	.halt			= rtas_halt,
 	.get_boot_time		= rtas_get_boot_time,
 	.get_rtc_time		= rtas_get_rtc_time,
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 8a106b4172e0..4c11421847be 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -82,7 +82,7 @@ static void iic_unmask(struct irq_data *d)
 
 static void iic_eoi(struct irq_data *d)
 {
-	struct iic *iic = &__get_cpu_var(cpu_iic);
+	struct iic *iic = this_cpu_ptr(&cpu_iic);
 	out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]);
 	BUG_ON(iic->eoi_ptr < 0);
 }
@@ -148,7 +148,7 @@ static unsigned int iic_get_irq(void)
 	struct iic *iic;
 	unsigned int virq;
 
-	iic = &__get_cpu_var(cpu_iic);
+	iic = this_cpu_ptr(&cpu_iic);
 	*(unsigned long *) &pending =
 		in_be64((u64 __iomem *) &iic->regs->pending_destr);
 	if (!(pending.flags & CBE_IIC_IRQ_VALID))
@@ -163,7 +163,7 @@ static unsigned int iic_get_irq(void)
 
 void iic_setup_cpu(void)
 {
-	out_be64(&__get_cpu_var(cpu_iic).regs->prio, 0xff);
+	out_be64(this_cpu_ptr(&cpu_iic.regs->prio), 0xff);
 }
 
 u8 iic_get_target_id(int cpu)
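The __get_cpu_var() → this_cpu_ptr() conversions here and in the later hunks replace an lvalue-style accessor (used as `&__get_cpu_var(x)`) with one that yields a pointer directly. A rough userspace analogue of the pointer-returning style, with a plain array standing in for real per-CPU storage (illustrative names only):

    #include <stdio.h>

    #define NR_CPUS 4
    static int counters[NR_CPUS];   /* stand-in for a DEFINE_PER_CPU variable */
    static int current_cpu;         /* stand-in for smp_processor_id() */

    /* Pointer-returning accessor, in the spirit of this_cpu_ptr(&var). */
    #define this_cpu_ptr(arr) (&(arr)[current_cpu])

    int main(void)
    {
        int *c;

        current_cpu = 2;
        c = this_cpu_ptr(counters);  /* one pointer fetch, no & on the outside */
        (*c)++;
        printf("cpu%d counter = %d\n", current_cpu, counters[2]);
        return 0;
    }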
diff --git a/arch/powerpc/platforms/cell/qpace_setup.c b/arch/powerpc/platforms/cell/qpace_setup.c
index 6e3409d590ac..d328140dc6f5 100644
--- a/arch/powerpc/platforms/cell/qpace_setup.c
+++ b/arch/powerpc/platforms/cell/qpace_setup.c
@@ -127,6 +127,7 @@ static int __init qpace_probe(void)
 		return 0;
 
 	hpte_init_native();
+	pm_power_off = rtas_power_off;
 
 	return 1;
 }
@@ -137,7 +138,6 @@ define_machine(qpace) {
 	.setup_arch		= qpace_setup_arch,
 	.show_cpuinfo		= qpace_show_cpuinfo,
 	.restart		= rtas_restart,
-	.power_off		= rtas_power_off,
 	.halt			= rtas_halt,
 	.get_boot_time		= rtas_get_boot_time,
 	.get_rtc_time		= rtas_get_rtc_time,
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 6ae25fb62015..d62aa982d530 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -259,6 +259,7 @@ static int __init cell_probe(void)
 		return 0;
 
 	hpte_init_native();
+	pm_power_off = rtas_power_off;
 
 	return 1;
 }
@@ -269,7 +270,6 @@ define_machine(cell) {
 	.setup_arch		= cell_setup_arch,
 	.show_cpuinfo		= cell_show_cpuinfo,
 	.restart		= rtas_restart,
-	.power_off		= rtas_power_off,
 	.halt			= rtas_halt,
 	.get_boot_time		= rtas_get_boot_time,
 	.get_rtc_time		= rtas_get_rtc_time,
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index ffcbd242e669..f7af74f83693 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -181,7 +181,8 @@ static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
 	return 0;
 }
 
-extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX
+extern int hash_page(unsigned long ea, unsigned long access,
+		     unsigned long trap, unsigned long dsisr); //XXX
 static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 {
 	int ret;
@@ -196,7 +197,7 @@ static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
 	    (REGION_ID(ea) != USER_REGION_ID)) {
 
 		spin_unlock(&spu->register_lock);
-		ret = hash_page(ea, _PAGE_PRESENT, 0x300);
+		ret = hash_page(ea, _PAGE_PRESENT, 0x300, dsisr);
 		spin_lock(&spu->register_lock);
 
 		if (!ret) {
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
index e45894a08118..d98f845ac777 100644
--- a/arch/powerpc/platforms/cell/spufs/fault.c
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -144,7 +144,7 @@ int spufs_handle_class1(struct spu_context *ctx)
 	access = (_PAGE_PRESENT | _PAGE_USER);
 	access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL;
 	local_irq_save(flags);
-	ret = hash_page(ea, access, 0x300);
+	ret = hash_page(ea, access, 0x300, dsisr);
 	local_irq_restore(flags);
 
 	/* hashing failed, so try the actual fault handler */
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index 5b77b1919fd2..860a59eb8ea2 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -585,6 +585,8 @@ static int __init chrp_probe(void)
 	DMA_MODE_READ = 0x44;
 	DMA_MODE_WRITE = 0x48;
 
+	pm_power_off = rtas_power_off;
+
 	return 1;
 }
 
@@ -597,7 +599,6 @@ define_machine(chrp) {
 	.show_cpuinfo		= chrp_show_cpuinfo,
 	.init_IRQ		= chrp_init_IRQ,
 	.restart		= rtas_restart,
-	.power_off		= rtas_power_off,
 	.halt			= rtas_halt,
 	.time_init		= chrp_time_init,
 	.set_rtc_time		= chrp_set_rtc_time,
diff --git a/arch/powerpc/platforms/embedded6xx/gamecube.c b/arch/powerpc/platforms/embedded6xx/gamecube.c
index bd4ba5d7d568..fe0ed6ee285e 100644
--- a/arch/powerpc/platforms/embedded6xx/gamecube.c
+++ b/arch/powerpc/platforms/embedded6xx/gamecube.c
@@ -67,6 +67,8 @@ static int __init gamecube_probe(void)
 	if (!of_flat_dt_is_compatible(dt_root, "nintendo,gamecube"))
 		return 0;
 
+	pm_power_off = gamecube_power_off;
+
 	return 1;
 }
 
@@ -80,7 +82,6 @@ define_machine(gamecube) {
 	.probe			= gamecube_probe,
 	.init_early		= gamecube_init_early,
 	.restart		= gamecube_restart,
-	.power_off		= gamecube_power_off,
 	.halt			= gamecube_halt,
 	.init_IRQ		= flipper_pic_probe,
 	.get_irq		= flipper_pic_get_irq,
diff --git a/arch/powerpc/platforms/embedded6xx/linkstation.c b/arch/powerpc/platforms/embedded6xx/linkstation.c
index 168e1d80b2e5..540eeb58d3f0 100644
--- a/arch/powerpc/platforms/embedded6xx/linkstation.c
+++ b/arch/powerpc/platforms/embedded6xx/linkstation.c
@@ -147,6 +147,9 @@ static int __init linkstation_probe(void)
 
 	if (!of_flat_dt_is_compatible(root, "linkstation"))
 		return 0;
+
+	pm_power_off = linkstation_power_off;
+
 	return 1;
 }
 
@@ -158,7 +161,6 @@ define_machine(linkstation){
 	.show_cpuinfo		= linkstation_show_cpuinfo,
 	.get_irq		= mpic_get_irq,
 	.restart		= linkstation_restart,
-	.power_off		= linkstation_power_off,
 	.halt			= linkstation_halt,
 	.calibrate_decr		= generic_calibrate_decr,
 };
diff --git a/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c b/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c
index 20a8ed91962e..7feb325b636b 100644
--- a/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c
+++ b/arch/powerpc/platforms/embedded6xx/usbgecko_udbg.c
@@ -247,7 +247,7 @@ void __init ug_udbg_init(void)
 	np = of_find_compatible_node(NULL, NULL, "nintendo,flipper-exi");
 	if (!np) {
 		udbg_printf("%s: EXI node not found\n", __func__);
-		goto done;
+		goto out;
 	}
 
 	exi_io_base = ug_udbg_setup_exi_io_base(np);
@@ -267,8 +267,8 @@ void __init ug_udbg_init(void)
 	}
 
 done:
-	if (np)
-		of_node_put(np);
+	of_node_put(np);
+out:
 	return;
 }
 
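The label change above is the usual ordered-cleanup idiom: an early failure jumps to a label past the cleanups that do not apply (of_node_put() is now reached only on paths that hold a node reference, and it tolerates being simplified because the NULL guard moved into the control flow). A small self-contained example of the same ladder, with invented names:

    #include <stdio.h>

    static int read_first_byte(const char *path)
    {
        FILE *f = fopen(path, "r");
        int ret = -1;
        int c;

        if (!f)
            goto out;       /* nothing acquired yet: skip the cleanup */

        c = fgetc(f);
        if (c == EOF)
            goto done;      /* f is held: fall through its cleanup */

        ret = c;
    done:
        fclose(f);          /* reached only with a valid handle */
    out:
        return ret;
    }

    int main(void)
    {
        printf("%d\n", read_first_byte("/etc/hostname"));
        return 0;
    }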
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c
index 388e29bab8f6..352592d3e44e 100644
--- a/arch/powerpc/platforms/embedded6xx/wii.c
+++ b/arch/powerpc/platforms/embedded6xx/wii.c
@@ -211,6 +211,8 @@ static int __init wii_probe(void)
 	if (!of_flat_dt_is_compatible(dt_root, "nintendo,wii"))
 		return 0;
 
+	pm_power_off = wii_power_off;
+
 	return 1;
 }
 
@@ -226,7 +228,6 @@ define_machine(wii) {
 	.init_early		= wii_init_early,
 	.setup_arch		= wii_setup_arch,
 	.restart		= wii_restart,
-	.power_off		= wii_power_off,
 	.halt			= wii_halt,
 	.init_IRQ		= wii_pic_probe,
 	.get_irq		= flipper_pic_get_irq,
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index f7136aae8bbf..d3a13067ec42 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -15,7 +15,6 @@
 #include <linux/delay.h>
 #include <linux/string.h>
 #include <linux/init.h>
-#include <linux/bootmem.h>
 #include <linux/irq.h>
 
 #include <asm/sections.h>
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c
index cb1b0b35a0c6..56b85cd61aaf 100644
--- a/arch/powerpc/platforms/maple/setup.c
+++ b/arch/powerpc/platforms/maple/setup.c
@@ -169,7 +169,7 @@ static void __init maple_use_rtas_reboot_and_halt_if_present(void)
 	if (rtas_service_present("system-reboot") &&
 	    rtas_service_present("power-off")) {
 		ppc_md.restart = rtas_restart;
-		ppc_md.power_off = rtas_power_off;
+		pm_power_off = rtas_power_off;
 		ppc_md.halt = rtas_halt;
 	}
 }
@@ -312,6 +312,7 @@ static int __init maple_probe(void)
 	alloc_dart_table();
 
 	hpte_init_native();
+	pm_power_off = maple_power_off;
 
 	return 1;
 }
@@ -325,7 +326,6 @@ define_machine(maple) {
 	.pci_irq_fixup		= maple_pci_irq_fixup,
 	.pci_get_legacy_ide_irq	= maple_pci_get_legacy_ide_irq,
 	.restart		= maple_restart,
-	.power_off		= maple_power_off,
 	.halt			= maple_halt,
 	.get_boot_time		= maple_get_boot_time,
 	.set_rtc_time		= maple_set_rtc_time,
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c
index 014d06e6d46b..60b03a1703d1 100644
--- a/arch/powerpc/platforms/powermac/nvram.c
+++ b/arch/powerpc/platforms/powermac/nvram.c
@@ -513,11 +513,7 @@ static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr)
 		printk(KERN_ERR "nvram: no address\n");
 		return -EINVAL;
 	}
-	nvram_image = alloc_bootmem(NVRAM_SIZE);
-	if (nvram_image == NULL) {
-		printk(KERN_ERR "nvram: can't allocate ram image\n");
-		return -ENOMEM;
-	}
+	nvram_image = memblock_virt_alloc(NVRAM_SIZE, 0);
 	nvram_data = ioremap(addr, NVRAM_SIZE*2);
 	nvram_naddrs = 1; /* Make sure we get the correct case */
 
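The dropped error path above (and the similar drops in the powernv hunks below) is possible because memblock_virt_alloc() zeroes the allocation and panics internally if it cannot be satisfied, so a NULL never reaches the call site. A userspace analogue of such a never-returns-NULL wrapper, with invented names:

    #include <stdio.h>
    #include <stdlib.h>

    static void *xalloc(size_t size)
    {
        void *p = calloc(1, size);  /* zeroed, like memblock_virt_alloc() */
        if (!p) {
            fprintf(stderr, "out of memory allocating %zu bytes\n", size);
            abort();                /* fail loudly; never return NULL */
        }
        return p;
    }

    int main(void)
    {
        char *image = xalloc(8192); /* no NULL check needed at the call site */
        image[0] = 1;
        free(image);
        return 0;
    }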
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 7e868ccf3b0d..04702db35d45 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -15,7 +15,6 @@
 #include <linux/delay.h>
 #include <linux/string.h>
 #include <linux/init.h>
-#include <linux/bootmem.h>
 #include <linux/irq.h>
 #include <linux/of_pci.h>
 
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c
index b127a29ac526..713d36d45d1d 100644
--- a/arch/powerpc/platforms/powermac/setup.c
+++ b/arch/powerpc/platforms/powermac/setup.c
@@ -632,6 +632,8 @@ static int __init pmac_probe(void)
 	smu_cmdbuf_abs = memblock_alloc_base(4096, 4096, 0x80000000UL);
 #endif /* CONFIG_PMAC_SMU */
 
+	pm_power_off = pmac_power_off;
+
 	return 1;
 }
 
@@ -663,7 +665,6 @@ define_machine(powermac) {
 	.get_irq		= NULL,	/* changed later */
 	.pci_irq_fixup		= pmac_pci_irq_fixup,
 	.restart		= pmac_restart,
-	.power_off		= pmac_power_off,
 	.halt			= pmac_halt,
 	.time_init		= pmac_time_init,
 	.get_boot_time		= pmac_get_boot_time,
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index eba9cb10619c..2809c9895288 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -11,7 +11,6 @@
  * (at your option) any later version.
  */
 
-#include <linux/bootmem.h>
 #include <linux/debugfs.h>
 #include <linux/delay.h>
 #include <linux/io.h>
@@ -354,6 +353,9 @@ static int ioda_eeh_get_phb_state(struct eeh_pe *pe)
 	} else if (!(pe->state & EEH_PE_ISOLATED)) {
 		eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
 		ioda_eeh_phb_diag(pe);
+
+		if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
+			pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
 	}
 
 	return result;
@@ -373,7 +375,7 @@ static int ioda_eeh_get_pe_state(struct eeh_pe *pe)
 	 * moving forward, we have to return operational
 	 * state during PE reset.
 	 */
-	if (pe->state & EEH_PE_CFG_BLOCKED) {
+	if (pe->state & EEH_PE_RESET) {
 		result = (EEH_STATE_MMIO_ACTIVE |
 			  EEH_STATE_DMA_ACTIVE |
 			  EEH_STATE_MMIO_ENABLED |
@@ -452,6 +454,9 @@ static int ioda_eeh_get_pe_state(struct eeh_pe *pe)
 
 		eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
 		ioda_eeh_phb_diag(pe);
+
+		if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
+			pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
 	}
 
 	return result;
@@ -731,7 +736,8 @@ static int ioda_eeh_reset(struct eeh_pe *pe, int option)
 static int ioda_eeh_get_log(struct eeh_pe *pe, int severity,
 			    char *drv_log, unsigned long len)
 {
-	pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
+	if (!eeh_has_flag(EEH_EARLY_DUMP_LOG))
+		pnv_pci_dump_phb_diag_data(pe->phb, pe->data);
 
 	return 0;
 }
@@ -1087,6 +1093,10 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
 	    !((*pe)->state & EEH_PE_ISOLATED)) {
 		eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
 		ioda_eeh_phb_diag(*pe);
+
+		if (eeh_has_flag(EEH_EARLY_DUMP_LOG))
+			pnv_pci_dump_phb_diag_data((*pe)->phb,
+						   (*pe)->data);
 	}
 
 	/*
diff --git a/arch/powerpc/platforms/powernv/opal-async.c b/arch/powerpc/platforms/powernv/opal-async.c
index e462ab947d16..693b6cdac691 100644
--- a/arch/powerpc/platforms/powernv/opal-async.c
+++ b/arch/powerpc/platforms/powernv/opal-async.c
@@ -71,6 +71,7 @@ int opal_async_get_token_interruptible(void)
 
 	return token;
 }
+EXPORT_SYMBOL_GPL(opal_async_get_token_interruptible);
 
 int __opal_async_release_token(int token)
 {
@@ -102,6 +103,7 @@ int opal_async_release_token(int token)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(opal_async_release_token);
 
 int opal_async_wait_response(uint64_t token, struct opal_msg *msg)
 {
@@ -120,6 +122,7 @@ int opal_async_wait_response(uint64_t token, struct opal_msg *msg)
 
 	return 0;
 }
+EXPORT_SYMBOL_GPL(opal_async_wait_response);
 
 static int opal_async_comp_event(struct notifier_block *nb,
 				 unsigned long msg_type, void *msg)
diff --git a/arch/powerpc/platforms/powernv/opal-rtc.c b/arch/powerpc/platforms/powernv/opal-rtc.c
index 499707ddaa9c..37dbee15769f 100644
--- a/arch/powerpc/platforms/powernv/opal-rtc.c
+++ b/arch/powerpc/platforms/powernv/opal-rtc.c
@@ -15,6 +15,8 @@
 #include <linux/bcd.h>
 #include <linux/rtc.h>
 #include <linux/delay.h>
+#include <linux/platform_device.h>
+#include <linux/of_platform.h>
 
 #include <asm/opal.h>
 #include <asm/firmware.h>
@@ -43,7 +45,7 @@ unsigned long __init opal_get_boot_time(void)
 	long rc = OPAL_BUSY;
 
 	if (!opal_check_token(OPAL_RTC_READ))
-		goto out;
+		return 0;
 
 	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
 		rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
@@ -53,62 +55,33 @@ unsigned long __init opal_get_boot_time(void)
 		mdelay(10);
 	}
 	if (rc != OPAL_SUCCESS)
-		goto out;
+		return 0;
 
 	y_m_d = be32_to_cpu(__y_m_d);
 	h_m_s_ms = be64_to_cpu(__h_m_s_ms);
 	opal_to_tm(y_m_d, h_m_s_ms, &tm);
 	return mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
 		      tm.tm_hour, tm.tm_min, tm.tm_sec);
-out:
-	ppc_md.get_rtc_time = NULL;
-	ppc_md.set_rtc_time = NULL;
-	return 0;
 }
 
-void opal_get_rtc_time(struct rtc_time *tm)
+static __init int opal_time_init(void)
 {
-	long rc = OPAL_BUSY;
-	u32 y_m_d;
-	u64 h_m_s_ms;
-	__be32 __y_m_d;
-	__be64 __h_m_s_ms;
+	struct platform_device *pdev;
+	struct device_node *rtc;
 
-	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
-		rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
-		if (rc == OPAL_BUSY_EVENT)
-			opal_poll_events(NULL);
+	rtc = of_find_node_by_path("/ibm,opal/rtc");
+	if (rtc) {
+		pdev = of_platform_device_create(rtc, "opal-rtc", NULL);
+		of_node_put(rtc);
+	} else {
+		if (opal_check_token(OPAL_RTC_READ) ||
+		    opal_check_token(OPAL_READ_TPO))
+			pdev = platform_device_register_simple("opal-rtc", -1,
+							       NULL, 0);
 		else
-			mdelay(10);
+			return -ENODEV;
 	}
-	if (rc != OPAL_SUCCESS)
-		return;
-	y_m_d = be32_to_cpu(__y_m_d);
-	h_m_s_ms = be64_to_cpu(__h_m_s_ms);
-	opal_to_tm(y_m_d, h_m_s_ms, tm);
-}
-
-int opal_set_rtc_time(struct rtc_time *tm)
-{
-	long rc = OPAL_BUSY;
-	u32 y_m_d = 0;
-	u64 h_m_s_ms = 0;
-
-	y_m_d |= ((u32)bin2bcd((tm->tm_year + 1900) / 100)) << 24;
-	y_m_d |= ((u32)bin2bcd((tm->tm_year + 1900) % 100)) << 16;
-	y_m_d |= ((u32)bin2bcd((tm->tm_mon + 1))) << 8;
-	y_m_d |= ((u32)bin2bcd(tm->tm_mday));
-
-	h_m_s_ms |= ((u64)bin2bcd(tm->tm_hour)) << 56;
-	h_m_s_ms |= ((u64)bin2bcd(tm->tm_min)) << 48;
-	h_m_s_ms |= ((u64)bin2bcd(tm->tm_sec)) << 40;
 
-	while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
-		rc = opal_rtc_write(y_m_d, h_m_s_ms);
-		if (rc == OPAL_BUSY_EVENT)
-			opal_poll_events(NULL);
-		else
-			mdelay(10);
-	}
-	return rc == OPAL_SUCCESS ? 0 : -EIO;
+	return PTR_ERR_OR_ZERO(pdev);
 }
+machine_subsys_initcall(powernv, opal_time_init);
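opal_time_init() ends with PTR_ERR_OR_ZERO(pdev), the kernel convention in which some constructors encode an error code inside the pointer value itself and the helper collapses the result to zero-or-negative-errno. A self-contained sketch of that encoding (simplified from the real <linux/err.h> helpers; treat the details as an assumption of this illustration):

    #include <stdio.h>
    #include <errno.h>
    #include <stdint.h>

    #define MAX_ERRNO 4095

    /* Errors live in the last page of the address space. */
    static inline void *ERR_PTR(long err) { return (void *)err; }
    static inline long PTR_ERR(const void *p) { return (long)p; }
    static inline int IS_ERR(const void *p)
    {
        return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
    }
    static inline int PTR_ERR_OR_ZERO(const void *p)
    {
        return IS_ERR(p) ? (int)PTR_ERR(p) : 0;
    }

    int main(void)
    {
        void *ok = &ok, *bad = ERR_PTR(-ENODEV);

        printf("ok -> %d, bad -> %d\n",
               PTR_ERR_OR_ZERO(ok), PTR_ERR_OR_ZERO(bad));
        return 0;
    }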
diff --git a/arch/powerpc/platforms/powernv/opal-tracepoints.c b/arch/powerpc/platforms/powernv/opal-tracepoints.c
index ae14c40b4b1c..e11273b2386d 100644
--- a/arch/powerpc/platforms/powernv/opal-tracepoints.c
+++ b/arch/powerpc/platforms/powernv/opal-tracepoints.c
@@ -48,7 +48,7 @@ void __trace_opal_entry(unsigned long opcode, unsigned long *args)
 
 	local_irq_save(flags);
 
-	depth = &__get_cpu_var(opal_trace_depth);
+	depth = this_cpu_ptr(&opal_trace_depth);
 
 	if (*depth)
 		goto out;
@@ -69,7 +69,7 @@ void __trace_opal_exit(long opcode, unsigned long retval)
 
 	local_irq_save(flags);
 
-	depth = &__get_cpu_var(opal_trace_depth);
+	depth = this_cpu_ptr(&opal_trace_depth);
 
 	if (*depth)
 		goto out;
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index feb549aa3eea..0a299be588af 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -18,7 +18,7 @@
 	.section	".text"
 
 #ifdef CONFIG_TRACEPOINTS
-#ifdef CONFIG_JUMP_LABEL
+#ifdef HAVE_JUMP_LABEL
 #define OPAL_BRANCH(LABEL)					\
 	ARCH_STATIC_BRANCH(LABEL, opal_tracepoint_key)
 #else
@@ -250,3 +250,7 @@ OPAL_CALL(opal_handle_hmi, OPAL_HANDLE_HMI);
 OPAL_CALL(opal_register_dump_region, OPAL_REGISTER_DUMP_REGION);
 OPAL_CALL(opal_unregister_dump_region, OPAL_UNREGISTER_DUMP_REGION);
 OPAL_CALL(opal_pci_set_phb_cxl_mode, OPAL_PCI_SET_PHB_CXL_MODE);
+OPAL_CALL(opal_tpo_write, OPAL_WRITE_TPO);
+OPAL_CALL(opal_tpo_read, OPAL_READ_TPO);
+OPAL_CALL(opal_ipmi_send, OPAL_IPMI_SEND);
+OPAL_CALL(opal_ipmi_recv, OPAL_IPMI_RECV);
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index d019b081df9d..cb0b6de79cd4 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -50,7 +50,6 @@ static int mc_recoverable_range_len;
 
 struct device_node *opal_node;
 static DEFINE_SPINLOCK(opal_write_lock);
-extern u64 opal_mc_secondary_handler[];
 static unsigned int *opal_irqs;
 static unsigned int opal_irq_count;
 static ATOMIC_NOTIFIER_HEAD(opal_notifier_head);
@@ -644,6 +643,16 @@ static void __init opal_dump_region_init(void)
 		pr_warn("DUMP: Failed to register kernel log buffer. "
 			"rc = %d\n", rc);
 }
+
+static void opal_ipmi_init(struct device_node *opal_node)
+{
+	struct device_node *np;
+
+	for_each_child_of_node(opal_node, np)
+		if (of_device_is_compatible(np, "ibm,opal-ipmi"))
+			of_platform_device_create(np, NULL, NULL);
+}
+
 static int __init opal_init(void)
 {
 	struct device_node *np, *consoles;
@@ -707,6 +716,8 @@ static int __init opal_init(void)
 		opal_msglog_init();
 	}
 
+	opal_ipmi_init(opal_node);
+
 	return 0;
 }
 machine_subsys_initcall(powernv, opal_init);
@@ -742,6 +753,8 @@ void opal_shutdown(void)
 
 /* Export this so that test modules can use it */
 EXPORT_SYMBOL_GPL(opal_invalid_call);
+EXPORT_SYMBOL_GPL(opal_ipmi_send);
+EXPORT_SYMBOL_GPL(opal_ipmi_recv);
 
 /* Convert a region of vmalloc memory to an opal sg list */
 struct opal_sg_list *opal_vmalloc_to_sg_list(void *vmalloc_addr,
@@ -805,3 +818,9 @@ void opal_free_sg_list(struct opal_sg_list *sg)
 		sg = NULL;
 	}
 }
+
+EXPORT_SYMBOL_GPL(opal_poll_events);
+EXPORT_SYMBOL_GPL(opal_rtc_read);
+EXPORT_SYMBOL_GPL(opal_rtc_write);
+EXPORT_SYMBOL_GPL(opal_tpo_read);
+EXPORT_SYMBOL_GPL(opal_tpo_write);
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 3ba435ec3dcd..fac88ed8a915 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -91,6 +91,24 @@ static inline bool pnv_pci_is_mem_pref_64(unsigned long flags)
91 (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH)); 91 (IORESOURCE_MEM_64 | IORESOURCE_PREFETCH));
92} 92}
93 93
94static void pnv_ioda_reserve_pe(struct pnv_phb *phb, int pe_no)
95{
96 if (!(pe_no >= 0 && pe_no < phb->ioda.total_pe)) {
97 pr_warn("%s: Invalid PE %d on PHB#%x\n",
98 __func__, pe_no, phb->hose->global_number);
99 return;
100 }
101
102 if (test_and_set_bit(pe_no, phb->ioda.pe_alloc)) {
103 pr_warn("%s: PE %d was assigned on PHB#%x\n",
104 __func__, pe_no, phb->hose->global_number);
105 return;
106 }
107
108 phb->ioda.pe_array[pe_no].phb = phb;
109 phb->ioda.pe_array[pe_no].pe_number = pe_no;
110}
111
94static int pnv_ioda_alloc_pe(struct pnv_phb *phb) 112static int pnv_ioda_alloc_pe(struct pnv_phb *phb)
95{ 113{
96 unsigned long pe; 114 unsigned long pe;
@@ -172,7 +190,7 @@ fail:
 	return -EIO;
 }
 
-static void pnv_ioda2_alloc_m64_pe(struct pnv_phb *phb)
+static void pnv_ioda2_reserve_m64_pe(struct pnv_phb *phb)
 {
 	resource_size_t sgsz = phb->ioda.m64_segsize;
 	struct pci_dev *pdev;
@@ -185,16 +203,15 @@ static void pnv_ioda2_alloc_m64_pe(struct pnv_phb *phb)
 	 * instead of root bus.
 	 */
 	list_for_each_entry(pdev, &phb->hose->bus->devices, bus_list) {
-		for (i = PCI_BRIDGE_RESOURCES;
-		     i <= PCI_BRIDGE_RESOURCE_END; i++) {
-			r = &pdev->resource[i];
+		for (i = 0; i < PCI_BRIDGE_RESOURCE_NUM; i++) {
+			r = &pdev->resource[PCI_BRIDGE_RESOURCES + i];
 			if (!r->parent ||
 			    !pnv_pci_is_mem_pref_64(r->flags))
 				continue;
 
 			base = (r->start - phb->ioda.m64_base) / sgsz;
 			for (step = 0; step < resource_size(r) / sgsz; step++)
-				set_bit(base + step, phb->ioda.pe_alloc);
+				pnv_ioda_reserve_pe(phb, base + step);
 		}
 	}
 }
@@ -287,8 +304,6 @@ done:
 	while ((i = find_next_bit(pe_alloc, phb->ioda.total_pe, i + 1)) <
 		phb->ioda.total_pe) {
 		pe = &phb->ioda.pe_array[i];
-		pe->phb = phb;
-		pe->pe_number = i;
 
 		if (!master_pe) {
 			pe->flags |= PNV_IODA_PE_MASTER;
@@ -313,6 +328,12 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
 	const u32 *r;
 	u64 pci_addr;
 
+	/* FIXME: Support M64 for P7IOC */
+	if (phb->type != PNV_PHB_IODA2) {
+		pr_info("  Not support M64 window\n");
+		return;
+	}
+
 	if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
 		pr_info("  Firmware too old to support M64 window\n");
 		return;
@@ -325,12 +346,6 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
 		return;
 	}
 
-	/* FIXME: Support M64 for P7IOC */
-	if (phb->type != PNV_PHB_IODA2) {
-		pr_info("  Not support M64 window\n");
-		return;
-	}
-
 	res = &hose->mem_resources[1];
 	res->start = of_translate_address(dn, r + 2);
 	res->end = res->start + of_read_number(r + 4, 2) - 1;
@@ -345,7 +360,7 @@ static void __init pnv_ioda_parse_m64_window(struct pnv_phb *phb)
 	/* Use last M64 BAR to cover M64 window */
 	phb->ioda.m64_bar_idx = 15;
 	phb->init_m64 = pnv_ioda2_init_m64;
-	phb->alloc_m64_pe = pnv_ioda2_alloc_m64_pe;
+	phb->reserve_m64_pe = pnv_ioda2_reserve_m64_pe;
 	phb->pick_m64_pe = pnv_ioda2_pick_m64_pe;
 }
 
@@ -358,7 +373,9 @@ static void pnv_ioda_freeze_pe(struct pnv_phb *phb, int pe_no)
 	/* Fetch master PE */
 	if (pe->flags & PNV_IODA_PE_SLAVE) {
 		pe = pe->master;
-		WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER));
+		if (WARN_ON(!pe || !(pe->flags & PNV_IODA_PE_MASTER)))
+			return;
+
 		pe_no = pe->pe_number;
 	}
 
@@ -507,6 +524,106 @@ static struct pnv_ioda_pe *pnv_ioda_get_pe(struct pci_dev *dev)
 }
 #endif /* CONFIG_PCI_MSI */
 
+static int pnv_ioda_set_one_peltv(struct pnv_phb *phb,
+				  struct pnv_ioda_pe *parent,
+				  struct pnv_ioda_pe *child,
+				  bool is_add)
+{
+	const char *desc = is_add ? "adding" : "removing";
+	uint8_t op = is_add ? OPAL_ADD_PE_TO_DOMAIN :
+			      OPAL_REMOVE_PE_FROM_DOMAIN;
+	struct pnv_ioda_pe *slave;
+	long rc;
+
+	/* Parent PE affects child PE */
+	rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
+				child->pe_number, op);
+	if (rc != OPAL_SUCCESS) {
+		pe_warn(child, "OPAL error %ld %s to parent PELTV\n",
+			rc, desc);
+		return -ENXIO;
+	}
+
+	if (!(child->flags & PNV_IODA_PE_MASTER))
+		return 0;
+
+	/* Compound case: parent PE affects slave PEs */
+	list_for_each_entry(slave, &child->slaves, list) {
+		rc = opal_pci_set_peltv(phb->opal_id, parent->pe_number,
+					slave->pe_number, op);
+		if (rc != OPAL_SUCCESS) {
+			pe_warn(slave, "OPAL error %ld %s to parent PELTV\n",
+				rc, desc);
+			return -ENXIO;
+		}
+	}
+
+	return 0;
+}
+
+static int pnv_ioda_set_peltv(struct pnv_phb *phb,
+			      struct pnv_ioda_pe *pe,
+			      bool is_add)
+{
+	struct pnv_ioda_pe *slave;
+	struct pci_dev *pdev;
+	int ret;
+
+	/*
+	 * Clear PE frozen state. If it's master PE, we need
+	 * clear slave PE frozen state as well.
+	 */
+	if (is_add) {
+		opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
+					  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
+		if (pe->flags & PNV_IODA_PE_MASTER) {
+			list_for_each_entry(slave, &pe->slaves, list)
+				opal_pci_eeh_freeze_clear(phb->opal_id,
+							  slave->pe_number,
+							  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
+		}
+	}
+
+	/*
+	 * Associate PE in PELT. We need add the PE into the
+	 * corresponding PELT-V as well. Otherwise, the error
+	 * originated from the PE might contribute to other
+	 * PEs.
+	 */
+	ret = pnv_ioda_set_one_peltv(phb, pe, pe, is_add);
+	if (ret)
+		return ret;
+
+	/* For compound PEs, any one affects all of them */
+	if (pe->flags & PNV_IODA_PE_MASTER) {
+		list_for_each_entry(slave, &pe->slaves, list) {
+			ret = pnv_ioda_set_one_peltv(phb, slave, pe, is_add);
+			if (ret)
+				return ret;
+		}
+	}
+
+	if (pe->flags & (PNV_IODA_PE_BUS_ALL | PNV_IODA_PE_BUS))
+		pdev = pe->pbus->self;
+	else
+		pdev = pe->pdev->bus->self;
+	while (pdev) {
+		struct pci_dn *pdn = pci_get_pdn(pdev);
+		struct pnv_ioda_pe *parent;
+
+		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
+			parent = &phb->ioda.pe_array[pdn->pe_number];
+			ret = pnv_ioda_set_one_peltv(phb, parent, pe, is_add);
+			if (ret)
+				return ret;
+		}
+
+		pdev = pdev->bus->self;
+	}
+
+	return 0;
+}
+
 static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
 {
 	struct pci_dev *parent;
@@ -561,48 +678,36 @@ static int pnv_ioda_configure_pe(struct pnv_phb *phb, struct pnv_ioda_pe *pe)
 		return -ENXIO;
 	}
 
-	rc = opal_pci_set_peltv(phb->opal_id, pe->pe_number,
-				pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
-	if (rc)
-		pe_warn(pe, "OPAL error %d adding self to PELTV\n", rc);
-	opal_pci_eeh_freeze_clear(phb->opal_id, pe->pe_number,
-				  OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
+	/* Configure PELTV */
+	pnv_ioda_set_peltv(phb, pe, true);
 
-	/* Add to all parents PELT-V */
-	while (parent) {
-		struct pci_dn *pdn = pci_get_pdn(parent);
-		if (pdn && pdn->pe_number != IODA_INVALID_PE) {
-			rc = opal_pci_set_peltv(phb->opal_id, pdn->pe_number,
-						pe->pe_number, OPAL_ADD_PE_TO_DOMAIN);
-			/* XXX What to do in case of error ? */
-		}
-		parent = parent->bus->self;
-	}
 	/* Setup reverse map */
 	for (rid = pe->rid; rid < rid_end; rid++)
 		phb->ioda.pe_rmap[rid] = pe->pe_number;
 
 	/* Setup one MVTs on IODA1 */
-	if (phb->type == PNV_PHB_IODA1) {
-		pe->mve_number = pe->pe_number;
-		rc = opal_pci_set_mve(phb->opal_id, pe->mve_number,
-				      pe->pe_number);
+	if (phb->type != PNV_PHB_IODA1) {
+		pe->mve_number = 0;
+		goto out;
+	}
+
+	pe->mve_number = pe->pe_number;
+	rc = opal_pci_set_mve(phb->opal_id, pe->mve_number, pe->pe_number);
+	if (rc != OPAL_SUCCESS) {
+		pe_err(pe, "OPAL error %ld setting up MVE %d\n",
+		       rc, pe->mve_number);
+		pe->mve_number = -1;
+	} else {
+		rc = opal_pci_set_mve_enable(phb->opal_id,
+					     pe->mve_number, OPAL_ENABLE_MVE);
 		if (rc) {
-			pe_err(pe, "OPAL error %ld setting up MVE %d\n",
+			pe_err(pe, "OPAL error %ld enabling MVE %d\n",
 			       rc, pe->mve_number);
 			pe->mve_number = -1;
-		} else {
-			rc = opal_pci_set_mve_enable(phb->opal_id,
-						     pe->mve_number, OPAL_ENABLE_MVE);
-			if (rc) {
-				pe_err(pe, "OPAL error %ld enabling MVE %d\n",
-				       rc, pe->mve_number);
-				pe->mve_number = -1;
-			}
 		}
-	} else if (phb->type == PNV_PHB_IODA2)
-		pe->mve_number = 0;
+	}
 
+out:
 	return 0;
 }
 
@@ -837,8 +942,8 @@ static void pnv_pci_ioda_setup_PEs(void)
 		phb = hose->private_data;
 
 		/* M64 layout might affect PE allocation */
-		if (phb->alloc_m64_pe)
-			phb->alloc_m64_pe(phb);
+		if (phb->reserve_m64_pe)
+			phb->reserve_m64_pe(phb);
 
 		pnv_ioda_setup_PEs(hose->bus);
 	}
@@ -1834,19 +1939,14 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
 	phb_id = be64_to_cpup(prop64);
 	pr_debug("  PHB-ID  : 0x%016llx\n", phb_id);
 
-	phb = alloc_bootmem(sizeof(struct pnv_phb));
-	if (!phb) {
-		pr_err("  Out of memory !\n");
-		return;
-	}
+	phb = memblock_virt_alloc(sizeof(struct pnv_phb), 0);
 
 	/* Allocate PCI controller */
-	memset(phb, 0, sizeof(struct pnv_phb));
 	phb->hose = hose = pcibios_alloc_controller(np);
 	if (!phb->hose) {
 		pr_err("  Can't allocate PCI controller for %s\n",
 		       np->full_name);
-		free_bootmem((unsigned long)phb, sizeof(struct pnv_phb));
+		memblock_free(__pa(phb), sizeof(struct pnv_phb));
 		return;
 	}
 
@@ -1913,8 +2013,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
 	}
 	pemap_off = size;
 	size += phb->ioda.total_pe * sizeof(struct pnv_ioda_pe);
-	aux = alloc_bootmem(size);
-	memset(aux, 0, size);
+	aux = memblock_virt_alloc(size, 0);
 	phb->ioda.pe_alloc = aux;
 	phb->ioda.m32_segmap = aux + m32map_off;
 	if (phb->type == PNV_PHB_IODA1)
@@ -1999,8 +2098,8 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
 		ioda_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
 	}
 
-	/* Configure M64 window */
-	if (phb->init_m64 && phb->init_m64(phb))
+	/* Remove M64 resource if we can't configure it successfully */
+	if (!phb->init_m64 || phb->init_m64(phb))
 		hose->mem_resources[1].flags = 0;
 }
 
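The new pnv_ioda_reserve_pe() in this file replaces bare set_bit() calls so that reserving a PE both claims the allocation bit atomically (test_and_set_bit detects double reservation) and initializes the backing array slot at the same point. The core of that pattern in standalone C (simplified and non-atomic; all names invented for illustration):

    #include <stdio.h>
    #include <limits.h>

    #define TOTAL_PE 64
    #define BITS_PER_WORD (sizeof(unsigned long) * CHAR_BIT)

    static unsigned long pe_alloc[TOTAL_PE / BITS_PER_WORD + 1];
    struct pe { int number; int in_use; };
    static struct pe pe_array[TOTAL_PE];

    /* Returns the old bit value, setting it as a side effect. */
    static int test_and_set_bit(int nr, unsigned long *map)
    {
        unsigned long mask = 1UL << (nr % BITS_PER_WORD);
        unsigned long *w = &map[nr / BITS_PER_WORD];
        int old = !!(*w & mask);

        *w |= mask;
        return old;
    }

    static void reserve_pe(int pe_no)
    {
        if (pe_no < 0 || pe_no >= TOTAL_PE) {
            fprintf(stderr, "invalid PE %d\n", pe_no);
            return;
        }
        if (test_and_set_bit(pe_no, pe_alloc)) {
            fprintf(stderr, "PE %d already reserved\n", pe_no);
            return;
        }
        pe_array[pe_no].number = pe_no;  /* initialize at reserve time */
        pe_array[pe_no].in_use = 1;
    }

    int main(void)
    {
        reserve_pe(5);
        reserve_pe(5);   /* second call is detected and warned about */
        return 0;
    }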
diff --git a/arch/powerpc/platforms/powernv/pci-p5ioc2.c b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
index 94ce3481490b..6ef6d4d8e7e2 100644
--- a/arch/powerpc/platforms/powernv/pci-p5ioc2.c
+++ b/arch/powerpc/platforms/powernv/pci-p5ioc2.c
@@ -122,12 +122,9 @@ static void __init pnv_pci_init_p5ioc2_phb(struct device_node *np, u64 hub_id,
 		return;
 	}
 
-	phb = alloc_bootmem(sizeof(struct pnv_phb));
-	if (phb) {
-		memset(phb, 0, sizeof(struct pnv_phb));
-		phb->hose = pcibios_alloc_controller(np);
-	}
-	if (!phb || !phb->hose) {
+	phb = memblock_virt_alloc(sizeof(struct pnv_phb), 0);
+	phb->hose = pcibios_alloc_controller(np);
+	if (!phb->hose) {
 		pr_err("  Failed to allocate PCI controller\n");
 		return;
 	}
@@ -196,16 +193,27 @@ void __init pnv_pci_init_p5ioc2_hub(struct device_node *np)
 	hub_id = be64_to_cpup(prop64);
 	pr_info("  HUB-ID : 0x%016llx\n", hub_id);
 
+	/* Count child PHBs and calculate TCE space per PHB */
+	for_each_child_of_node(np, phbn) {
+		if (of_device_is_compatible(phbn, "ibm,p5ioc2-pcix") ||
+		    of_device_is_compatible(phbn, "ibm,p5ioc2-pciex"))
+			phb_count++;
+	}
+
+	if (phb_count <= 0) {
+		pr_info("  No PHBs for Hub %s\n", np->full_name);
+		return;
+	}
+
+	tce_per_phb = __rounddown_pow_of_two(P5IOC2_TCE_MEMORY / phb_count);
+	pr_info("  Allocating %lld MB of TCE memory per PHB\n",
+		tce_per_phb >> 20);
+
 	/* Currently allocate 16M of TCE memory for every Hub
 	 *
 	 * XXX TODO: Make it chip local if possible
 	 */
-	tce_mem = __alloc_bootmem(P5IOC2_TCE_MEMORY, P5IOC2_TCE_MEMORY,
-				  __pa(MAX_DMA_ADDRESS));
-	if (!tce_mem) {
-		pr_err("  Failed to allocate TCE Memory !\n");
-		return;
-	}
+	tce_mem = memblock_virt_alloc(P5IOC2_TCE_MEMORY, P5IOC2_TCE_MEMORY);
 	pr_debug("  TCE     : 0x%016lx..0x%016lx\n",
 		 __pa(tce_mem), __pa(tce_mem) + P5IOC2_TCE_MEMORY - 1);
 	rc = opal_pci_set_hub_tce_memory(hub_id, __pa(tce_mem),
@@ -215,18 +223,6 @@ void __init pnv_pci_init_p5ioc2_hub(struct device_node *np)
 		return;
 	}
 
-	/* Count child PHBs */
-	for_each_child_of_node(np, phbn) {
-		if (of_device_is_compatible(phbn, "ibm,p5ioc2-pcix") ||
-		    of_device_is_compatible(phbn, "ibm,p5ioc2-pciex"))
-			phb_count++;
-	}
-
-	/* Calculate how much TCE space we can give per PHB */
-	tce_per_phb = __rounddown_pow_of_two(P5IOC2_TCE_MEMORY / phb_count);
-	pr_info("  Allocating %lld MB of TCE memory per PHB\n",
-		tce_per_phb >> 20);
-
 	/* Initialize PHBs */
 	for_each_child_of_node(np, phbn) {
 		if (of_device_is_compatible(phbn, "ibm,p5ioc2-pcix") ||
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 540fc6dd56b3..4945e87f12dc 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -16,7 +16,6 @@
 #include <linux/delay.h>
 #include <linux/string.h>
 #include <linux/init.h>
-#include <linux/bootmem.h>
 #include <linux/irq.h>
 #include <linux/io.h>
 #include <linux/msi.h>
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 34d29eb2a4de..6c02ff8dd69f 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -130,7 +130,7 @@ struct pnv_phb {
 	u32		(*bdfn_to_pe)(struct pnv_phb *phb, struct pci_bus *bus, u32 devfn);
 	void		(*shutdown)(struct pnv_phb *phb);
 	int		(*init_m64)(struct pnv_phb *phb);
-	void		(*alloc_m64_pe)(struct pnv_phb *phb);
+	void		(*reserve_m64_pe)(struct pnv_phb *phb);
 	int		(*pick_m64_pe)(struct pnv_phb *phb, struct pci_bus *bus, int all);
 	int		(*get_pe_state)(struct pnv_phb *phb, int pe_no);
 	void		(*freeze_pe)(struct pnv_phb *phb, int pe_no);
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 3f9546d8a51f..30b1c3e298a6 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -265,10 +265,8 @@ static unsigned long pnv_memory_block_size(void)
 static void __init pnv_setup_machdep_opal(void)
 {
 	ppc_md.get_boot_time = opal_get_boot_time;
-	ppc_md.get_rtc_time = opal_get_rtc_time;
-	ppc_md.set_rtc_time = opal_set_rtc_time;
 	ppc_md.restart = pnv_restart;
-	ppc_md.power_off = pnv_power_off;
+	pm_power_off = pnv_power_off;
 	ppc_md.halt = pnv_halt;
 	ppc_md.machine_check_exception = opal_machine_check;
 	ppc_md.mce_check_early_recovery = opal_mce_check_early_recovery;
@@ -285,7 +283,7 @@ static void __init pnv_setup_machdep_rtas(void)
 		ppc_md.set_rtc_time = rtas_set_rtc_time;
 	}
 	ppc_md.restart = rtas_restart;
-	ppc_md.power_off = rtas_power_off;
+	pm_power_off = rtas_power_off;
 	ppc_md.halt = rtas_halt;
 }
 #endif /* CONFIG_PPC_POWERNV_RTAS */
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 4753958cd509..b716f666e48a 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -149,6 +149,7 @@ static int pnv_smp_cpu_disable(void)
 static void pnv_smp_cpu_kill_self(void)
 {
 	unsigned int cpu;
+	unsigned long srr1;
 
 	/* Standard hot unplug procedure */
 	local_irq_disable();
@@ -165,13 +166,25 @@ static void pnv_smp_cpu_kill_self(void)
 	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~(u64)LPCR_PECE1);
 	while (!generic_check_cpu_restart(cpu)) {
 		ppc64_runlatch_off();
-		power7_nap(1);
+		srr1 = power7_nap(1);
 		ppc64_runlatch_on();
 
-		/* Clear the IPI that woke us up */
-		icp_native_flush_interrupt();
-		local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
-		mb();
+		/*
+		 * If the SRR1 value indicates that we woke up due to
+		 * an external interrupt, then clear the interrupt.
+		 * We clear the interrupt before checking for the
+		 * reason, so as to avoid a race where we wake up for
+		 * some other reason, find nothing and clear the interrupt
+		 * just as some other cpu is sending us an interrupt.
+		 * If we returned from power7_nap as a result of
+		 * having finished executing in a KVM guest, then srr1
+		 * contains 0.
+		 */
+		if ((srr1 & SRR1_WAKEMASK) == SRR1_WAKEEE) {
+			icp_native_flush_interrupt();
+			local_paca->irq_happened &= PACA_IRQ_HARD_DIS;
+			smp_mb();
+		}
 
 		if (cpu_core_split_required())
 			continue;
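The nap loop now captures power7_nap()'s SRR1 value and only flushes the interrupt controller when the wake-reason field says an external interrupt arrived, avoiding the race described in the comment. The shape of the check in standalone C (the mask and field values below are placeholders, not the architected SRR1 encodings):

    #include <stdio.h>

    #define SRR1_WAKEMASK 0x003c0000UL   /* placeholder wake-reason field */
    #define SRR1_WAKEEE   0x00200000UL   /* placeholder "external intr" code */

    static void handle_wake(unsigned long srr1)
    {
        if ((srr1 & SRR1_WAKEMASK) == SRR1_WAKEEE)
            puts("woken by external interrupt: ack and flush it");
        else
            puts("woken for another reason (srr1 == 0 after a KVM guest)");
    }

    int main(void)
    {
        handle_wake(SRR1_WAKEEE);   /* interrupt path */
        handle_wake(0);             /* non-interrupt path: leave IRQs alone */
        return 0;
    }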
diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c
index 3e270e3412ae..2f95d33cf34a 100644
--- a/arch/powerpc/platforms/ps3/htab.c
+++ b/arch/powerpc/platforms/ps3/htab.c
@@ -110,7 +110,7 @@ static long ps3_hpte_remove(unsigned long hpte_group)
 
 static long ps3_hpte_updatepp(unsigned long slot, unsigned long newpp,
 			      unsigned long vpn, int psize, int apsize,
-			      int ssize, int local)
+			      int ssize, unsigned long inv_flags)
 {
 	int result;
 	u64 hpte_v, want_v, hpte_rs;
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c
index 5f3b23220b8e..a6c42f34303a 100644
--- a/arch/powerpc/platforms/ps3/interrupt.c
+++ b/arch/powerpc/platforms/ps3/interrupt.c
@@ -711,7 +711,7 @@ void __init ps3_register_ipi_irq(unsigned int cpu, unsigned int virq)
 
 static unsigned int ps3_get_irq(void)
 {
-	struct ps3_private *pd = &__get_cpu_var(ps3_private);
+	struct ps3_private *pd = this_cpu_ptr(&ps3_private);
 	u64 x = (pd->bmp.status & pd->bmp.mask);
 	unsigned int plug;
 
diff --git a/arch/powerpc/platforms/ps3/setup.c b/arch/powerpc/platforms/ps3/setup.c
index 3f509f86432c..799c8580ab09 100644
--- a/arch/powerpc/platforms/ps3/setup.c
+++ b/arch/powerpc/platforms/ps3/setup.c
@@ -125,12 +125,7 @@ static void __init prealloc(struct ps3_prealloc *p)
 	if (!p->size)
 		return;
 
-	p->address = __alloc_bootmem(p->size, p->align, __pa(MAX_DMA_ADDRESS));
-	if (!p->address) {
-		printk(KERN_ERR "%s: Cannot allocate %s\n", __func__,
-		       p->name);
-		return;
-	}
+	p->address = memblock_virt_alloc(p->size, p->align);
 
 	printk(KERN_INFO "%s: %lu bytes at %p\n", p->name, p->size,
 	       p->address);
@@ -248,6 +243,7 @@ static int __init ps3_probe(void)
 	ps3_mm_init();
 	ps3_mm_vas_create(&htab_size);
 	ps3_hpte_init(htab_size);
+	pm_power_off = ps3_power_off;
 
 	DBG(" <- %s:%d\n", __func__, __LINE__);
 	return 1;
@@ -278,7 +274,6 @@ define_machine(ps3) {
 	.calibrate_decr			= ps3_calibrate_decr,
 	.progress			= ps3_progress,
 	.restart			= ps3_restart,
-	.power_off			= ps3_power_off,
 	.halt				= ps3_halt,
 #if defined(CONFIG_KEXEC)
 	.kexec_cpu_down			= ps3_kexec_cpu_down,
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c
index 1062f71f5a85..39049e4884fb 100644
--- a/arch/powerpc/platforms/pseries/dtl.c
+++ b/arch/powerpc/platforms/pseries/dtl.c
@@ -75,7 +75,7 @@ static atomic_t dtl_count;
  */
 static void consume_dtle(struct dtl_entry *dtle, u64 index)
 {
-	struct dtl_ring *dtlr = &__get_cpu_var(dtl_rings);
+	struct dtl_ring *dtlr = this_cpu_ptr(&dtl_rings);
 	struct dtl_entry *wp = dtlr->write_ptr;
 	struct lppaca *vpa = local_paca->lppaca_ptr;
 
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c
index 1bbb78fab530..fa41f0da5b6f 100644
--- a/arch/powerpc/platforms/pseries/hotplug-memory.c
+++ b/arch/powerpc/platforms/pseries/hotplug-memory.c
@@ -12,7 +12,6 @@
 #include <linux/of.h>
 #include <linux/of_address.h>
 #include <linux/memblock.h>
-#include <linux/vmalloc.h>
 #include <linux/memory.h>
 #include <linux/memory_hotplug.h>
 
@@ -66,22 +65,6 @@ unsigned long pseries_memory_block_size(void)
 }
 
 #ifdef CONFIG_MEMORY_HOTREMOVE
-static int pseries_remove_memory(u64 start, u64 size)
-{
-	int ret;
-
-	/* Remove htab bolted mappings for this section of memory */
-	start = (unsigned long)__va(start);
-	ret = remove_section_mapping(start, start + size);
-
-	/* Ensure all vmalloc mappings are flushed in case they also
-	 * hit that section of memory
-	 */
-	vm_unmap_aliases();
-
-	return ret;
-}
-
 static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size)
 {
 	unsigned long block_sz, start_pfn;
@@ -261,10 +244,6 @@ static int __init pseries_memory_hotplug_init(void)
261 if (firmware_has_feature(FW_FEATURE_LPAR)) 244 if (firmware_has_feature(FW_FEATURE_LPAR))
262 of_reconfig_notifier_register(&pseries_mem_nb); 245 of_reconfig_notifier_register(&pseries_mem_nb);
263 246
264#ifdef CONFIG_MEMORY_HOTREMOVE
265 ppc_md.remove_memory = pseries_remove_memory;
266#endif
267
268 return 0; 247 return 0;
269} 248}
270machine_device_initcall(pseries, pseries_memory_hotplug_init); 249machine_device_initcall(pseries, pseries_memory_hotplug_init);
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S
index 3fda3f17b84e..ccd53f91e8aa 100644
--- a/arch/powerpc/platforms/pseries/hvCall.S
+++ b/arch/powerpc/platforms/pseries/hvCall.S
@@ -18,7 +18,7 @@
18 18
19#ifdef CONFIG_TRACEPOINTS 19#ifdef CONFIG_TRACEPOINTS
20 20
21#ifndef CONFIG_JUMP_LABEL 21#ifndef HAVE_JUMP_LABEL
22 .section ".toc","aw" 22 .section ".toc","aw"
23 23
24 .globl hcall_tracepoint_refcount 24 .globl hcall_tracepoint_refcount
@@ -78,7 +78,7 @@ hcall_tracepoint_refcount:
78 mr r5,BUFREG; \ 78 mr r5,BUFREG; \
79 __HCALL_INST_POSTCALL 79 __HCALL_INST_POSTCALL
80 80
81#ifdef CONFIG_JUMP_LABEL 81#ifdef HAVE_JUMP_LABEL
82#define HCALL_BRANCH(LABEL) \ 82#define HCALL_BRANCH(LABEL) \
83 ARCH_STATIC_BRANCH(LABEL, hcall_tracepoint_key) 83 ARCH_STATIC_BRANCH(LABEL, hcall_tracepoint_key)
84#else 84#else
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c
index 4575f0c9e521..f02ec3ab428c 100644
--- a/arch/powerpc/platforms/pseries/hvCall_inst.c
+++ b/arch/powerpc/platforms/pseries/hvCall_inst.c
@@ -110,7 +110,7 @@ static void probe_hcall_entry(void *ignored, unsigned long opcode, unsigned long
110 if (opcode > MAX_HCALL_OPCODE) 110 if (opcode > MAX_HCALL_OPCODE)
111 return; 111 return;
112 112
113 h = &__get_cpu_var(hcall_stats)[opcode / 4]; 113 h = this_cpu_ptr(&hcall_stats[opcode / 4]);
114 h->tb_start = mftb(); 114 h->tb_start = mftb();
115 h->purr_start = mfspr(SPRN_PURR); 115 h->purr_start = mfspr(SPRN_PURR);
116} 116}
@@ -123,7 +123,7 @@ static void probe_hcall_exit(void *ignored, unsigned long opcode, unsigned long
123 if (opcode > MAX_HCALL_OPCODE) 123 if (opcode > MAX_HCALL_OPCODE)
124 return; 124 return;
125 125
126 h = &__get_cpu_var(hcall_stats)[opcode / 4]; 126 h = this_cpu_ptr(&hcall_stats[opcode / 4]);
127 h->num_calls++; 127 h->num_calls++;
128 h->tb_total += mftb() - h->tb_start; 128 h->tb_total += mftb() - h->tb_start;
129 h->purr_total += mfspr(SPRN_PURR) - h->purr_start; 129 h->purr_total += mfspr(SPRN_PURR) - h->purr_start;
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index 3e5bfdafee63..1d3d52dc3ff3 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -199,7 +199,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
199 199
200 local_irq_save(flags); /* to protect tcep and the page behind it */ 200 local_irq_save(flags); /* to protect tcep and the page behind it */
201 201
202 tcep = __get_cpu_var(tce_page); 202 tcep = __this_cpu_read(tce_page);
203 203
204 /* This is safe to do since interrupts are off when we're called 204 /* This is safe to do since interrupts are off when we're called
205 * from iommu_alloc{,_sg}() 205 * from iommu_alloc{,_sg}()
@@ -212,7 +212,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
212 return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr, 212 return tce_build_pSeriesLP(tbl, tcenum, npages, uaddr,
213 direction, attrs); 213 direction, attrs);
214 } 214 }
215 __get_cpu_var(tce_page) = tcep; 215 __this_cpu_write(tce_page, tcep);
216 } 216 }
217 217
218 rpn = __pa(uaddr) >> TCE_SHIFT; 218 rpn = __pa(uaddr) >> TCE_SHIFT;
@@ -398,7 +398,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
398 long l, limit; 398 long l, limit;
399 399
400 local_irq_disable(); /* to protect tcep and the page behind it */ 400 local_irq_disable(); /* to protect tcep and the page behind it */
401 tcep = __get_cpu_var(tce_page); 401 tcep = __this_cpu_read(tce_page);
402 402
403 if (!tcep) { 403 if (!tcep) {
404 tcep = (__be64 *)__get_free_page(GFP_ATOMIC); 404 tcep = (__be64 *)__get_free_page(GFP_ATOMIC);
@@ -406,7 +406,7 @@ static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn,
406 local_irq_enable(); 406 local_irq_enable();
407 return -ENOMEM; 407 return -ENOMEM;
408 } 408 }
409 __get_cpu_var(tce_page) = tcep; 409 __this_cpu_write(tce_page, tcep);
410 } 410 }
411 411
412 proto_tce = TCE_PCI_READ | TCE_PCI_WRITE; 412 proto_tce = TCE_PCI_READ | TCE_PCI_WRITE;
@@ -574,8 +574,7 @@ static void pci_dma_bus_setup_pSeries(struct pci_bus *bus)
574 while (isa_dn && isa_dn != dn) 574 while (isa_dn && isa_dn != dn)
575 isa_dn = isa_dn->parent; 575 isa_dn = isa_dn->parent;
576 576
577 if (isa_dn_orig) 577 of_node_put(isa_dn_orig);
578 of_node_put(isa_dn_orig);
579 578
580 /* Count number of direct PCI children of the PHB. */ 579 /* Count number of direct PCI children of the PHB. */
581 for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling) 580 for (children = 0, tmp = dn->child; tmp; tmp = tmp->sibling)
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index f6880d2a40fb..469751d92004 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -284,7 +284,7 @@ static long pSeries_lpar_hpte_updatepp(unsigned long slot,
284 unsigned long newpp, 284 unsigned long newpp,
285 unsigned long vpn, 285 unsigned long vpn,
286 int psize, int apsize, 286 int psize, int apsize,
287 int ssize, int local) 287 int ssize, unsigned long inv_flags)
288{ 288{
289 unsigned long lpar_rc; 289 unsigned long lpar_rc;
290 unsigned long flags = (newpp & 7) | H_AVPN; 290 unsigned long flags = (newpp & 7) | H_AVPN;
@@ -442,7 +442,7 @@ static void __pSeries_lpar_hugepage_invalidate(unsigned long *slot,
442static void pSeries_lpar_hugepage_invalidate(unsigned long vsid, 442static void pSeries_lpar_hugepage_invalidate(unsigned long vsid,
443 unsigned long addr, 443 unsigned long addr,
444 unsigned char *hpte_slot_array, 444 unsigned char *hpte_slot_array,
445 int psize, int ssize) 445 int psize, int ssize, int local)
446{ 446{
447 int i, index = 0; 447 int i, index = 0;
448 unsigned long s_addr = addr; 448 unsigned long s_addr = addr;
@@ -515,7 +515,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local)
515 unsigned long vpn; 515 unsigned long vpn;
516 unsigned long i, pix, rc; 516 unsigned long i, pix, rc;
517 unsigned long flags = 0; 517 unsigned long flags = 0;
518 struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); 518 struct ppc64_tlb_batch *batch = this_cpu_ptr(&ppc64_tlb_batch);
519 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); 519 int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE);
520 unsigned long param[9]; 520 unsigned long param[9];
521 unsigned long hash, index, shift, hidx, slot; 521 unsigned long hash, index, shift, hidx, slot;
@@ -705,7 +705,7 @@ void __trace_hcall_entry(unsigned long opcode, unsigned long *args)
705 705
706 local_irq_save(flags); 706 local_irq_save(flags);
707 707
708 depth = &__get_cpu_var(hcall_trace_depth); 708 depth = this_cpu_ptr(&hcall_trace_depth);
709 709
710 if (*depth) 710 if (*depth)
711 goto out; 711 goto out;
@@ -730,7 +730,7 @@ void __trace_hcall_exit(long opcode, unsigned long retval,
730 730
731 local_irq_save(flags); 731 local_irq_save(flags);
732 732
733 depth = &__get_cpu_var(hcall_trace_depth); 733 depth = this_cpu_ptr(&hcall_trace_depth);
734 734
735 if (*depth) 735 if (*depth)
736 goto out; 736 goto out;
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 11a3b617ef5d..054a0ed5c7ee 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -715,6 +715,8 @@ static int nvram_pstore_init(void)
715 nvram_pstore_info.buf = oops_data; 715 nvram_pstore_info.buf = oops_data;
716 nvram_pstore_info.bufsize = oops_data_sz; 716 nvram_pstore_info.bufsize = oops_data_sz;
717 717
718 spin_lock_init(&nvram_pstore_info.buf_lock);
719
718 rc = pstore_register(&nvram_pstore_info); 720 rc = pstore_register(&nvram_pstore_info);
719 if (rc != 0) 721 if (rc != 0)
720 pr_err("nvram: pstore_register() failed, defaults to " 722 pr_err("nvram: pstore_register() failed, defaults to "
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index 67e48594040c..fe16a50700de 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -134,7 +134,7 @@ int pseries_root_bridge_prepare(struct pci_host_bridge *bridge)
134 of_node_put(pdn); 134 of_node_put(pdn);
135 135
136 if (rc) { 136 if (rc) {
137 pr_err("no ibm,pcie-link-speed-stats property\n"); 137 pr_debug("no ibm,pcie-link-speed-stats property\n");
138 return 0; 138 return 0;
139 } 139 }
140 140
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c
index 5a4d0fc03b03..c3b2a7e81ddb 100644
--- a/arch/powerpc/platforms/pseries/ras.c
+++ b/arch/powerpc/platforms/pseries/ras.c
@@ -302,8 +302,8 @@ static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs)
302 /* If it isn't an extended log we can use the per cpu 64bit buffer */ 302 /* If it isn't an extended log we can use the per cpu 64bit buffer */
303 h = (struct rtas_error_log *)&savep[1]; 303 h = (struct rtas_error_log *)&savep[1];
304 if (!rtas_error_extended(h)) { 304 if (!rtas_error_extended(h)) {
305 memcpy(&__get_cpu_var(mce_data_buf), h, sizeof(__u64)); 305 memcpy(this_cpu_ptr(&mce_data_buf), h, sizeof(__u64));
306 errhdr = (struct rtas_error_log *)&__get_cpu_var(mce_data_buf); 306 errhdr = (struct rtas_error_log *)this_cpu_ptr(&mce_data_buf);
307 } else { 307 } else {
308 int len, error_log_length; 308 int len, error_log_length;
309 309
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index ed8a90022a3d..e445b6701f50 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -500,7 +500,11 @@ static void __init pSeries_setup_arch(void)
500 500
501 if (firmware_has_feature(FW_FEATURE_SET_MODE)) { 501 if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
502 long rc; 502 long rc;
503 if ((rc = pSeries_enable_reloc_on_exc()) != H_SUCCESS) { 503
504 rc = pSeries_enable_reloc_on_exc();
505 if (rc == H_P2) {
506 pr_info("Relocation on exceptions not supported\n");
507 } else if (rc != H_SUCCESS) {
504 pr_warn("Unable to enable relocation on exceptions: " 508 pr_warn("Unable to enable relocation on exceptions: "
505 "%ld\n", rc); 509 "%ld\n", rc);
506 } 510 }
@@ -660,6 +664,34 @@ static void __init pSeries_init_early(void)
660 pr_debug(" <- pSeries_init_early()\n"); 664 pr_debug(" <- pSeries_init_early()\n");
661} 665}
662 666
667/**
 668 * pseries_power_off - tell firmware how to power off the system.
 669 *
 670 * This function calls either the power-off RTAS token in normal cases
 671 * or the ibm,power-off-ups token (if present & requested) in case of
 672 * a power failure. If the power-off token is used, powering back on is
 673 * only possible via the power button. If the ibm,power-off-ups token is
 674 * used, automatic power-on after power is restored is allowed.
675 */
676static void pseries_power_off(void)
677{
678 int rc;
679 int rtas_poweroff_ups_token = rtas_token("ibm,power-off-ups");
680
681 if (rtas_flash_term_hook)
682 rtas_flash_term_hook(SYS_POWER_OFF);
683
684 if (rtas_poweron_auto == 0 ||
685 rtas_poweroff_ups_token == RTAS_UNKNOWN_SERVICE) {
686 rc = rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1);
687 printk(KERN_INFO "RTAS power-off returned %d\n", rc);
688 } else {
689 rc = rtas_call(rtas_poweroff_ups_token, 0, 1, NULL);
690 printk(KERN_INFO "RTAS ibm,power-off-ups returned %d\n", rc);
691 }
692 for (;;);
693}
694
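Both the ps3 and pseries hunks move the power-off hook from the per-machine descriptor (.power_off in define_machine()) to the generic pm_power_off function pointer, which the kernel's power-off path invokes. A minimal sketch of the new registration pattern, assuming a hypothetical platform probe:

    #include <linux/pm.h>       /* pm_power_off */

    static void demo_power_off(void)
    {
            /* ask firmware to cut power; must not return */
            for (;;)
                    ;
    }

    static int __init demo_probe(void)
    {
            pm_power_off = demo_power_off;  /* was: .power_off in define_machine() */
            return 1;
    }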
663/* 695/*
664 * Called very early, MMU is off, device-tree isn't unflattened 696 * Called very early, MMU is off, device-tree isn't unflattened
665 */ 697 */
@@ -742,6 +774,8 @@ static int __init pSeries_probe(void)
742 else 774 else
743 hpte_init_native(); 775 hpte_init_native();
744 776
777 pm_power_off = pseries_power_off;
778
745 pr_debug("Machine is%s LPAR !\n", 779 pr_debug("Machine is%s LPAR !\n",
746 (powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not"); 780 (powerpc_firmware_features & FW_FEATURE_LPAR) ? "" : " not");
747 781
@@ -755,34 +789,6 @@ static int pSeries_pci_probe_mode(struct pci_bus *bus)
755 return PCI_PROBE_NORMAL; 789 return PCI_PROBE_NORMAL;
756} 790}
757 791
758/**
759 * pSeries_power_off - tell firmware about how to power off the system.
760 *
761 * This function calls either the power-off rtas token in normal cases
762 * or the ibm,power-off-ups token (if present & requested) in case of
763 * a power failure. If power-off token is used, power on will only be
764 * possible with power button press. If ibm,power-off-ups token is used
765 * it will allow auto poweron after power is restored.
766 */
767static void pSeries_power_off(void)
768{
769 int rc;
770 int rtas_poweroff_ups_token = rtas_token("ibm,power-off-ups");
771
772 if (rtas_flash_term_hook)
773 rtas_flash_term_hook(SYS_POWER_OFF);
774
775 if (rtas_poweron_auto == 0 ||
776 rtas_poweroff_ups_token == RTAS_UNKNOWN_SERVICE) {
777 rc = rtas_call(rtas_token("power-off"), 2, 1, NULL, -1, -1);
778 printk(KERN_INFO "RTAS power-off returned %d\n", rc);
779 } else {
780 rc = rtas_call(rtas_poweroff_ups_token, 0, 1, NULL);
781 printk(KERN_INFO "RTAS ibm,power-off-ups returned %d\n", rc);
782 }
783 for (;;);
784}
785
786#ifndef CONFIG_PCI 792#ifndef CONFIG_PCI
787void pSeries_final_fixup(void) { } 793void pSeries_final_fixup(void) { }
788#endif 794#endif
@@ -797,7 +803,6 @@ define_machine(pseries) {
797 .pcibios_fixup = pSeries_final_fixup, 803 .pcibios_fixup = pSeries_final_fixup,
798 .pci_probe_mode = pSeries_pci_probe_mode, 804 .pci_probe_mode = pSeries_pci_probe_mode,
799 .restart = rtas_restart, 805 .restart = rtas_restart,
800 .power_off = pSeries_power_off,
801 .halt = rtas_halt, 806 .halt = rtas_halt,
802 .panic = rtas_os_term, 807 .panic = rtas_os_term,
803 .get_boot_time = rtas_get_boot_time, 808 .get_boot_time = rtas_get_boot_time,
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index 7aed8d0876b7..d09f4fa2c3d1 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -13,7 +13,6 @@
13 * 13 *
14 */ 14 */
15#include <linux/irq.h> 15#include <linux/irq.h>
16#include <linux/bootmem.h>
17#include <linux/msi.h> 16#include <linux/msi.h>
18#include <linux/pci.h> 17#include <linux/pci.h>
19#include <linux/slab.h> 18#include <linux/slab.h>
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c
index 65d2ed4549e6..6455c1eada1a 100644
--- a/arch/powerpc/sysdev/fsl_pci.c
+++ b/arch/powerpc/sysdev/fsl_pci.c
@@ -23,7 +23,6 @@
23#include <linux/string.h> 23#include <linux/string.h>
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/bootmem.h>
27#include <linux/memblock.h> 26#include <linux/memblock.h>
28#include <linux/log2.h> 27#include <linux/log2.h>
29#include <linux/slab.h> 28#include <linux/slab.h>
@@ -152,7 +151,7 @@ static int setup_one_atmu(struct ccsr_pci __iomem *pci,
152 flags |= 0x10000000; /* enable relaxed ordering */ 151 flags |= 0x10000000; /* enable relaxed ordering */
153 152
154 for (i = 0; size > 0; i++) { 153 for (i = 0; size > 0; i++) {
155 unsigned int bits = min(ilog2(size), 154 unsigned int bits = min_t(u32, ilog2(size),
156 __ffs(pci_addr | phys_addr)); 155 __ffs(pci_addr | phys_addr));
157 156
158 if (index + i >= 5) 157 if (index + i >= 5)
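min() insists that both arguments have the same type and triggers a compile-time warning otherwise; ilog2() evaluates to an int while __ffs() returns an unsigned long, hence the switch to min_t(u32, ...), which casts both sides before comparing. A sketch of the distinction:

    #include <linux/kernel.h>   /* min_t() */
    #include <linux/log2.h>     /* ilog2() */
    #include <linux/bitops.h>   /* __ffs() */

    static unsigned int window_bits(u64 size, u64 pci_addr, u64 phys_addr)
    {
            /* min(ilog2(size), __ffs(...)) mixes int and unsigned long;
             * min_t(u32, ...) makes the comparison well-typed.
             */
            return min_t(u32, ilog2(size), __ffs(pci_addr | phys_addr));
    }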
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c
index c04b718307c8..08d60f183dad 100644
--- a/arch/powerpc/sysdev/fsl_rio.c
+++ b/arch/powerpc/sysdev/fsl_rio.c
@@ -58,6 +58,19 @@
58#define RIO_ISR_AACR 0x10120 58#define RIO_ISR_AACR 0x10120
59#define RIO_ISR_AACR_AA 0x1 /* Accept All ID */ 59#define RIO_ISR_AACR_AA 0x1 /* Accept All ID */
60 60
61#define RIWTAR_TRAD_VAL_SHIFT 12
62#define RIWTAR_TRAD_MASK 0x00FFFFFF
63#define RIWBAR_BADD_VAL_SHIFT 12
64#define RIWBAR_BADD_MASK 0x003FFFFF
65#define RIWAR_ENABLE 0x80000000
66#define RIWAR_TGINT_LOCAL 0x00F00000
67#define RIWAR_RDTYP_NO_SNOOP 0x00040000
68#define RIWAR_RDTYP_SNOOP 0x00050000
69#define RIWAR_WRTYP_NO_SNOOP 0x00004000
70#define RIWAR_WRTYP_SNOOP 0x00005000
71#define RIWAR_WRTYP_ALLOC 0x00006000
72#define RIWAR_SIZE_MASK 0x0000003F
73
61#define __fsl_read_rio_config(x, addr, err, op) \ 74#define __fsl_read_rio_config(x, addr, err, op) \
62 __asm__ __volatile__( \ 75 __asm__ __volatile__( \
63 "1: "op" %1,0(%2)\n" \ 76 "1: "op" %1,0(%2)\n" \
@@ -266,6 +279,89 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid,
266 return 0; 279 return 0;
267} 280}
268 281
282static void fsl_rio_inbound_mem_init(struct rio_priv *priv)
283{
284 int i;
285
286 /* close inbound windows */
287 for (i = 0; i < RIO_INB_ATMU_COUNT; i++)
288 out_be32(&priv->inb_atmu_regs[i].riwar, 0);
289}
290
291int fsl_map_inb_mem(struct rio_mport *mport, dma_addr_t lstart,
292 u64 rstart, u32 size, u32 flags)
293{
294 struct rio_priv *priv = mport->priv;
295 u32 base_size;
296 unsigned int base_size_log;
297 u64 win_start, win_end;
298 u32 riwar;
299 int i;
300
301 if ((size & (size - 1)) != 0)
302 return -EINVAL;
303
304 base_size_log = ilog2(size);
305 base_size = 1 << base_size_log;
306
307 /* check if addresses are aligned with the window size */
308 if (lstart & (base_size - 1))
309 return -EINVAL;
310 if (rstart & (base_size - 1))
311 return -EINVAL;
312
313 /* check for conflicting ranges */
314 for (i = 0; i < RIO_INB_ATMU_COUNT; i++) {
315 riwar = in_be32(&priv->inb_atmu_regs[i].riwar);
316 if ((riwar & RIWAR_ENABLE) == 0)
317 continue;
318 win_start = ((u64)(in_be32(&priv->inb_atmu_regs[i].riwbar) & RIWBAR_BADD_MASK))
319 << RIWBAR_BADD_VAL_SHIFT;
320 win_end = win_start + ((1 << ((riwar & RIWAR_SIZE_MASK) + 1)) - 1);
321 if (rstart < win_end && (rstart + size) > win_start)
322 return -EINVAL;
323 }
324
325 /* find unused atmu */
326 for (i = 0; i < RIO_INB_ATMU_COUNT; i++) {
327 riwar = in_be32(&priv->inb_atmu_regs[i].riwar);
328 if ((riwar & RIWAR_ENABLE) == 0)
329 break;
330 }
331 if (i >= RIO_INB_ATMU_COUNT)
332 return -ENOMEM;
333
334 out_be32(&priv->inb_atmu_regs[i].riwtar, lstart >> RIWTAR_TRAD_VAL_SHIFT);
335 out_be32(&priv->inb_atmu_regs[i].riwbar, rstart >> RIWBAR_BADD_VAL_SHIFT);
336 out_be32(&priv->inb_atmu_regs[i].riwar, RIWAR_ENABLE | RIWAR_TGINT_LOCAL |
337 RIWAR_RDTYP_SNOOP | RIWAR_WRTYP_SNOOP | (base_size_log - 1));
338
339 return 0;
340}
341
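fsl_map_inb_mem() only accepts power-of-two window sizes, requires both addresses to be aligned to that size, and rejects windows that overlap an already-enabled one. A standalone sketch of those checks (the size == 0 guard is an addition here, since ilog2(0) is undefined):

    #include <stdbool.h>
    #include <stdint.h>

    /* mirrors the validation at the top of fsl_map_inb_mem() */
    static bool inb_window_args_ok(uint64_t lstart, uint64_t rstart, uint32_t size)
    {
            if (size == 0 || (size & (size - 1)) != 0)
                    return false;           /* size must be a power of two */
            if ((lstart & (size - 1)) || (rstart & (size - 1)))
                    return false;           /* both addresses window-aligned */
            return true;
    }

    /* windows [a0,a1) and [b0,b1) conflict iff a0 < b1 && b0 < a1 */
    static bool ranges_overlap(uint64_t a0, uint64_t a1, uint64_t b0, uint64_t b1)
    {
            return a0 < b1 && b0 < a1;
    }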
342void fsl_unmap_inb_mem(struct rio_mport *mport, dma_addr_t lstart)
343{
344 u32 win_start_shift, base_start_shift;
345 struct rio_priv *priv = mport->priv;
346 u32 riwar, riwtar;
347 int i;
348
349 /* skip default window */
350 base_start_shift = lstart >> RIWTAR_TRAD_VAL_SHIFT;
351 for (i = 0; i < RIO_INB_ATMU_COUNT; i++) {
352 riwar = in_be32(&priv->inb_atmu_regs[i].riwar);
353 if ((riwar & RIWAR_ENABLE) == 0)
354 continue;
355
356 riwtar = in_be32(&priv->inb_atmu_regs[i].riwtar);
357 win_start_shift = riwtar & RIWTAR_TRAD_MASK;
358 if (win_start_shift == base_start_shift) {
359 out_be32(&priv->inb_atmu_regs[i].riwar, riwar & ~RIWAR_ENABLE);
360 return;
361 }
362 }
363}
364
269void fsl_rio_port_error_handler(int offset) 365void fsl_rio_port_error_handler(int offset)
270{ 366{
271 /*XXX: Error recovery is not implemented, we just clear errors */ 367 /*XXX: Error recovery is not implemented, we just clear errors */
@@ -389,6 +485,8 @@ int fsl_rio_setup(struct platform_device *dev)
389 ops->add_outb_message = fsl_add_outb_message; 485 ops->add_outb_message = fsl_add_outb_message;
390 ops->add_inb_buffer = fsl_add_inb_buffer; 486 ops->add_inb_buffer = fsl_add_inb_buffer;
391 ops->get_inb_message = fsl_get_inb_message; 487 ops->get_inb_message = fsl_get_inb_message;
488 ops->map_inb = fsl_map_inb_mem;
489 ops->unmap_inb = fsl_unmap_inb_mem;
392 490
393 rmu_node = of_parse_phandle(dev->dev.of_node, "fsl,srio-rmu-handle", 0); 491 rmu_node = of_parse_phandle(dev->dev.of_node, "fsl,srio-rmu-handle", 0);
394 if (!rmu_node) { 492 if (!rmu_node) {
@@ -602,6 +700,11 @@ int fsl_rio_setup(struct platform_device *dev)
602 RIO_ATMU_REGS_PORT2_OFFSET)); 700 RIO_ATMU_REGS_PORT2_OFFSET));
603 701
604 priv->maint_atmu_regs = priv->atmu_regs + 1; 702 priv->maint_atmu_regs = priv->atmu_regs + 1;
703 priv->inb_atmu_regs = (struct rio_inb_atmu_regs __iomem *)
704 (priv->regs_win +
705 ((i == 0) ? RIO_INB_ATMU_REGS_PORT1_OFFSET :
706 RIO_INB_ATMU_REGS_PORT2_OFFSET));
707
605 708
606 /* Set to receive any dist ID for serial RapidIO controller. */ 709 /* Set to receive any dist ID for serial RapidIO controller. */
607 if (port->phy_type == RIO_PHY_SERIAL) 710 if (port->phy_type == RIO_PHY_SERIAL)
@@ -620,6 +723,7 @@ int fsl_rio_setup(struct platform_device *dev)
620 rio_law_start = range_start; 723 rio_law_start = range_start;
621 724
622 fsl_rio_setup_rmu(port, rmu_np[i]); 725 fsl_rio_setup_rmu(port, rmu_np[i]);
726 fsl_rio_inbound_mem_init(priv);
623 727
624 dbell->mport[i] = port; 728 dbell->mport[i] = port;
625 729
diff --git a/arch/powerpc/sysdev/fsl_rio.h b/arch/powerpc/sysdev/fsl_rio.h
index ae8e27405a0d..d53407a34f32 100644
--- a/arch/powerpc/sysdev/fsl_rio.h
+++ b/arch/powerpc/sysdev/fsl_rio.h
@@ -50,9 +50,12 @@
50#define RIO_S_DBELL_REGS_OFFSET 0x13400 50#define RIO_S_DBELL_REGS_OFFSET 0x13400
51#define RIO_S_PW_REGS_OFFSET 0x134e0 51#define RIO_S_PW_REGS_OFFSET 0x134e0
52#define RIO_ATMU_REGS_DBELL_OFFSET 0x10C40 52#define RIO_ATMU_REGS_DBELL_OFFSET 0x10C40
53#define RIO_INB_ATMU_REGS_PORT1_OFFSET 0x10d60
54#define RIO_INB_ATMU_REGS_PORT2_OFFSET 0x10f60
53 55
54#define MAX_MSG_UNIT_NUM 2 56#define MAX_MSG_UNIT_NUM 2
55#define MAX_PORT_NUM 4 57#define MAX_PORT_NUM 4
58#define RIO_INB_ATMU_COUNT 4
56 59
57struct rio_atmu_regs { 60struct rio_atmu_regs {
58 u32 rowtar; 61 u32 rowtar;
@@ -63,6 +66,15 @@ struct rio_atmu_regs {
63 u32 pad2[3]; 66 u32 pad2[3];
64}; 67};
65 68
69struct rio_inb_atmu_regs {
70 u32 riwtar;
71 u32 pad1;
72 u32 riwbar;
73 u32 pad2;
74 u32 riwar;
75 u32 pad3[3];
76};
77
66struct rio_dbell_ring { 78struct rio_dbell_ring {
67 void *virt; 79 void *virt;
68 dma_addr_t phys; 80 dma_addr_t phys;
@@ -99,6 +111,7 @@ struct rio_priv {
99 void __iomem *regs_win; 111 void __iomem *regs_win;
100 struct rio_atmu_regs __iomem *atmu_regs; 112 struct rio_atmu_regs __iomem *atmu_regs;
101 struct rio_atmu_regs __iomem *maint_atmu_regs; 113 struct rio_atmu_regs __iomem *maint_atmu_regs;
114 struct rio_inb_atmu_regs __iomem *inb_atmu_regs;
102 void __iomem *maint_win; 115 void __iomem *maint_win;
103 void *rmm_handle; /* RapidIO message manager(unit) Handle */ 116 void *rmm_handle; /* RapidIO message manager(unit) Handle */
104}; 117};
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c
index ffd1169ebaab..99269c041615 100644
--- a/arch/powerpc/sysdev/fsl_soc.c
+++ b/arch/powerpc/sysdev/fsl_soc.c
@@ -197,8 +197,7 @@ static int __init setup_rstcr(void)
197 if (!rstcr && ppc_md.restart == fsl_rstcr_restart) 197 if (!rstcr && ppc_md.restart == fsl_rstcr_restart)
198 printk(KERN_ERR "No RSTCR register, warm reboot won't work\n"); 198 printk(KERN_ERR "No RSTCR register, warm reboot won't work\n");
199 199
200 if (np) 200 of_node_put(np);
201 of_node_put(np);
202 201
203 return 0; 202 return 0;
204} 203}
@@ -238,7 +237,7 @@ void fsl_hv_restart(char *cmd)
238/* 237/*
239 * Halt the current partition 238 * Halt the current partition
240 * 239 *
241 * This function should be assigned to the ppc_md.power_off and ppc_md.halt 240 * This function should be assigned to the pm_power_off and ppc_md.halt
242 * function pointers, to shut down the partition when we're running under 241 * function pointers, to shut down the partition when we're running under
243 * the Freescale hypervisor. 242 * the Freescale hypervisor.
244 */ 243 */
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c
index b50f97811c25..b28733727ed3 100644
--- a/arch/powerpc/sysdev/ipic.c
+++ b/arch/powerpc/sysdev/ipic.c
@@ -20,7 +20,6 @@
20#include <linux/signal.h> 20#include <linux/signal.h>
21#include <linux/syscore_ops.h> 21#include <linux/syscore_ops.h>
22#include <linux/device.h> 22#include <linux/device.h>
23#include <linux/bootmem.h>
24#include <linux/spinlock.h> 23#include <linux/spinlock.h>
25#include <linux/fsl_devices.h> 24#include <linux/fsl_devices.h>
26#include <asm/irq.h> 25#include <asm/irq.h>
diff --git a/arch/powerpc/sysdev/mpc5xxx_clocks.c b/arch/powerpc/sysdev/mpc5xxx_clocks.c
index 5492dc5f56f4..f4f0301b9a60 100644
--- a/arch/powerpc/sysdev/mpc5xxx_clocks.c
+++ b/arch/powerpc/sysdev/mpc5xxx_clocks.c
@@ -26,8 +26,7 @@ unsigned long mpc5xxx_get_bus_frequency(struct device_node *node)
26 of_node_put(node); 26 of_node_put(node);
27 node = np; 27 node = np;
28 } 28 }
29 if (node) 29 of_node_put(node);
30 of_node_put(node);
31 30
32 return p_bus_freq ? *p_bus_freq : 0; 31 return p_bus_freq ? *p_bus_freq : 0;
33} 32}
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c
index 89cec0ed6a58..c4648ad5c1f3 100644
--- a/arch/powerpc/sysdev/mpic.c
+++ b/arch/powerpc/sysdev/mpic.c
@@ -24,7 +24,6 @@
24#include <linux/irq.h> 24#include <linux/irq.h>
25#include <linux/smp.h> 25#include <linux/smp.h>
26#include <linux/interrupt.h> 26#include <linux/interrupt.h>
27#include <linux/bootmem.h>
28#include <linux/spinlock.h> 27#include <linux/spinlock.h>
29#include <linux/pci.h> 28#include <linux/pci.h>
30#include <linux/slab.h> 29#include <linux/slab.h>
diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c
index 45c114bc430b..a3f660eed6de 100644
--- a/arch/powerpc/sysdev/mpic_pasemi_msi.c
+++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c
@@ -16,7 +16,6 @@
16#undef DEBUG 16#undef DEBUG
17 17
18#include <linux/irq.h> 18#include <linux/irq.h>
19#include <linux/bootmem.h>
20#include <linux/msi.h> 19#include <linux/msi.h>
21#include <asm/mpic.h> 20#include <asm/mpic.h>
22#include <asm/prom.h> 21#include <asm/prom.h>
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c
index 0dff1cd44481..b2cef1809389 100644
--- a/arch/powerpc/sysdev/mpic_u3msi.c
+++ b/arch/powerpc/sysdev/mpic_u3msi.c
@@ -10,7 +10,6 @@
10 */ 10 */
11 11
12#include <linux/irq.h> 12#include <linux/irq.h>
13#include <linux/bootmem.h>
14#include <linux/msi.h> 13#include <linux/msi.h>
15#include <asm/mpic.h> 14#include <asm/mpic.h>
16#include <asm/prom.h> 15#include <asm/prom.h>
diff --git a/arch/powerpc/sysdev/ppc4xx_cpm.c b/arch/powerpc/sysdev/ppc4xx_cpm.c
index 82e2cfe35c62..ba95adf81d8d 100644
--- a/arch/powerpc/sysdev/ppc4xx_cpm.c
+++ b/arch/powerpc/sysdev/ppc4xx_cpm.c
@@ -281,7 +281,7 @@ static int __init cpm_init(void)
281 printk(KERN_ERR "cpm: could not parse dcr property for %s\n", 281 printk(KERN_ERR "cpm: could not parse dcr property for %s\n",
282 np->full_name); 282 np->full_name);
283 ret = -EINVAL; 283 ret = -EINVAL;
284 goto out; 284 goto node_put;
285 } 285 }
286 286
287 cpm.dcr_host = dcr_map(np, dcr_base, dcr_len); 287 cpm.dcr_host = dcr_map(np, dcr_base, dcr_len);
@@ -290,7 +290,7 @@ static int __init cpm_init(void)
290 printk(KERN_ERR "cpm: failed to map dcr property for %s\n", 290 printk(KERN_ERR "cpm: failed to map dcr property for %s\n",
291 np->full_name); 291 np->full_name);
292 ret = -EINVAL; 292 ret = -EINVAL;
293 goto out; 293 goto node_put;
294 } 294 }
295 295
296 /* All 4xx SoCs with a CPM controller have one of two 296 /* All 4xx SoCs with a CPM controller have one of two
@@ -330,9 +330,9 @@ static int __init cpm_init(void)
330 330
331 if (cpm.standby || cpm.suspend) 331 if (cpm.standby || cpm.suspend)
332 suspend_set_ops(&cpm_suspend_ops); 332 suspend_set_ops(&cpm_suspend_ops);
333node_put:
334 of_node_put(np);
333out: 335out:
334 if (np)
335 of_node_put(np);
336 return ret; 336 return ret;
337} 337}
338 338
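The cpm_init() fix is the standard goto-label cleanup pattern: failures that occur while holding the device-node reference now jump to a dedicated node_put: label, so the reference is dropped exactly once on every path. A condensed sketch, with demo_parse() as a hypothetical helper:

    #include <linux/of.h>

    static int demo_parse(struct device_node *np);  /* hypothetical */

    static int __init demo_init(void)
    {
            struct device_node *np;
            int ret = 0;

            np = of_find_compatible_node(NULL, NULL, "demo,cpm");
            if (!np)
                    return -ENODEV;

            if (demo_parse(np)) {
                    ret = -EINVAL;
                    goto node_put;          /* still holds the reference */
            }
            /* ... further setup, each failure jumping to node_put ... */
    node_put:
            of_node_put(np);                /* runs on success and failure alike */
            return ret;
    }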
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c
index 518eabbe0bdc..5e622c0544c4 100644
--- a/arch/powerpc/sysdev/ppc4xx_msi.c
+++ b/arch/powerpc/sysdev/ppc4xx_msi.c
@@ -22,7 +22,6 @@
22 */ 22 */
23 23
24#include <linux/irq.h> 24#include <linux/irq.h>
25#include <linux/bootmem.h>
26#include <linux/pci.h> 25#include <linux/pci.h>
27#include <linux/msi.h> 26#include <linux/msi.h>
28#include <linux/of_platform.h> 27#include <linux/of_platform.h>
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.c b/arch/powerpc/sysdev/ppc4xx_pci.c
index df6e2fc4ff92..086aca69ecae 100644
--- a/arch/powerpc/sysdev/ppc4xx_pci.c
+++ b/arch/powerpc/sysdev/ppc4xx_pci.c
@@ -22,7 +22,6 @@
22#include <linux/pci.h> 22#include <linux/pci.h>
23#include <linux/init.h> 23#include <linux/init.h>
24#include <linux/of.h> 24#include <linux/of.h>
25#include <linux/bootmem.h>
26#include <linux/delay.h> 25#include <linux/delay.h>
27#include <linux/slab.h> 26#include <linux/slab.h>
28 27
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c
index 238a07b97f2c..b584debbcd9c 100644
--- a/arch/powerpc/sysdev/qe_lib/qe.c
+++ b/arch/powerpc/sysdev/qe_lib/qe.c
@@ -22,7 +22,6 @@
22#include <linux/spinlock.h> 22#include <linux/spinlock.h>
23#include <linux/mm.h> 23#include <linux/mm.h>
24#include <linux/interrupt.h> 24#include <linux/interrupt.h>
25#include <linux/bootmem.h>
26#include <linux/module.h> 25#include <linux/module.h>
27#include <linux/delay.h> 26#include <linux/delay.h>
28#include <linux/ioport.h> 27#include <linux/ioport.h>
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c
index b2b87c30e266..543765e1ef14 100644
--- a/arch/powerpc/sysdev/qe_lib/qe_ic.c
+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c
@@ -23,7 +23,6 @@
23#include <linux/sched.h> 23#include <linux/sched.h>
24#include <linux/signal.h> 24#include <linux/signal.h>
25#include <linux/device.h> 25#include <linux/device.h>
26#include <linux/bootmem.h>
27#include <linux/spinlock.h> 26#include <linux/spinlock.h>
28#include <asm/irq.h> 27#include <asm/irq.h>
29#include <asm/io.h> 28#include <asm/io.h>
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c
index 92033936a8f7..7c37157d4c24 100644
--- a/arch/powerpc/sysdev/uic.c
+++ b/arch/powerpc/sysdev/uic.c
@@ -19,7 +19,6 @@
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/signal.h> 20#include <linux/signal.h>
21#include <linux/device.h> 21#include <linux/device.h>
22#include <linux/bootmem.h>
23#include <linux/spinlock.h> 22#include <linux/spinlock.h>
24#include <linux/irq.h> 23#include <linux/irq.h>
25#include <linux/interrupt.h> 24#include <linux/interrupt.h>
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c
index fe0cca477164..365249cd346b 100644
--- a/arch/powerpc/sysdev/xics/xics-common.c
+++ b/arch/powerpc/sysdev/xics/xics-common.c
@@ -155,7 +155,7 @@ int __init xics_smp_probe(void)
155 155
156void xics_teardown_cpu(void) 156void xics_teardown_cpu(void)
157{ 157{
158 struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); 158 struct xics_cppr *os_cppr = this_cpu_ptr(&xics_cppr);
159 159
160 /* 160 /*
161 * we have to reset the cppr index to 0 because we're 161 * we have to reset the cppr index to 0 because we're
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index c8efbb37d6e0..5b150f0c5df9 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -51,6 +51,12 @@
51#include <asm/paca.h> 51#include <asm/paca.h>
52#endif 52#endif
53 53
54#if defined(CONFIG_PPC_SPLPAR)
55#include <asm/plpar_wrappers.h>
56#else
 57static inline long plapr_set_ciabr(unsigned long ciabr) { return 0; }
58#endif
59
54#include "nonstdio.h" 60#include "nonstdio.h"
55#include "dis-asm.h" 61#include "dis-asm.h"
56 62
@@ -88,10 +94,9 @@ struct bpt {
88}; 94};
89 95
90/* Bits in bpt.enabled */ 96/* Bits in bpt.enabled */
91#define BP_IABR_TE 1 /* IABR translation enabled */ 97#define BP_CIABR 1
92#define BP_IABR 2 98#define BP_TRAP 2
93#define BP_TRAP 8 99#define BP_DABR 4
94#define BP_DABR 0x10
95 100
96#define NBPTS 256 101#define NBPTS 256
97static struct bpt bpts[NBPTS]; 102static struct bpt bpts[NBPTS];
@@ -270,6 +275,45 @@ static inline void cinval(void *p)
270 asm volatile ("dcbi 0,%0; icbi 0,%0" : : "r" (p)); 275 asm volatile ("dcbi 0,%0; icbi 0,%0" : : "r" (p));
271} 276}
272 277
278/**
279 * write_ciabr() - write the CIABR SPR
280 * @ciabr: The value to write.
281 *
 282 * This function writes a value to the CIABR register either directly,
 283 * through the mtspr instruction if the kernel is in HV privilege mode,
 284 * or by calling a hypervisor function to achieve the same effect when
 285 * the kernel is in supervisor privilege mode.
286 */
287static void write_ciabr(unsigned long ciabr)
288{
289 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
290 return;
291
292 if (cpu_has_feature(CPU_FTR_HVMODE)) {
293 mtspr(SPRN_CIABR, ciabr);
294 return;
295 }
296 plapr_set_ciabr(ciabr);
297}
298
299/**
300 * set_ciabr() - set the CIABR
 301 * @addr: The breakpoint address to set.
302 *
 303 * This function sets the correct privilege value into the HW
 304 * breakpoint address before writing it to the CIABR register.
305 */
306static void set_ciabr(unsigned long addr)
307{
308 addr &= ~CIABR_PRIV;
309
310 if (cpu_has_feature(CPU_FTR_HVMODE))
311 addr |= CIABR_PRIV_HYPER;
312 else
313 addr |= CIABR_PRIV_SUPER;
314 write_ciabr(addr);
315}
316
273/* 317/*
274 * Disable surveillance (the service processor watchdog function) 318 * Disable surveillance (the service processor watchdog function)
275 * while we are in xmon. 319 * while we are in xmon.
@@ -727,7 +771,7 @@ static void insert_bpts(void)
727 771
728 bp = bpts; 772 bp = bpts;
729 for (i = 0; i < NBPTS; ++i, ++bp) { 773 for (i = 0; i < NBPTS; ++i, ++bp) {
730 if ((bp->enabled & (BP_TRAP|BP_IABR)) == 0) 774 if ((bp->enabled & (BP_TRAP|BP_CIABR)) == 0)
731 continue; 775 continue;
732 if (mread(bp->address, &bp->instr[0], 4) != 4) { 776 if (mread(bp->address, &bp->instr[0], 4) != 4) {
733 printf("Couldn't read instruction at %lx, " 777 printf("Couldn't read instruction at %lx, "
@@ -742,7 +786,7 @@ static void insert_bpts(void)
742 continue; 786 continue;
743 } 787 }
744 store_inst(&bp->instr[0]); 788 store_inst(&bp->instr[0]);
745 if (bp->enabled & BP_IABR) 789 if (bp->enabled & BP_CIABR)
746 continue; 790 continue;
747 if (mwrite(bp->address, &bpinstr, 4) != 4) { 791 if (mwrite(bp->address, &bpinstr, 4) != 4) {
748 printf("Couldn't write instruction at %lx, " 792 printf("Couldn't write instruction at %lx, "
@@ -764,9 +808,9 @@ static void insert_cpu_bpts(void)
764 brk.len = 8; 808 brk.len = 8;
765 __set_breakpoint(&brk); 809 __set_breakpoint(&brk);
766 } 810 }
767 if (iabr && cpu_has_feature(CPU_FTR_IABR)) 811
768 mtspr(SPRN_IABR, iabr->address 812 if (iabr)
769 | (iabr->enabled & (BP_IABR|BP_IABR_TE))); 813 set_ciabr(iabr->address);
770} 814}
771 815
772static void remove_bpts(void) 816static void remove_bpts(void)
@@ -777,7 +821,7 @@ static void remove_bpts(void)
777 821
778 bp = bpts; 822 bp = bpts;
779 for (i = 0; i < NBPTS; ++i, ++bp) { 823 for (i = 0; i < NBPTS; ++i, ++bp) {
780 if ((bp->enabled & (BP_TRAP|BP_IABR)) != BP_TRAP) 824 if ((bp->enabled & (BP_TRAP|BP_CIABR)) != BP_TRAP)
781 continue; 825 continue;
782 if (mread(bp->address, &instr, 4) == 4 826 if (mread(bp->address, &instr, 4) == 4
783 && instr == bpinstr 827 && instr == bpinstr
@@ -792,8 +836,7 @@ static void remove_bpts(void)
792static void remove_cpu_bpts(void) 836static void remove_cpu_bpts(void)
793{ 837{
794 hw_breakpoint_disable(); 838 hw_breakpoint_disable();
795 if (cpu_has_feature(CPU_FTR_IABR)) 839 write_ciabr(0);
796 mtspr(SPRN_IABR, 0);
797} 840}
798 841
799/* Command interpreting routine */ 842/* Command interpreting routine */
@@ -907,7 +950,7 @@ cmds(struct pt_regs *excp)
907 case 'u': 950 case 'u':
908 dump_segments(); 951 dump_segments();
909 break; 952 break;
910#elif defined(CONFIG_4xx) 953#elif defined(CONFIG_44x)
911 case 'u': 954 case 'u':
912 dump_tlb_44x(); 955 dump_tlb_44x();
913 break; 956 break;
@@ -981,7 +1024,8 @@ static void bootcmds(void)
981 else if (cmd == 'h') 1024 else if (cmd == 'h')
982 ppc_md.halt(); 1025 ppc_md.halt();
983 else if (cmd == 'p') 1026 else if (cmd == 'p')
984 ppc_md.power_off(); 1027 if (pm_power_off)
1028 pm_power_off();
985} 1029}
986 1030
987static int cpu_cmd(void) 1031static int cpu_cmd(void)
@@ -1127,7 +1171,7 @@ static char *breakpoint_help_string =
1127 "b <addr> [cnt] set breakpoint at given instr addr\n" 1171 "b <addr> [cnt] set breakpoint at given instr addr\n"
1128 "bc clear all breakpoints\n" 1172 "bc clear all breakpoints\n"
1129 "bc <n/addr> clear breakpoint number n or at addr\n" 1173 "bc <n/addr> clear breakpoint number n or at addr\n"
1130 "bi <addr> [cnt] set hardware instr breakpoint (POWER3/RS64 only)\n" 1174 "bi <addr> [cnt] set hardware instr breakpoint (POWER8 only)\n"
1131 "bd <addr> [cnt] set hardware data breakpoint\n" 1175 "bd <addr> [cnt] set hardware data breakpoint\n"
1132 ""; 1176 "";
1133 1177
@@ -1166,13 +1210,13 @@ bpt_cmds(void)
1166 break; 1210 break;
1167 1211
1168 case 'i': /* bi - hardware instr breakpoint */ 1212 case 'i': /* bi - hardware instr breakpoint */
1169 if (!cpu_has_feature(CPU_FTR_IABR)) { 1213 if (!cpu_has_feature(CPU_FTR_ARCH_207S)) {
1170 printf("Hardware instruction breakpoint " 1214 printf("Hardware instruction breakpoint "
1171 "not supported on this cpu\n"); 1215 "not supported on this cpu\n");
1172 break; 1216 break;
1173 } 1217 }
1174 if (iabr) { 1218 if (iabr) {
1175 iabr->enabled &= ~(BP_IABR | BP_IABR_TE); 1219 iabr->enabled &= ~BP_CIABR;
1176 iabr = NULL; 1220 iabr = NULL;
1177 } 1221 }
1178 if (!scanhex(&a)) 1222 if (!scanhex(&a))
@@ -1181,7 +1225,7 @@ bpt_cmds(void)
1181 break; 1225 break;
1182 bp = new_breakpoint(a); 1226 bp = new_breakpoint(a);
1183 if (bp != NULL) { 1227 if (bp != NULL) {
1184 bp->enabled |= BP_IABR | BP_IABR_TE; 1228 bp->enabled |= BP_CIABR;
1185 iabr = bp; 1229 iabr = bp;
1186 } 1230 }
1187 break; 1231 break;
@@ -1238,7 +1282,7 @@ bpt_cmds(void)
1238 if (!bp->enabled) 1282 if (!bp->enabled)
1239 continue; 1283 continue;
1240 printf("%2x %s ", BP_NUM(bp), 1284 printf("%2x %s ", BP_NUM(bp),
1241 (bp->enabled & BP_IABR)? "inst": "trap"); 1285 (bp->enabled & BP_CIABR) ? "inst": "trap");
1242 xmon_print_symbol(bp->address, " ", "\n"); 1286 xmon_print_symbol(bp->address, " ", "\n");
1243 } 1287 }
1244 break; 1288 break;
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 4236408070e5..6259895fcd97 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -11,200 +11,28 @@
11#include <linux/types.h> 11#include <linux/types.h>
12#include <linux/bug.h> 12#include <linux/bug.h>
13 13
14extern void __xchg_called_with_bad_pointer(void); 14#define cmpxchg(ptr, o, n) \
15 15({ \
16static inline unsigned long __xchg(unsigned long x, void *ptr, int size) 16 __typeof__(*(ptr)) __o = (o); \
17{ 17 __typeof__(*(ptr)) __n = (n); \
18 unsigned long addr, old; 18 (__typeof__(*(ptr))) __sync_val_compare_and_swap((ptr),__o,__n);\
19 int shift;
20
21 switch (size) {
22 case 1:
23 addr = (unsigned long) ptr;
24 shift = (3 ^ (addr & 3)) << 3;
25 addr ^= addr & 3;
26 asm volatile(
27 " l %0,%4\n"
28 "0: lr 0,%0\n"
29 " nr 0,%3\n"
30 " or 0,%2\n"
31 " cs %0,0,%4\n"
32 " jl 0b\n"
33 : "=&d" (old), "=Q" (*(int *) addr)
34 : "d" ((x & 0xff) << shift), "d" (~(0xff << shift)),
35 "Q" (*(int *) addr) : "memory", "cc", "0");
36 return old >> shift;
37 case 2:
38 addr = (unsigned long) ptr;
39 shift = (2 ^ (addr & 2)) << 3;
40 addr ^= addr & 2;
41 asm volatile(
42 " l %0,%4\n"
43 "0: lr 0,%0\n"
44 " nr 0,%3\n"
45 " or 0,%2\n"
46 " cs %0,0,%4\n"
47 " jl 0b\n"
48 : "=&d" (old), "=Q" (*(int *) addr)
49 : "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift)),
50 "Q" (*(int *) addr) : "memory", "cc", "0");
51 return old >> shift;
52 case 4:
53 asm volatile(
54 " l %0,%3\n"
55 "0: cs %0,%2,%3\n"
56 " jl 0b\n"
57 : "=&d" (old), "=Q" (*(int *) ptr)
58 : "d" (x), "Q" (*(int *) ptr)
59 : "memory", "cc");
60 return old;
61#ifdef CONFIG_64BIT
62 case 8:
63 asm volatile(
64 " lg %0,%3\n"
65 "0: csg %0,%2,%3\n"
66 " jl 0b\n"
67 : "=&d" (old), "=m" (*(long *) ptr)
68 : "d" (x), "Q" (*(long *) ptr)
69 : "memory", "cc");
70 return old;
71#endif /* CONFIG_64BIT */
72 }
73 __xchg_called_with_bad_pointer();
74 return x;
75}
76
77#define xchg(ptr, x) \
78({ \
79 __typeof__(*(ptr)) __ret; \
80 __ret = (__typeof__(*(ptr))) \
81 __xchg((unsigned long)(x), (void *)(ptr), sizeof(*(ptr)));\
82 __ret; \
83}) 19})
84 20
85/* 21#define cmpxchg64 cmpxchg
86 * Atomic compare and exchange. Compare OLD with MEM, if identical, 22#define cmpxchg_local cmpxchg
87 * store NEW in MEM. Return the initial value in MEM. Success is 23#define cmpxchg64_local cmpxchg
88 * indicated by comparing RETURN with OLD.
89 */
90
91#define __HAVE_ARCH_CMPXCHG
92
93extern void __cmpxchg_called_with_bad_pointer(void);
94
95static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
96 unsigned long new, int size)
97{
98 unsigned long addr, prev, tmp;
99 int shift;
100
101 switch (size) {
102 case 1:
103 addr = (unsigned long) ptr;
104 shift = (3 ^ (addr & 3)) << 3;
105 addr ^= addr & 3;
106 asm volatile(
107 " l %0,%2\n"
108 "0: nr %0,%5\n"
109 " lr %1,%0\n"
110 " or %0,%3\n"
111 " or %1,%4\n"
112 " cs %0,%1,%2\n"
113 " jnl 1f\n"
114 " xr %1,%0\n"
115 " nr %1,%5\n"
116 " jnz 0b\n"
117 "1:"
118 : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
119 : "d" ((old & 0xff) << shift),
120 "d" ((new & 0xff) << shift),
121 "d" (~(0xff << shift))
122 : "memory", "cc");
123 return prev >> shift;
124 case 2:
125 addr = (unsigned long) ptr;
126 shift = (2 ^ (addr & 2)) << 3;
127 addr ^= addr & 2;
128 asm volatile(
129 " l %0,%2\n"
130 "0: nr %0,%5\n"
131 " lr %1,%0\n"
132 " or %0,%3\n"
133 " or %1,%4\n"
134 " cs %0,%1,%2\n"
135 " jnl 1f\n"
136 " xr %1,%0\n"
137 " nr %1,%5\n"
138 " jnz 0b\n"
139 "1:"
140 : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
141 : "d" ((old & 0xffff) << shift),
142 "d" ((new & 0xffff) << shift),
143 "d" (~(0xffff << shift))
144 : "memory", "cc");
145 return prev >> shift;
146 case 4:
147 asm volatile(
148 " cs %0,%3,%1\n"
149 : "=&d" (prev), "=Q" (*(int *) ptr)
150 : "0" (old), "d" (new), "Q" (*(int *) ptr)
151 : "memory", "cc");
152 return prev;
153#ifdef CONFIG_64BIT
154 case 8:
155 asm volatile(
156 " csg %0,%3,%1\n"
157 : "=&d" (prev), "=Q" (*(long *) ptr)
158 : "0" (old), "d" (new), "Q" (*(long *) ptr)
159 : "memory", "cc");
160 return prev;
161#endif /* CONFIG_64BIT */
162 }
163 __cmpxchg_called_with_bad_pointer();
164 return old;
165}
166
167#define cmpxchg(ptr, o, n) \
168({ \
169 __typeof__(*(ptr)) __ret; \
170 __ret = (__typeof__(*(ptr))) \
171 __cmpxchg((ptr), (unsigned long)(o), (unsigned long)(n), \
172 sizeof(*(ptr))); \
173 __ret; \
174})
175 24
176#ifdef CONFIG_64BIT 25#define xchg(ptr, x) \
177#define cmpxchg64(ptr, o, n) \
178({ \ 26({ \
179 cmpxchg((ptr), (o), (n)); \ 27 __typeof__(ptr) __ptr = (ptr); \
28 __typeof__(*(ptr)) __old; \
29 do { \
30 __old = *__ptr; \
31 } while (!__sync_bool_compare_and_swap(__ptr, __old, x)); \
32 __old; \
180}) 33})
181#else /* CONFIG_64BIT */
182static inline unsigned long long __cmpxchg64(void *ptr,
183 unsigned long long old,
184 unsigned long long new)
185{
186 register_pair rp_old = {.pair = old};
187 register_pair rp_new = {.pair = new};
188 unsigned long long *ullptr = ptr;
189 34
190 asm volatile( 35#define __HAVE_ARCH_CMPXCHG
191 " cds %0,%2,%1"
192 : "+d" (rp_old), "+Q" (*ullptr)
193 : "d" (rp_new)
194 : "memory", "cc");
195 return rp_old.pair;
196}
197
198#define cmpxchg64(ptr, o, n) \
199({ \
200 __typeof__(*(ptr)) __ret; \
201 __ret = (__typeof__(*(ptr))) \
202 __cmpxchg64((ptr), \
203 (unsigned long long)(o), \
204 (unsigned long long)(n)); \
205 __ret; \
206})
207#endif /* CONFIG_64BIT */
208 36
209#define __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, insn) \ 37#define __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, insn) \
210({ \ 38({ \
@@ -265,40 +93,4 @@ extern void __cmpxchg_double_called_with_bad_pointer(void);
265 93
266#define system_has_cmpxchg_double() 1 94#define system_has_cmpxchg_double() 1
267 95
268#include <asm-generic/cmpxchg-local.h>
269
270static inline unsigned long __cmpxchg_local(void *ptr,
271 unsigned long old,
272 unsigned long new, int size)
273{
274 switch (size) {
275 case 1:
276 case 2:
277 case 4:
278#ifdef CONFIG_64BIT
279 case 8:
280#endif
281 return __cmpxchg(ptr, old, new, size);
282 default:
283 return __cmpxchg_local_generic(ptr, old, new, size);
284 }
285
286 return old;
287}
288
289/*
290 * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
291 * them available.
292 */
293#define cmpxchg_local(ptr, o, n) \
294({ \
295 __typeof__(*(ptr)) __ret; \
296 __ret = (__typeof__(*(ptr))) \
297 __cmpxchg_local((ptr), (unsigned long)(o), \
298 (unsigned long)(n), sizeof(*(ptr))); \
299 __ret; \
300})
301
302#define cmpxchg64_local(ptr, o, n) cmpxchg64((ptr), (o), (n))
303
304#endif /* __ASM_CMPXCHG_H */ 96#endif /* __ASM_CMPXCHG_H */
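The rewritten header delegates everything to the GCC/clang __sync builtins: cmpxchg() becomes __sync_val_compare_and_swap(), and xchg() is built as a compare-and-swap retry loop. A user-space sketch of that loop, which any compiler providing the __sync builtins will accept:

    #include <stdio.h>

    /* exchange *ptr for x via CAS retry, as the new s390 xchg() macro does */
    static unsigned long demo_xchg(unsigned long *ptr, unsigned long x)
    {
            unsigned long old;

            do {
                    old = *ptr;
            } while (!__sync_bool_compare_and_swap(ptr, old, x));

            return old;
    }

    int main(void)
    {
            unsigned long v = 1;
            unsigned long prev = demo_xchg(&v, 2);

            printf("prev=%lu now=%lu\n", prev, v);  /* prev=1 now=2 */
            return 0;
    }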
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index f8c196984853..b91e960e4045 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -10,6 +10,8 @@
10#include <linux/types.h> 10#include <linux/types.h>
11#include <asm/div64.h> 11#include <asm/div64.h>
12 12
13#define CPUTIME_PER_USEC 4096ULL
14#define CPUTIME_PER_SEC (CPUTIME_PER_USEC * USEC_PER_SEC)
13 15
14/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */ 16/* We want to use full resolution of the CPU timer: 2**-12 micro-seconds. */
15 17
@@ -38,24 +40,24 @@ static inline unsigned long __div(unsigned long long n, unsigned long base)
38 */ 40 */
39static inline unsigned long cputime_to_jiffies(const cputime_t cputime) 41static inline unsigned long cputime_to_jiffies(const cputime_t cputime)
40{ 42{
41 return __div((__force unsigned long long) cputime, 4096000000ULL / HZ); 43 return __div((__force unsigned long long) cputime, CPUTIME_PER_SEC / HZ);
42} 44}
43 45
44static inline cputime_t jiffies_to_cputime(const unsigned int jif) 46static inline cputime_t jiffies_to_cputime(const unsigned int jif)
45{ 47{
46 return (__force cputime_t)(jif * (4096000000ULL / HZ)); 48 return (__force cputime_t)(jif * (CPUTIME_PER_SEC / HZ));
47} 49}
48 50
49static inline u64 cputime64_to_jiffies64(cputime64_t cputime) 51static inline u64 cputime64_to_jiffies64(cputime64_t cputime)
50{ 52{
51 unsigned long long jif = (__force unsigned long long) cputime; 53 unsigned long long jif = (__force unsigned long long) cputime;
52 do_div(jif, 4096000000ULL / HZ); 54 do_div(jif, CPUTIME_PER_SEC / HZ);
53 return jif; 55 return jif;
54} 56}
55 57
56static inline cputime64_t jiffies64_to_cputime64(const u64 jif) 58static inline cputime64_t jiffies64_to_cputime64(const u64 jif)
57{ 59{
58 return (__force cputime64_t)(jif * (4096000000ULL / HZ)); 60 return (__force cputime64_t)(jif * (CPUTIME_PER_SEC / HZ));
59} 61}
60 62
61/* 63/*
@@ -68,7 +70,7 @@ static inline unsigned int cputime_to_usecs(const cputime_t cputime)
68 70
69static inline cputime_t usecs_to_cputime(const unsigned int m) 71static inline cputime_t usecs_to_cputime(const unsigned int m)
70{ 72{
71 return (__force cputime_t)(m * 4096ULL); 73 return (__force cputime_t)(m * CPUTIME_PER_USEC);
72} 74}
73 75
74#define usecs_to_cputime64(m) usecs_to_cputime(m) 76#define usecs_to_cputime64(m) usecs_to_cputime(m)
@@ -78,12 +80,12 @@ static inline cputime_t usecs_to_cputime(const unsigned int m)
78 */ 80 */
79static inline unsigned int cputime_to_secs(const cputime_t cputime) 81static inline unsigned int cputime_to_secs(const cputime_t cputime)
80{ 82{
81 return __div((__force unsigned long long) cputime, 2048000000) >> 1; 83 return __div((__force unsigned long long) cputime, CPUTIME_PER_SEC / 2) >> 1;
82} 84}
83 85
84static inline cputime_t secs_to_cputime(const unsigned int s) 86static inline cputime_t secs_to_cputime(const unsigned int s)
85{ 87{
86 return (__force cputime_t)(s * 4096000000ULL); 88 return (__force cputime_t)(s * CPUTIME_PER_SEC);
87} 89}
88 90
89/* 91/*
@@ -91,8 +93,8 @@ static inline cputime_t secs_to_cputime(const unsigned int s)
91 */ 93 */
92static inline cputime_t timespec_to_cputime(const struct timespec *value) 94static inline cputime_t timespec_to_cputime(const struct timespec *value)
93{ 95{
94 unsigned long long ret = value->tv_sec * 4096000000ULL; 96 unsigned long long ret = value->tv_sec * CPUTIME_PER_SEC;
95 return (__force cputime_t)(ret + value->tv_nsec * 4096 / 1000); 97 return (__force cputime_t)(ret + __div(value->tv_nsec * CPUTIME_PER_USEC, NSEC_PER_USEC));
96} 98}
97 99
98static inline void cputime_to_timespec(const cputime_t cputime, 100static inline void cputime_to_timespec(const cputime_t cputime,
@@ -103,12 +105,12 @@ static inline void cputime_to_timespec(const cputime_t cputime,
103 register_pair rp; 105 register_pair rp;
104 106
105 rp.pair = __cputime >> 1; 107 rp.pair = __cputime >> 1;
106 asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL)); 108 asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_SEC / 2));
107 value->tv_nsec = rp.subreg.even * 1000 / 4096; 109 value->tv_nsec = rp.subreg.even * NSEC_PER_USEC / CPUTIME_PER_USEC;
108 value->tv_sec = rp.subreg.odd; 110 value->tv_sec = rp.subreg.odd;
109#else 111#else
110 value->tv_nsec = (__cputime % 4096000000ULL) * 1000 / 4096; 112 value->tv_nsec = (__cputime % CPUTIME_PER_SEC) * NSEC_PER_USEC / CPUTIME_PER_USEC;
111 value->tv_sec = __cputime / 4096000000ULL; 113 value->tv_sec = __cputime / CPUTIME_PER_SEC;
112#endif 114#endif
113} 115}
114 116
@@ -119,8 +121,8 @@ static inline void cputime_to_timespec(const cputime_t cputime,
119 */ 121 */
120static inline cputime_t timeval_to_cputime(const struct timeval *value) 122static inline cputime_t timeval_to_cputime(const struct timeval *value)
121{ 123{
122 unsigned long long ret = value->tv_sec * 4096000000ULL; 124 unsigned long long ret = value->tv_sec * CPUTIME_PER_SEC;
123 return (__force cputime_t)(ret + value->tv_usec * 4096ULL); 125 return (__force cputime_t)(ret + value->tv_usec * CPUTIME_PER_USEC);
124} 126}
125 127
126static inline void cputime_to_timeval(const cputime_t cputime, 128static inline void cputime_to_timeval(const cputime_t cputime,
@@ -131,12 +133,12 @@ static inline void cputime_to_timeval(const cputime_t cputime,
131 register_pair rp; 133 register_pair rp;
132 134
133 rp.pair = __cputime >> 1; 135 rp.pair = __cputime >> 1;
 134 asm ("dr %0,%1" : "+d" (rp) : "d" (2048000000UL)); 136 asm ("dr %0,%1" : "+d" (rp) : "d" (CPUTIME_PER_SEC / 2));
135 value->tv_usec = rp.subreg.even / 4096; 137 value->tv_usec = rp.subreg.even / CPUTIME_PER_USEC;
136 value->tv_sec = rp.subreg.odd; 138 value->tv_sec = rp.subreg.odd;
137#else 139#else
138 value->tv_usec = (__cputime % 4096000000ULL) / 4096; 140 value->tv_usec = (__cputime % CPUTIME_PER_SEC) / CPUTIME_PER_USEC;
139 value->tv_sec = __cputime / 4096000000ULL; 141 value->tv_sec = __cputime / CPUTIME_PER_SEC;
140#endif 142#endif
141} 143}
142 144
@@ -146,13 +148,13 @@ static inline void cputime_to_timeval(const cputime_t cputime,
146static inline clock_t cputime_to_clock_t(cputime_t cputime) 148static inline clock_t cputime_to_clock_t(cputime_t cputime)
147{ 149{
148 unsigned long long clock = (__force unsigned long long) cputime; 150 unsigned long long clock = (__force unsigned long long) cputime;
149 do_div(clock, 4096000000ULL / USER_HZ); 151 do_div(clock, CPUTIME_PER_SEC / USER_HZ);
150 return clock; 152 return clock;
151} 153}
152 154
153static inline cputime_t clock_t_to_cputime(unsigned long x) 155static inline cputime_t clock_t_to_cputime(unsigned long x)
154{ 156{
155 return (__force cputime_t)(x * (4096000000ULL / USER_HZ)); 157 return (__force cputime_t)(x * (CPUTIME_PER_SEC / USER_HZ));
156} 158}
157 159
158/* 160/*
@@ -161,7 +163,7 @@ static inline cputime_t clock_t_to_cputime(unsigned long x)
161static inline clock_t cputime64_to_clock_t(cputime64_t cputime) 163static inline clock_t cputime64_to_clock_t(cputime64_t cputime)
162{ 164{
163 unsigned long long clock = (__force unsigned long long) cputime; 165 unsigned long long clock = (__force unsigned long long) cputime;
164 do_div(clock, 4096000000ULL / USER_HZ); 166 do_div(clock, CPUTIME_PER_SEC / USER_HZ);
165 return clock; 167 return clock;
166} 168}
167 169
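Every magic number in this file is a multiple of the CPU timer's resolution of 2**-12 microseconds: 4096 units per microsecond, 4096000000 per second, and 2048000000 for the pre-shifted 31-bit divides. Naming the constants makes each conversion a visible ratio; a quick user-space arithmetic check:

    #include <stdio.h>

    #define CPUTIME_PER_USEC 4096ULL
    #define USEC_PER_SEC     1000000ULL
    #define CPUTIME_PER_SEC  (CPUTIME_PER_USEC * USEC_PER_SEC)

    int main(void)
    {
            /* 1.5 s expressed in 2**-12 us cputime units, then converted back */
            unsigned long long cputime = 1500000ULL * CPUTIME_PER_USEC;

            printf("%llu units = %llu us = %llu s (truncated)\n",
                   cputime,
                   cputime / CPUTIME_PER_USEC,   /* 1500000 */
                   cputime / CPUTIME_PER_SEC);   /* 1 */
            return 0;
    }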
diff --git a/arch/s390/include/asm/debug.h b/arch/s390/include/asm/debug.h
index 530c15eb01e9..0206c8052328 100644
--- a/arch/s390/include/asm/debug.h
+++ b/arch/s390/include/asm/debug.h
@@ -151,9 +151,21 @@ debug_text_event(debug_info_t* id, int level, const char* txt)
151 * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details! 151 * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details!
152 */ 152 */
153extern debug_entry_t * 153extern debug_entry_t *
154debug_sprintf_event(debug_info_t* id,int level,char *string,...) 154__debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
155 __attribute__ ((format(printf, 3, 4))); 155 __attribute__ ((format(printf, 3, 4)));
156 156
157#define debug_sprintf_event(_id, _level, _fmt, ...) \
158({ \
159 debug_entry_t *__ret; \
160 debug_info_t *__id = _id; \
161 int __level = _level; \
162 if ((!__id) || (__level > __id->level)) \
163 __ret = NULL; \
164 else \
165 __ret = __debug_sprintf_event(__id, __level, \
166 _fmt, ## __VA_ARGS__); \
167 __ret; \
168})
157 169
158static inline debug_entry_t* 170static inline debug_entry_t*
159debug_exception(debug_info_t* id, int level, void* data, int length) 171debug_exception(debug_info_t* id, int level, void* data, int length)
@@ -194,9 +206,22 @@ debug_text_exception(debug_info_t* id, int level, const char* txt)
194 * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details! 206 * stored in the s390dbf. See Documentation/s390/s390dbf.txt for more details!
195 */ 207 */
196extern debug_entry_t * 208extern debug_entry_t *
197debug_sprintf_exception(debug_info_t* id,int level,char *string,...) 209__debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
198 __attribute__ ((format(printf, 3, 4))); 210 __attribute__ ((format(printf, 3, 4)));
199 211
212#define debug_sprintf_exception(_id, _level, _fmt, ...) \
213({ \
214 debug_entry_t *__ret; \
215 debug_info_t *__id = _id; \
216 int __level = _level; \
217 if ((!__id) || (__level > __id->level)) \
218 __ret = NULL; \
219 else \
220 __ret = __debug_sprintf_exception(__id, __level, \
221 _fmt, ## __VA_ARGS__);\
222 __ret; \
223})
224
200int debug_register_view(debug_info_t* id, struct debug_view* view); 225int debug_register_view(debug_info_t* id, struct debug_view* view);
201int debug_unregister_view(debug_info_t* id, struct debug_view* view); 226int debug_unregister_view(debug_info_t* id, struct debug_view* view);
202 227
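Turning debug_sprintf_event()/debug_sprintf_exception() into macros moves the level test to the call site, so when an event is filtered out the printf-style arguments are never evaluated and the varargs call is skipped entirely. A generic user-space sketch of the pattern (the demo_* names are illustrative):

    #include <stdarg.h>
    #include <stdio.h>

    struct demo_log { int level; };

    static void __demo_event(struct demo_log *log, int level, const char *fmt, ...)
    {
            va_list ap;

            fprintf(stderr, "[%d] ", level);
            va_start(ap, fmt);
            vfprintf(stderr, fmt, ap);
            va_end(ap);
    }

    /* arguments after _fmt are not evaluated when the event is filtered */
    #define demo_event(_log, _level, _fmt, ...)                             \
    do {                                                                    \
            struct demo_log *__log = (_log);                                \
            int __level = (_level);                                         \
            if (__log && __level <= __log->level)                           \
                    __demo_event(__log, __level, _fmt, ## __VA_ARGS__);     \
    } while (0)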
diff --git a/arch/s390/include/asm/ftrace.h b/arch/s390/include/asm/ftrace.h
index 3aef8afec336..abb618f1ead2 100644
--- a/arch/s390/include/asm/ftrace.h
+++ b/arch/s390/include/asm/ftrace.h
@@ -1,25 +1,69 @@
 #ifndef _ASM_S390_FTRACE_H
 #define _ASM_S390_FTRACE_H
 
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+
+#define MCOUNT_INSN_SIZE	24
+#define MCOUNT_RETURN_FIXUP	18
+
 #ifndef __ASSEMBLY__
 
-extern void _mcount(void);
+#define ftrace_return_address(n) __builtin_return_address(n)
+
+void _mcount(void);
+void ftrace_caller(void);
+
 extern char ftrace_graph_caller_end;
+extern unsigned long ftrace_plt;
 
 struct dyn_arch_ftrace { };
 
-#define MCOUNT_ADDR ((long)_mcount)
+#define MCOUNT_ADDR ((unsigned long)_mcount)
+#define FTRACE_ADDR ((unsigned long)ftrace_caller)
 
+#define KPROBE_ON_FTRACE_NOP	0
+#define KPROBE_ON_FTRACE_CALL	1
 
 static inline unsigned long ftrace_call_adjust(unsigned long addr)
 {
 	return addr;
 }
 
-#endif /* __ASSEMBLY__ */
+struct ftrace_insn {
+	u16 opc;
+	s32 disp;
+} __packed;
+
+static inline void ftrace_generate_nop_insn(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_FUNCTION_TRACER
+	/* jg .+24 */
+	insn->opc = 0xc0f4;
+	insn->disp = MCOUNT_INSN_SIZE / 2;
+#endif
+}
 
-#define MCOUNT_INSN_SIZE  18
+static inline int is_ftrace_nop(struct ftrace_insn *insn)
+{
+#ifdef CONFIG_FUNCTION_TRACER
+	if (insn->disp == MCOUNT_INSN_SIZE / 2)
+		return 1;
+#endif
+	return 0;
+}
 
-#define ARCH_SUPPORTS_FTRACE_OPS 1
+static inline void ftrace_generate_call_insn(struct ftrace_insn *insn,
+					     unsigned long ip)
+{
+#ifdef CONFIG_FUNCTION_TRACER
+	unsigned long target;
+
+	/* brasl r0,ftrace_caller */
+	target = is_module_addr((void *) ip) ? ftrace_plt : FTRACE_ADDR;
+	insn->opc = 0xc005;
+	insn->disp = (target - ip) / 2;
+#endif
+}
 
+#endif /* __ASSEMBLY__ */
 #endif /* _ASM_S390_FTRACE_H */
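
[note] Both generated instructions are six bytes: a two-byte opcode followed by a signed 32-bit displacement that the hardware counts in halfwords, which is why every byte offset above is divided by 2. A standalone sketch of that arithmetic (user-space C, addresses invented):

	#include <assert.h>
	#include <stdint.h>

	struct insn { uint16_t opc; int32_t disp; };	/* mirrors ftrace_insn */

	int main(void)
	{
		/* nop: "jg .+24" jumps over the 24-byte mcount block,
		 * i.e. 12 halfwords. */
		struct insn nop = { 0xc0f4, 24 / 2 };

		/* call: "brasl %r0,target", displacement relative to ip. */
		uint64_t ip = 0x1000, target = 0x2000;
		struct insn call = { 0xc005, (int32_t)((target - ip) / 2) };

		assert(nop.disp == 12 && call.disp == 0x800);
		return 0;
	}
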
diff --git a/arch/s390/include/asm/idle.h b/arch/s390/include/asm/idle.h
index 6af037f574b8..113cd963dbbe 100644
--- a/arch/s390/include/asm/idle.h
+++ b/arch/s390/include/asm/idle.h
@@ -9,9 +9,10 @@
 
 #include <linux/types.h>
 #include <linux/device.h>
+#include <linux/seqlock.h>
 
 struct s390_idle_data {
-	unsigned int sequence;
+	seqcount_t seqcount;
 	unsigned long long idle_count;
 	unsigned long long idle_time;
 	unsigned long long clock_idle_enter;
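
[note] Moving from a bare sequence number to seqcount_t lets the idle accounting use the stock lockless reader loop instead of open-coded barriers. An illustrative reader against the new layout (this helper is a sketch, not taken from the patch):

	/* Sketch: consistent snapshot of idle_count without locking. */
	static unsigned long long get_idle_count(struct s390_idle_data *idle)
	{
		unsigned long long count;
		unsigned int seq;

		do {
			seq = read_seqcount_begin(&idle->seqcount);
			count = idle->idle_count;
		} while (read_seqcount_retry(&idle->seqcount, seq));
		return count;
	}
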
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 6ad9013c67e7..30fd5c84680e 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -39,6 +39,15 @@ static inline void iounmap(volatile void __iomem *addr)
 {
 }
 
+static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
+{
+	return NULL;
+}
+
+static inline void ioport_unmap(void __iomem *p)
+{
+}
+
 /*
  * s390 needs a private implementation of pci_iomap since ioremap with its
  * offset parameter isn't sufficient. That's because BAR spaces are not
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index b0d5f0a97a01..343ea7c987aa 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -1,11 +1,11 @@
 #ifndef _ASM_IRQ_H
 #define _ASM_IRQ_H
 
-#define EXT_INTERRUPT	1
-#define IO_INTERRUPT	2
-#define THIN_INTERRUPT	3
+#define EXT_INTERRUPT	0
+#define IO_INTERRUPT	1
+#define THIN_INTERRUPT	2
 
-#define NR_IRQS_BASE	4
+#define NR_IRQS_BASE	3
 
 #ifdef CONFIG_PCI_NR_MSI
 # define NR_IRQS	(NR_IRQS_BASE + CONFIG_PCI_NR_MSI)
@@ -13,9 +13,6 @@
 # define NR_IRQS	NR_IRQS_BASE
 #endif
 
-/* This number is used when no interrupt has been assigned */
-#define NO_IRQ		0
-
 /* External interruption codes */
 #define EXT_IRQ_INTERRUPT_KEY	0x0040
 #define EXT_IRQ_CLK_COMP	0x1004
diff --git a/arch/s390/include/asm/kprobes.h b/arch/s390/include/asm/kprobes.h
index 98629173ce3b..b47ad3b642cc 100644
--- a/arch/s390/include/asm/kprobes.h
+++ b/arch/s390/include/asm/kprobes.h
@@ -60,6 +60,7 @@ typedef u16 kprobe_opcode_t;
 struct arch_specific_insn {
 	/* copy of original instruction */
 	kprobe_opcode_t *insn;
+	unsigned int is_ftrace_insn : 1;
 };
 
 struct prev_kprobe {
diff --git a/arch/s390/include/asm/lowcore.h b/arch/s390/include/asm/lowcore.h
index 6cc51fe84410..34fbcac61133 100644
--- a/arch/s390/include/asm/lowcore.h
+++ b/arch/s390/include/asm/lowcore.h
@@ -147,7 +147,7 @@ struct _lowcore {
 	__u32	softirq_pending;		/* 0x02ec */
 	__u32	percpu_offset;			/* 0x02f0 */
 	__u32	machine_flags;			/* 0x02f4 */
-	__u32	ftrace_func;			/* 0x02f8 */
+	__u8	pad_0x02f8[0x02fc-0x02f8];	/* 0x02f8 */
 	__u32	spinlock_lockval;		/* 0x02fc */
 
 	__u8	pad_0x0300[0x0e00-0x0300];	/* 0x0300 */
@@ -297,7 +297,7 @@ struct _lowcore {
 	__u64	percpu_offset;			/* 0x0378 */
 	__u64	vdso_per_cpu_data;		/* 0x0380 */
 	__u64	machine_flags;			/* 0x0388 */
-	__u64	ftrace_func;			/* 0x0390 */
+	__u8	pad_0x0390[0x0398-0x0390];	/* 0x0390 */
 	__u64	gmap;				/* 0x0398 */
 	__u32	spinlock_lockval;		/* 0x03a0 */
 	__u8	pad_0x03a0[0x0400-0x03a4];	/* 0x03a4 */
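
[note] The removed ftrace_func slots are replaced by padding rather than compacted because lowcore fields are addressed by absolute displacement from assembly; everything behind the hole must keep its offset. A compile-time check in that spirit (illustrative, not in the patch; 31-bit variant shown):

	/* Sketch: the pad keeps later fields at their fixed offsets. */
	static inline void lowcore_layout_check(void)
	{
		BUILD_BUG_ON(offsetof(struct _lowcore, spinlock_lockval) != 0x02fc);
	}
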
diff --git a/arch/s390/include/asm/pci.h b/arch/s390/include/asm/pci.h
index c030900320e0..ef803c202d42 100644
--- a/arch/s390/include/asm/pci.h
+++ b/arch/s390/include/asm/pci.h
@@ -50,10 +50,6 @@ struct zpci_fmb {
 	atomic64_t unmapped_pages;
 } __packed __aligned(16);
 
-#define ZPCI_MSI_VEC_BITS	11
-#define ZPCI_MSI_VEC_MAX	(1 << ZPCI_MSI_VEC_BITS)
-#define ZPCI_MSI_VEC_MASK	(ZPCI_MSI_VEC_MAX - 1)
-
 enum zpci_state {
 	ZPCI_FN_STATE_RESERVED,
 	ZPCI_FN_STATE_STANDBY,
@@ -90,6 +86,7 @@ struct zpci_dev {
 
 	/* IRQ stuff */
 	u64		msi_addr;	/* MSI address */
+	unsigned int	max_msi;	/* maximum number of MSI's */
 	struct airq_iv *aibv;		/* adapter interrupt bit vector */
 	unsigned int	aisb;		/* number of the summary bit */
 
diff --git a/arch/s390/include/asm/pci_io.h b/arch/s390/include/asm/pci_io.h
index d194d544d694..f664e96f48c7 100644
--- a/arch/s390/include/asm/pci_io.h
+++ b/arch/s390/include/asm/pci_io.h
@@ -139,7 +139,8 @@ static inline int zpci_memcpy_fromio(void *dst,
 	int size, rc = 0;
 
 	while (n > 0) {
-		size = zpci_get_max_write_size((u64) src, (u64) dst, n, 8);
+		size = zpci_get_max_write_size((u64 __force) src,
+					       (u64) dst, n, 8);
 		req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
 		rc = zpci_read_single(req, dst, offset, size);
 		if (rc)
@@ -162,7 +163,8 @@ static inline int zpci_memcpy_toio(volatile void __iomem *dst,
 		return -EINVAL;
 
 	while (n > 0) {
-		size = zpci_get_max_write_size((u64) dst, (u64) src, n, 128);
+		size = zpci_get_max_write_size((u64 __force) dst,
+					       (u64) src, n, 128);
 		req = ZPCI_CREATE_REQ(entry->fh, entry->bar, size);
 
 		if (size > 8) /* main path */
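
[note] The `__force` additions are sparse annotations with no runtime effect: src and dst are `void __iomem *`, and converting an __iomem pointer to a plain integer normally makes sparse warn that the cast removes the address space. The pattern in isolation (helper name hypothetical):

	/* Sketch: silencing the sparse address-space check on purpose. */
	static u64 mmio_cookie(void __iomem *mmio)
	{
		return (u64 __force) mmio;	/* a plain (u64) cast would warn */
	}
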
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index d39a31c3cdf2..e510b9460efa 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -22,8 +22,6 @@ unsigned long *page_table_alloc(struct mm_struct *);
 void page_table_free(struct mm_struct *, unsigned long *);
 void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
 
-void page_table_reset_pgste(struct mm_struct *, unsigned long, unsigned long,
-			    bool init_skey);
 int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
 			  unsigned long key, bool nq);
 
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 57c882761dea..5e102422c9ab 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -133,6 +133,18 @@ extern unsigned long MODULES_END;
 #define MODULES_LEN	(1UL << 31)
 #endif
 
+static inline int is_module_addr(void *addr)
+{
+#ifdef CONFIG_64BIT
+	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
+	if (addr < (void *)MODULES_VADDR)
+		return 0;
+	if (addr > (void *)MODULES_END)
+		return 0;
+#endif
+	return 1;
+}
+
 /*
  * A 31 bit pagetable entry of S390 has following format:
  *  |   PFRA          |    |  OS  |
@@ -479,6 +491,11 @@ static inline int mm_has_pgste(struct mm_struct *mm)
 	return 0;
 }
 
+/*
+ * In the case that a guest uses storage keys
+ * faults should no longer be backed by zero pages
+ */
+#define mm_forbids_zeropage mm_use_skey
 static inline int mm_use_skey(struct mm_struct *mm)
 {
 #ifdef CONFIG_PGSTE
@@ -1634,6 +1651,19 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
 	return pmd;
 }
 
+#define __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
+static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
+					    unsigned long address,
+					    pmd_t *pmdp, int full)
+{
+	pmd_t pmd = *pmdp;
+
+	if (!full)
+		pmdp_flush_lazy(mm, address, pmdp);
+	pmd_clear(pmdp);
+	return pmd;
+}
+
 #define __HAVE_ARCH_PMDP_CLEAR_FLUSH
 static inline pmd_t pmdp_clear_flush(struct vm_area_struct *vma,
 				     unsigned long address, pmd_t *pmdp)
@@ -1746,7 +1776,8 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
 extern int vmem_add_mapping(unsigned long start, unsigned long size);
 extern int vmem_remove_mapping(unsigned long start, unsigned long size);
 extern int s390_enable_sie(void);
-extern void s390_enable_skey(void);
+extern int s390_enable_skey(void);
+extern void s390_reset_cmma(struct mm_struct *mm);
 
 /*
  * No page table caches to initialise
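
[note] pmdp_get_and_clear_full() differs from pmdp_get_and_clear() only when `full` is set, i.e. the whole mm is being torn down and a per-entry TLB flush buys nothing. For contrast, the generic fallback that architectures without this override get looks roughly like the following (quoted from memory of asm-generic in this series, so treat it as an assumption):

	#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
	static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
						    unsigned long address,
						    pmd_t *pmdp, int full)
	{
		/* pays for the flush even on full teardown */
		return pmdp_get_and_clear(mm, address, pmdp);
	}
	#endif
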
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index d559bdb03d18..bed05ea7ec27 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -217,8 +217,6 @@ static inline unsigned short stap(void)
  */
 static inline void cpu_relax(void)
 {
-	if (MACHINE_HAS_DIAG44)
-		asm volatile("diag 0,0,68");
 	barrier();
 }
 
diff --git a/arch/s390/include/asm/spinlock.h b/arch/s390/include/asm/spinlock.h
index d6bdf906caa5..0e37cd041241 100644
--- a/arch/s390/include/asm/spinlock.h
+++ b/arch/s390/include/asm/spinlock.h
@@ -18,14 +18,7 @@ extern int spin_retry;
 static inline int
 _raw_compare_and_swap(unsigned int *lock, unsigned int old, unsigned int new)
 {
-	unsigned int old_expected = old;
-
-	asm volatile(
-		"	cs	%0,%3,%1"
-		: "=d" (old), "=Q" (*lock)
-		: "0" (old), "d" (new), "Q" (*lock)
-		: "cc", "memory" );
-	return old == old_expected;
+	return __sync_bool_compare_and_swap(lock, old, new);
 }
 
 /*
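
[note] __sync_bool_compare_and_swap() compiles to the same compare-and-swap (`cs`) instruction with full-barrier semantics, so the hand-rolled asm added nothing. A user-space sketch of the resulting primitive (the trylock helper is illustrative, not from this patch):

	#include <stdbool.h>

	/* Succeeds only if *lock was 0; implies a full memory barrier. */
	static bool trylock(unsigned int *lock, unsigned int lockval)
	{
		return __sync_bool_compare_and_swap(lock, 0, lockval);
	}
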
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 572c59949004..06d8741ad6f4 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -121,6 +121,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
 #ifdef CONFIG_64BIT
 	if (tlb->mm->context.asce_limit <= (1UL << 31))
 		return;
+	pgtable_pmd_page_dtor(virt_to_page(pmd));
 	tlb_remove_table(tlb, pmd);
 #endif
 }
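
[note] pgtable_pmd_page_ctor() and pgtable_pmd_page_dtor() must pair up: the ctor initialises the split-ptlock state on the page backing a pmd table, and the dtor has to run before that page returns to the allocator, which is the leak this hunk fixes. Sketch of the allocation-side pairing (order-2 crst allocation simplified, names abbreviated, treat the details as assumptions):

	/* Sketch only: pmd-table page with split-ptlock state. */
	static pmd_t *pmd_table_alloc(void)
	{
		struct page *page = alloc_pages(GFP_KERNEL, 2);

		if (!page)
			return NULL;
		if (!pgtable_pmd_page_ctor(page)) {
			__free_pages(page, 2);
			return NULL;
		}
		/* ...and pgtable_pmd_page_dtor(page) before freeing. */
		return (pmd_t *) page_address(page);
	}
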
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index 4197c89c52d4..2b446cf0cc65 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -287,7 +287,9 @@
 #define __NR_getrandom		349
 #define __NR_memfd_create	350
 #define __NR_bpf		351
-#define NR_syscalls 352
+#define __NR_s390_pci_mmio_write	352
+#define __NR_s390_pci_mmio_read	353
+#define NR_syscalls 354
 
 /*
  * There are some system calls that are not present on 64 bit, some
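
[note] Until libc headers carry the new numbers, the calls are reachable through syscall(2). A hedged user-space sketch using the numbers defined above:

	#include <sys/syscall.h>
	#include <unistd.h>

	#ifndef __NR_s390_pci_mmio_read
	#define __NR_s390_pci_mmio_read 353
	#endif

	/* mmio_addr must point into a PCI BAR mapped in this process. */
	static long pci_mmio_read(unsigned long mmio_addr, void *buf, size_t len)
	{
		return syscall(__NR_s390_pci_mmio_read, mmio_addr, buf, len);
	}
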
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index ef279a136801..e07e91605353 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -17,8 +17,8 @@
  * Make sure that the compiler is new enough. We want a compiler that
  * is known to work with the "Q" assembler constraint.
  */
-#if __GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
-#error Your compiler is too old; please use version 3.3.3 or newer
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 3)
+#error Your compiler is too old; please use version 4.3 or newer
 #endif
 
 int main(void)
@@ -156,7 +156,6 @@ int main(void)
 	DEFINE(__LC_INT_CLOCK, offsetof(struct _lowcore, int_clock));
 	DEFINE(__LC_MCCK_CLOCK, offsetof(struct _lowcore, mcck_clock));
 	DEFINE(__LC_MACHINE_FLAGS, offsetof(struct _lowcore, machine_flags));
-	DEFINE(__LC_FTRACE_FUNC, offsetof(struct _lowcore, ftrace_func));
 	DEFINE(__LC_DUMP_REIPL, offsetof(struct _lowcore, ipib));
 	BLANK();
 	DEFINE(__LC_CPU_TIMER_SAVE_AREA, offsetof(struct _lowcore, cpu_timer_save_area));
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 009f5eb11125..34d5fa7b01b5 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -434,7 +434,7 @@ static int setup_frame32(struct ksignal *ksig, sigset_t *set,
 			ksig->ka.sa.sa_restorer | PSW32_ADDR_AMODE;
 	} else {
 		/* Signal frames without vectors registers are short ! */
-		__u16 __user *svc = (void *) frame + frame_size - 2;
+		__u16 __user *svc = (void __user *) frame + frame_size - 2;
 		if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc))
 			return -EFAULT;
 		restorer = (unsigned long __force) svc | PSW32_ADDR_AMODE;
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index c4f7a3d655b8..d7fa2f0f1425 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -218,3 +218,5 @@ COMPAT_SYSCALL_WRAP3(seccomp, unsigned int, op, unsigned int, flags, const char
 COMPAT_SYSCALL_WRAP3(getrandom, char __user *, buf, size_t, count, unsigned int, flags)
 COMPAT_SYSCALL_WRAP2(memfd_create, const char __user *, uname, unsigned int, flags)
 COMPAT_SYSCALL_WRAP3(bpf, int, cmd, union bpf_attr *, attr, unsigned int, size);
+COMPAT_SYSCALL_WRAP3(s390_pci_mmio_write, const unsigned long, mmio_addr, const void __user *, user_buffer, const size_t, length);
+COMPAT_SYSCALL_WRAP3(s390_pci_mmio_read, const unsigned long, mmio_addr, void __user *, user_buffer, const size_t, length);
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index ee8390da6ea7..c1f21aca76e7 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -1019,7 +1019,7 @@ debug_count_numargs(char *string)
  */
 
 debug_entry_t*
-debug_sprintf_event(debug_info_t* id, int level,char *string,...)
+__debug_sprintf_event(debug_info_t *id, int level, char *string, ...)
 {
 	va_list   ap;
 	int numargs,idx;
@@ -1027,8 +1027,6 @@ debug_sprintf_event(debug_info_t* id, int level,char *string,...)
 	debug_sprintf_entry_t *curr_event;
 	debug_entry_t *active;
 
-	if((!id) || (level > id->level))
-		return NULL;
 	if (!debug_active || !id->areas)
 		return NULL;
 	numargs=debug_count_numargs(string);
@@ -1050,14 +1048,14 @@ debug_sprintf_event(debug_info_t* id, int level,char *string,...)
 
 	return active;
 }
-EXPORT_SYMBOL(debug_sprintf_event);
+EXPORT_SYMBOL(__debug_sprintf_event);
 
 /*
  * debug_sprintf_exception:
  */
 
 debug_entry_t*
-debug_sprintf_exception(debug_info_t* id, int level,char *string,...)
+__debug_sprintf_exception(debug_info_t *id, int level, char *string, ...)
 {
 	va_list   ap;
 	int numargs,idx;
@@ -1065,8 +1063,6 @@ debug_sprintf_exception(debug_info_t* id, int level,char *string,...)
 	debug_sprintf_entry_t *curr_event;
 	debug_entry_t *active;
 
-	if((!id) || (level > id->level))
-		return NULL;
 	if (!debug_active || !id->areas)
 		return NULL;
 
@@ -1089,7 +1085,7 @@ debug_sprintf_exception(debug_info_t* id, int level,char *string,...)
 
 	return active;
 }
-EXPORT_SYMBOL(debug_sprintf_exception);
+EXPORT_SYMBOL(__debug_sprintf_exception);
 
 /*
  * debug_register_view:
diff --git a/arch/s390/kernel/dumpstack.c b/arch/s390/kernel/dumpstack.c
index acb412442e5e..a99852e96a77 100644
--- a/arch/s390/kernel/dumpstack.c
+++ b/arch/s390/kernel/dumpstack.c
@@ -191,7 +191,8 @@ void die(struct pt_regs *regs, const char *str)
 	console_verbose();
 	spin_lock_irq(&die_lock);
 	bust_spinlocks(1);
-	printk("%s: %04x [#%d] ", str, regs->int_code & 0xffff, ++die_counter);
+	printk("%s: %04x ilc:%d [#%d] ", str, regs->int_code & 0xffff,
+	       regs->int_code >> 17, ++die_counter);
 #ifdef CONFIG_PREEMPT
 	printk("PREEMPT ");
 #endif
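
[note] regs->int_code carries the program-interruption code in its low halfword and, assuming the upper halfword holds the instruction length in bytes (2, 4 or 6), the `>> 17` prints that length in halfwords. A decoding sketch under that assumption:

	/* Sketch: the two fields die() now prints. */
	static void decode_int_code(u32 int_code)
	{
		u16 code = int_code & 0xffff;	/* interruption code */
		u32 ilc  = int_code >> 17;	/* 1, 2 or 3 halfwords */

		pr_debug("intcode %04x ilc %u\n", code, ilc);
	}
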
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index cef2879edff3..302ac1f7f8e7 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -12,7 +12,6 @@
 #include <linux/errno.h>
 #include <linux/string.h>
 #include <linux/ctype.h>
-#include <linux/ftrace.h>
 #include <linux/lockdep.h>
 #include <linux/module.h>
 #include <linux/pfn.h>
@@ -490,8 +489,5 @@ void __init startup_init(void)
 	detect_machine_facilities();
 	setup_topology();
 	sclp_early_detect();
-#ifdef CONFIG_DYNAMIC_FTRACE
-	S390_lowcore.ftrace_func = (unsigned long)ftrace_caller;
-#endif
 	lockdep_on();
 }
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 70203265196f..398329b2b518 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -53,7 +53,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
 	.macro	TRACE_IRQS_ON
 #ifdef CONFIG_TRACE_IRQFLAGS
 	basr	%r2,%r0
-	l	%r1,BASED(.Lhardirqs_on)
+	l	%r1,BASED(.Lc_hardirqs_on)
 	basr	%r14,%r1		# call trace_hardirqs_on_caller
 #endif
 	.endm
@@ -61,7 +61,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
 	.macro	TRACE_IRQS_OFF
 #ifdef CONFIG_TRACE_IRQFLAGS
 	basr	%r2,%r0
-	l	%r1,BASED(.Lhardirqs_off)
+	l	%r1,BASED(.Lc_hardirqs_off)
 	basr	%r14,%r1		# call trace_hardirqs_off_caller
 #endif
 	.endm
@@ -70,7 +70,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
 #ifdef CONFIG_LOCKDEP
 	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
 	jz	.+10
-	l	%r1,BASED(.Llockdep_sys_exit)
+	l	%r1,BASED(.Lc_lockdep_sys_exit)
 	basr	%r14,%r1		# call lockdep_sys_exit
 #endif
 	.endm
@@ -87,8 +87,8 @@ _PIF_WORK = (_PIF_PER_TRAP)
 	tmh	%r8,0x0001		# interrupting from user ?
 	jnz	1f
 	lr	%r14,%r9
-	sl	%r14,BASED(.Lcritical_start)
-	cl	%r14,BASED(.Lcritical_length)
+	sl	%r14,BASED(.Lc_critical_start)
+	cl	%r14,BASED(.Lc_critical_length)
 	jhe	0f
 	la	%r11,\savearea		# inside critical section, do cleanup
 	bras	%r14,cleanup_critical
@@ -162,7 +162,7 @@ ENTRY(__switch_to)
 	lm	%r6,%r15,__SF_GPRS(%r15)	# load gprs of next task
 	br	%r14
 
-__critical_start:
+.L__critical_start:
 /*
  * SVC interrupt handler routine. System calls are synchronous events and
  * are executed with interrupts enabled.
@@ -170,145 +170,145 @@ __critical_start:
 
 ENTRY(system_call)
 	stpt	__LC_SYNC_ENTER_TIMER
-sysc_stm:
+.Lsysc_stm:
 	stm	%r8,%r15,__LC_SAVE_AREA_SYNC
 	l	%r12,__LC_THREAD_INFO
 	l	%r13,__LC_SVC_NEW_PSW+4
 	lhi	%r14,_PIF_SYSCALL
-sysc_per:
+.Lsysc_per:
 	l	%r15,__LC_KERNEL_STACK
 	la	%r11,STACK_FRAME_OVERHEAD(%r15)	# pointer to pt_regs
-sysc_vtime:
+.Lsysc_vtime:
 	UPDATE_VTIME %r8,%r9,__LC_SYNC_ENTER_TIMER
 	stm	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_SYNC
 	mvc	__PT_PSW(8,%r11),__LC_SVC_OLD_PSW
 	mvc	__PT_INT_CODE(4,%r11),__LC_SVC_ILC
 	st	%r14,__PT_FLAGS(%r11)
-sysc_do_svc:
+.Lsysc_do_svc:
 	l	%r10,__TI_sysc_table(%r12)	# 31 bit system call table
 	lh	%r8,__PT_INT_CODE+2(%r11)
 	sla	%r8,2				# shift and test for svc0
-	jnz	sysc_nr_ok
+	jnz	.Lsysc_nr_ok
 	# svc 0: system call number in %r1
 	cl	%r1,BASED(.Lnr_syscalls)
-	jnl	sysc_nr_ok
+	jnl	.Lsysc_nr_ok
 	sth	%r1,__PT_INT_CODE+2(%r11)
 	lr	%r8,%r1
 	sla	%r8,2
-sysc_nr_ok:
+.Lsysc_nr_ok:
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
 	st	%r2,__PT_ORIG_GPR2(%r11)
 	st	%r7,STACK_FRAME_OVERHEAD(%r15)
 	l	%r9,0(%r8,%r10)			# get system call addr.
 	tm	__TI_flags+3(%r12),_TIF_TRACE
-	jnz	sysc_tracesys
+	jnz	.Lsysc_tracesys
 	basr	%r14,%r9			# call sys_xxxx
 	st	%r2,__PT_R2(%r11)		# store return value
 
-sysc_return:
+.Lsysc_return:
 	LOCKDEP_SYS_EXIT
-sysc_tif:
+.Lsysc_tif:
 	tm	__PT_PSW+1(%r11),0x01		# returning to user ?
-	jno	sysc_restore
+	jno	.Lsysc_restore
 	tm	__PT_FLAGS+3(%r11),_PIF_WORK
-	jnz	sysc_work
+	jnz	.Lsysc_work
 	tm	__TI_flags+3(%r12),_TIF_WORK
-	jnz	sysc_work			# check for thread work
+	jnz	.Lsysc_work			# check for thread work
 	tm	__LC_CPU_FLAGS+3,_CIF_WORK
-	jnz	sysc_work
-sysc_restore:
+	jnz	.Lsysc_work
+.Lsysc_restore:
 	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r11)
 	stpt	__LC_EXIT_TIMER
 	lm	%r0,%r15,__PT_R0(%r11)
 	lpsw	__LC_RETURN_PSW
-sysc_done:
+.Lsysc_done:
 
 #
 # One of the work bits is on. Find out which one.
 #
-sysc_work:
+.Lsysc_work:
 	tm	__LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
-	jo	sysc_mcck_pending
+	jo	.Lsysc_mcck_pending
 	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
-	jo	sysc_reschedule
+	jo	.Lsysc_reschedule
 	tm	__PT_FLAGS+3(%r11),_PIF_PER_TRAP
-	jo	sysc_singlestep
+	jo	.Lsysc_singlestep
 	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
-	jo	sysc_sigpending
+	jo	.Lsysc_sigpending
 	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
-	jo	sysc_notify_resume
+	jo	.Lsysc_notify_resume
 	tm	__LC_CPU_FLAGS+3,_CIF_ASCE
-	jo	sysc_uaccess
-	j	sysc_return		# beware of critical section cleanup
+	jo	.Lsysc_uaccess
+	j	.Lsysc_return		# beware of critical section cleanup
 
 #
 # _TIF_NEED_RESCHED is set, call schedule
 #
-sysc_reschedule:
-	l	%r1,BASED(.Lschedule)
-	la	%r14,BASED(sysc_return)
+.Lsysc_reschedule:
+	l	%r1,BASED(.Lc_schedule)
+	la	%r14,BASED(.Lsysc_return)
 	br	%r1			# call schedule
 
 #
 # _CIF_MCCK_PENDING is set, call handler
 #
-sysc_mcck_pending:
-	l	%r1,BASED(.Lhandle_mcck)
-	la	%r14,BASED(sysc_return)
+.Lsysc_mcck_pending:
+	l	%r1,BASED(.Lc_handle_mcck)
+	la	%r14,BASED(.Lsysc_return)
 	br	%r1			# TIF bit will be cleared by handler
 
 #
 # _CIF_ASCE is set, load user space asce
 #
-sysc_uaccess:
+.Lsysc_uaccess:
 	ni	__LC_CPU_FLAGS+3,255-_CIF_ASCE
 	lctl	%c1,%c1,__LC_USER_ASCE	# load primary asce
-	j	sysc_return
+	j	.Lsysc_return
 
 #
 # _TIF_SIGPENDING is set, call do_signal
 #
-sysc_sigpending:
+.Lsysc_sigpending:
 	lr	%r2,%r11		# pass pointer to pt_regs
-	l	%r1,BASED(.Ldo_signal)
+	l	%r1,BASED(.Lc_do_signal)
 	basr	%r14,%r1		# call do_signal
 	tm	__PT_FLAGS+3(%r11),_PIF_SYSCALL
-	jno	sysc_return
+	jno	.Lsysc_return
 	lm	%r2,%r7,__PT_R2(%r11)	# load svc arguments
 	l	%r10,__TI_sysc_table(%r12)	# 31 bit system call table
 	xr	%r8,%r8			# svc 0 returns -ENOSYS
 	clc	__PT_INT_CODE+2(2,%r11),BASED(.Lnr_syscalls+2)
-	jnl	sysc_nr_ok		# invalid svc number -> do svc 0
+	jnl	.Lsysc_nr_ok		# invalid svc number -> do svc 0
 	lh	%r8,__PT_INT_CODE+2(%r11)	# load new svc number
 	sla	%r8,2
-	j	sysc_nr_ok		# restart svc
+	j	.Lsysc_nr_ok		# restart svc
 
 #
 # _TIF_NOTIFY_RESUME is set, call do_notify_resume
 #
-sysc_notify_resume:
+.Lsysc_notify_resume:
 	lr	%r2,%r11		# pass pointer to pt_regs
-	l	%r1,BASED(.Ldo_notify_resume)
-	la	%r14,BASED(sysc_return)
+	l	%r1,BASED(.Lc_do_notify_resume)
+	la	%r14,BASED(.Lsysc_return)
 	br	%r1			# call do_notify_resume
 
 #
 # _PIF_PER_TRAP is set, call do_per_trap
 #
-sysc_singlestep:
+.Lsysc_singlestep:
 	ni	__PT_FLAGS+3(%r11),255-_PIF_PER_TRAP
 	lr	%r2,%r11		# pass pointer to pt_regs
-	l	%r1,BASED(.Ldo_per_trap)
-	la	%r14,BASED(sysc_return)
+	l	%r1,BASED(.Lc_do_per_trap)
+	la	%r14,BASED(.Lsysc_return)
 	br	%r1			# call do_per_trap
 
 #
 # call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
 # and after the system call
 #
-sysc_tracesys:
-	l	%r1,BASED(.Ltrace_enter)
+.Lsysc_tracesys:
+	l	%r1,BASED(.Lc_trace_enter)
 	lr	%r2,%r11		# pass pointer to pt_regs
 	la	%r3,0
 	xr	%r0,%r0
@@ -316,22 +316,22 @@ sysc_tracesys:
 	st	%r0,__PT_R2(%r11)
 	basr	%r14,%r1		# call do_syscall_trace_enter
 	cl	%r2,BASED(.Lnr_syscalls)
-	jnl	sysc_tracenogo
+	jnl	.Lsysc_tracenogo
 	lr	%r8,%r2
 	sll	%r8,2
 	l	%r9,0(%r8,%r10)
-sysc_tracego:
+.Lsysc_tracego:
 	lm	%r3,%r7,__PT_R3(%r11)
 	st	%r7,STACK_FRAME_OVERHEAD(%r15)
 	l	%r2,__PT_ORIG_GPR2(%r11)
 	basr	%r14,%r9		# call sys_xxx
 	st	%r2,__PT_R2(%r11)	# store return value
-sysc_tracenogo:
+.Lsysc_tracenogo:
 	tm	__TI_flags+3(%r12),_TIF_TRACE
-	jz	sysc_return
-	l	%r1,BASED(.Ltrace_exit)
+	jz	.Lsysc_return
+	l	%r1,BASED(.Lc_trace_exit)
 	lr	%r2,%r11		# pass pointer to pt_regs
-	la	%r14,BASED(sysc_return)
+	la	%r14,BASED(.Lsysc_return)
 	br	%r1			# call do_syscall_trace_exit
 
 #
@@ -341,18 +341,18 @@ ENTRY(ret_from_fork)
 	la	%r11,STACK_FRAME_OVERHEAD(%r15)
 	l	%r12,__LC_THREAD_INFO
 	l	%r13,__LC_SVC_NEW_PSW+4
-	l	%r1,BASED(.Lschedule_tail)
+	l	%r1,BASED(.Lc_schedule_tail)
 	basr	%r14,%r1		# call schedule_tail
 	TRACE_IRQS_ON
 	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 	tm	__PT_PSW+1(%r11),0x01	# forking a kernel thread ?
-	jne	sysc_tracenogo
+	jne	.Lsysc_tracenogo
 	# it's a kernel thread
 	lm	%r9,%r10,__PT_R9(%r11)	# load gprs
 ENTRY(kernel_thread_starter)
 	la	%r2,0(%r10)
 	basr	%r14,%r9
-	j	sysc_tracenogo
+	j	.Lsysc_tracenogo
 
 /*
  * Program check handler routine
@@ -369,7 +369,7 @@ ENTRY(pgm_check_handler)
 	tmh	%r8,0x4000		# PER bit set in old PSW ?
 	jnz	0f			# -> enabled, can't be a double fault
 	tm	__LC_PGM_ILC+3,0x80	# check for per exception
-	jnz	pgm_svcper		# -> single stepped svc
+	jnz	.Lpgm_svcper		# -> single stepped svc
 0:	CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
 	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
 	j	2f
@@ -386,42 +386,42 @@ ENTRY(pgm_check_handler)
 	jz	0f
 	l	%r1,__TI_task(%r12)
 	tmh	%r8,0x0001		# kernel per event ?
-	jz	pgm_kprobe
+	jz	.Lpgm_kprobe
 	oi	__PT_FLAGS+3(%r11),_PIF_PER_TRAP
 	mvc	__THREAD_per_address(4,%r1),__LC_PER_ADDRESS
 	mvc	__THREAD_per_cause(2,%r1),__LC_PER_CODE
 	mvc	__THREAD_per_paid(1,%r1),__LC_PER_ACCESS_ID
 0:	REENABLE_IRQS
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-	l	%r1,BASED(.Ljump_table)
+	l	%r1,BASED(.Lc_jump_table)
 	la	%r10,0x7f
 	n	%r10,__PT_INT_CODE(%r11)
-	je	sysc_return
+	je	.Lsysc_return
 	sll	%r10,2
 	l	%r1,0(%r10,%r1)		# load address of handler routine
 	lr	%r2,%r11		# pass pointer to pt_regs
 	basr	%r14,%r1		# branch to interrupt-handler
-	j	sysc_return
+	j	.Lsysc_return
 
 #
 # PER event in supervisor state, must be kprobes
 #
-pgm_kprobe:
+.Lpgm_kprobe:
 	REENABLE_IRQS
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-	l	%r1,BASED(.Ldo_per_trap)
+	l	%r1,BASED(.Lc_do_per_trap)
 	lr	%r2,%r11		# pass pointer to pt_regs
 	basr	%r14,%r1		# call do_per_trap
-	j	sysc_return
+	j	.Lsysc_return
 
 #
 # single stepped system call
 #
-pgm_svcper:
+.Lpgm_svcper:
 	mvc	__LC_RETURN_PSW(4),__LC_SVC_NEW_PSW
-	mvc	__LC_RETURN_PSW+4(4),BASED(.Lsysc_per)
+	mvc	__LC_RETURN_PSW+4(4),BASED(.Lc_sysc_per)
 	lhi	%r14,_PIF_SYSCALL | _PIF_PER_TRAP
-	lpsw	__LC_RETURN_PSW		# branch to sysc_per and enable irqs
+	lpsw	__LC_RETURN_PSW		# branch to .Lsysc_per and enable irqs
 
 /*
  * IO interrupt handler routine
@@ -435,9 +435,9 @@ ENTRY(io_int_handler)
 	l	%r13,__LC_SVC_NEW_PSW+4
 	lm	%r8,%r9,__LC_IO_OLD_PSW
 	tmh	%r8,0x0001		# interrupting from user ?
-	jz	io_skip
+	jz	.Lio_skip
 	UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
-io_skip:
+.Lio_skip:
 	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
 	stm	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
@@ -446,35 +446,35 @@ io_skip:
 	xc	__PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
 	TRACE_IRQS_OFF
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-io_loop:
-	l	%r1,BASED(.Ldo_IRQ)
+.Lio_loop:
+	l	%r1,BASED(.Lc_do_IRQ)
 	lr	%r2,%r11		# pass pointer to pt_regs
 	lhi	%r3,IO_INTERRUPT
 	tm	__PT_INT_CODE+8(%r11),0x80	# adapter interrupt ?
-	jz	io_call
+	jz	.Lio_call
 	lhi	%r3,THIN_INTERRUPT
-io_call:
+.Lio_call:
 	basr	%r14,%r1		# call do_IRQ
 	tm	__LC_MACHINE_FLAGS+2,0x10	# MACHINE_FLAG_LPAR
-	jz	io_return
+	jz	.Lio_return
 	tpi	0
-	jz	io_return
+	jz	.Lio_return
 	mvc	__PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
-	j	io_loop
-io_return:
+	j	.Lio_loop
+.Lio_return:
 	LOCKDEP_SYS_EXIT
 	TRACE_IRQS_ON
-io_tif:
+.Lio_tif:
 	tm	__TI_flags+3(%r12),_TIF_WORK
-	jnz	io_work			# there is work to do (signals etc.)
+	jnz	.Lio_work		# there is work to do (signals etc.)
 	tm	__LC_CPU_FLAGS+3,_CIF_WORK
-	jnz	io_work
-io_restore:
+	jnz	.Lio_work
+.Lio_restore:
 	mvc	__LC_RETURN_PSW(8),__PT_PSW(%r11)
 	stpt	__LC_EXIT_TIMER
 	lm	%r0,%r15,__PT_R0(%r11)
 	lpsw	__LC_RETURN_PSW
-io_done:
+.Lio_done:
 
 #
 # There is work todo, find out in which context we have been interrupted:
@@ -483,15 +483,15 @@ io_done:
 # the preemption counter and if it is zero call preempt_schedule_irq
 # Before any work can be done, a switch to the kernel stack is required.
 #
-io_work:
+.Lio_work:
 	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
-	jo	io_work_user		# yes -> do resched & signal
+	jo	.Lio_work_user		# yes -> do resched & signal
 #ifdef CONFIG_PREEMPT
 	# check for preemptive scheduling
 	icm	%r0,15,__TI_precount(%r12)
-	jnz	io_restore		# preemption disabled
+	jnz	.Lio_restore		# preemption disabled
 	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
-	jno	io_restore
+	jno	.Lio_restore
 	# switch to kernel stack
 	l	%r1,__PT_R15(%r11)
 	ahi	%r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
@@ -499,20 +499,20 @@ io_work:
 	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
 	la	%r11,STACK_FRAME_OVERHEAD(%r1)
 	lr	%r15,%r1
-	# TRACE_IRQS_ON already done at io_return, call
+	# TRACE_IRQS_ON already done at .Lio_return, call
 	# TRACE_IRQS_OFF to keep things symmetrical
 	TRACE_IRQS_OFF
-	l	%r1,BASED(.Lpreempt_irq)
+	l	%r1,BASED(.Lc_preempt_irq)
 	basr	%r14,%r1		# call preempt_schedule_irq
-	j	io_return
+	j	.Lio_return
 #else
-	j	io_restore
+	j	.Lio_restore
 #endif
 
 #
 # Need to do work before returning to userspace, switch to kernel stack
 #
-io_work_user:
+.Lio_work_user:
 	l	%r1,__LC_KERNEL_STACK
 	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
 	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
@@ -522,74 +522,74 @@ io_work_user:
 #
 # One of the work bits is on. Find out which one.
 #
-io_work_tif:
+.Lio_work_tif:
 	tm	__LC_CPU_FLAGS+3(%r12),_CIF_MCCK_PENDING
-	jo	io_mcck_pending
+	jo	.Lio_mcck_pending
 	tm	__TI_flags+3(%r12),_TIF_NEED_RESCHED
-	jo	io_reschedule
+	jo	.Lio_reschedule
 	tm	__TI_flags+3(%r12),_TIF_SIGPENDING
-	jo	io_sigpending
+	jo	.Lio_sigpending
 	tm	__TI_flags+3(%r12),_TIF_NOTIFY_RESUME
-	jo	io_notify_resume
+	jo	.Lio_notify_resume
 	tm	__LC_CPU_FLAGS+3,_CIF_ASCE
-	jo	io_uaccess
-	j	io_return		# beware of critical section cleanup
+	jo	.Lio_uaccess
+	j	.Lio_return		# beware of critical section cleanup
 
 #
 # _CIF_MCCK_PENDING is set, call handler
 #
-io_mcck_pending:
-	# TRACE_IRQS_ON already done at io_return
-	l	%r1,BASED(.Lhandle_mcck)
+.Lio_mcck_pending:
+	# TRACE_IRQS_ON already done at .Lio_return
+	l	%r1,BASED(.Lc_handle_mcck)
 	basr	%r14,%r1		# TIF bit will be cleared by handler
 	TRACE_IRQS_OFF
-	j	io_return
+	j	.Lio_return
 
 #
 # _CIF_ASCE is set, load user space asce
 #
-io_uaccess:
+.Lio_uaccess:
 	ni	__LC_CPU_FLAGS+3,255-_CIF_ASCE
 	lctl	%c1,%c1,__LC_USER_ASCE	# load primary asce
-	j	io_return
+	j	.Lio_return
 
 #
 # _TIF_NEED_RESCHED is set, call schedule
 #
-io_reschedule:
-	# TRACE_IRQS_ON already done at io_return
-	l	%r1,BASED(.Lschedule)
+.Lio_reschedule:
+	# TRACE_IRQS_ON already done at .Lio_return
+	l	%r1,BASED(.Lc_schedule)
 	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 	basr	%r14,%r1		# call scheduler
 	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 	TRACE_IRQS_OFF
-	j	io_return
+	j	.Lio_return
 
 #
 # _TIF_SIGPENDING is set, call do_signal
 #
-io_sigpending:
-	# TRACE_IRQS_ON already done at io_return
-	l	%r1,BASED(.Ldo_signal)
+.Lio_sigpending:
+	# TRACE_IRQS_ON already done at .Lio_return
+	l	%r1,BASED(.Lc_do_signal)
 	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 	lr	%r2,%r11		# pass pointer to pt_regs
 	basr	%r14,%r1		# call do_signal
 	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 	TRACE_IRQS_OFF
-	j	io_return
+	j	.Lio_return
 
 #
 # _TIF_SIGPENDING is set, call do_signal
 #
-io_notify_resume:
-	# TRACE_IRQS_ON already done at io_return
-	l	%r1,BASED(.Ldo_notify_resume)
+.Lio_notify_resume:
+	# TRACE_IRQS_ON already done at .Lio_return
+	l	%r1,BASED(.Lc_do_notify_resume)
 	ssm	__LC_SVC_NEW_PSW	# reenable interrupts
 	lr	%r2,%r11		# pass pointer to pt_regs
 	basr	%r14,%r1		# call do_notify_resume
 	ssm	__LC_PGM_NEW_PSW	# disable I/O and ext. interrupts
 	TRACE_IRQS_OFF
-	j	io_return
+	j	.Lio_return
 
 /*
  * External interrupt handler routine
@@ -603,9 +603,9 @@ ENTRY(ext_int_handler)
 	l	%r13,__LC_SVC_NEW_PSW+4
 	lm	%r8,%r9,__LC_EXT_OLD_PSW
 	tmh	%r8,0x0001		# interrupting from user ?
-	jz	ext_skip
+	jz	.Lext_skip
 	UPDATE_VTIME %r14,%r15,__LC_ASYNC_ENTER_TIMER
-ext_skip:
+.Lext_skip:
 	SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
 	stm	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(32,%r11),__LC_SAVE_AREA_ASYNC
@@ -614,29 +614,29 @@ ext_skip:
 	mvc	__PT_INT_PARM(4,%r11),__LC_EXT_PARAMS
 	xc	__PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
 	TRACE_IRQS_OFF
-	l	%r1,BASED(.Ldo_IRQ)
+	l	%r1,BASED(.Lc_do_IRQ)
 	lr	%r2,%r11		# pass pointer to pt_regs
 	lhi	%r3,EXT_INTERRUPT
 	basr	%r14,%r1		# call do_IRQ
-	j	io_return
+	j	.Lio_return
 
 /*
- * Load idle PSW. The second "half" of this function is in cleanup_idle.
+ * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
  */
 ENTRY(psw_idle)
 	st	%r3,__SF_EMPTY(%r15)
 	basr	%r1,0
-	la	%r1,psw_idle_lpsw+4-.(%r1)
+	la	%r1,.Lpsw_idle_lpsw+4-.(%r1)
 	st	%r1,__SF_EMPTY+4(%r15)
 	oi	__SF_EMPTY+4(%r15),0x80
 	stck	__CLOCK_IDLE_ENTER(%r2)
 	stpt	__TIMER_IDLE_ENTER(%r2)
-psw_idle_lpsw:
+.Lpsw_idle_lpsw:
 	lpsw	__SF_EMPTY(%r15)
 	br	%r14
-psw_idle_end:
+.Lpsw_idle_end:
 
-__critical_end:
+.L__critical_end:
 
 /*
  * Machine check handler routines
@@ -650,7 +650,7 @@ ENTRY(mcck_int_handler)
 	l	%r13,__LC_SVC_NEW_PSW+4
 	lm	%r8,%r9,__LC_MCK_OLD_PSW
 	tm	__LC_MCCK_CODE,0x80	# system damage?
-	jo	mcck_panic		# yes -> rest of mcck code invalid
+	jo	.Lmcck_panic		# yes -> rest of mcck code invalid
 	la	%r14,__LC_CPU_TIMER_SAVE_AREA
 	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
 	tm	__LC_MCCK_CODE+5,0x02	# stored cpu timer value valid?
@@ -668,22 +668,22 @@ ENTRY(mcck_int_handler)
 2:	spt	0(%r14)
 	mvc	__LC_MCCK_ENTER_TIMER(8),0(%r14)
 3:	tm	__LC_MCCK_CODE+2,0x09	# mwp + ia of old psw valid?
-	jno	mcck_panic		# no -> skip cleanup critical
+	jno	.Lmcck_panic		# no -> skip cleanup critical
 	tm	%r8,0x0001		# interrupting from user ?
-	jz	mcck_skip
+	jz	.Lmcck_skip
 	UPDATE_VTIME %r14,%r15,__LC_MCCK_ENTER_TIMER
-mcck_skip:
+.Lmcck_skip:
 	SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+32,__LC_PANIC_STACK,PAGE_SHIFT
 	stm	%r0,%r7,__PT_R0(%r11)
 	mvc	__PT_R8(32,%r11),__LC_GPREGS_SAVE_AREA+32
 	stm	%r8,%r9,__PT_PSW(%r11)
 	xc	__PT_FLAGS(4,%r11),__PT_FLAGS(%r11)
 	xc	__SF_BACKCHAIN(4,%r15),__SF_BACKCHAIN(%r15)
-	l	%r1,BASED(.Ldo_machine_check)
+	l	%r1,BASED(.Lc_do_machine_check)
 	lr	%r2,%r11		# pass pointer to pt_regs
 	basr	%r14,%r1		# call s390_do_machine_check
 	tm	__PT_PSW+1(%r11),0x01	# returning to user ?
-	jno	mcck_return
+	jno	.Lmcck_return
 	l	%r1,__LC_KERNEL_STACK	# switch to kernel stack
 	mvc	STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
 	xc	__SF_BACKCHAIN(4,%r1),__SF_BACKCHAIN(%r1)
@@ -691,12 +691,12 @@ mcck_skip:
 	lr	%r15,%r1
 	ssm	__LC_PGM_NEW_PSW	# turn dat on, keep irqs off
 	tm	__LC_CPU_FLAGS+3,_CIF_MCCK_PENDING
-	jno	mcck_return
+	jno	.Lmcck_return
 	TRACE_IRQS_OFF
-	l	%r1,BASED(.Lhandle_mcck)
+	l	%r1,BASED(.Lc_handle_mcck)
 	basr	%r14,%r1		# call s390_handle_mcck
 	TRACE_IRQS_ON
-mcck_return:
+.Lmcck_return:
 	mvc	__LC_RETURN_MCCK_PSW(8),__PT_PSW(%r11) # move return PSW
 	tm	__LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
 	jno	0f
@@ -706,15 +706,15 @@ mcck_return:
 0:	lm	%r0,%r15,__PT_R0(%r11)
 	lpsw	__LC_RETURN_MCCK_PSW
 
-mcck_panic:
+.Lmcck_panic:
 	l	%r14,__LC_PANIC_STACK
 	slr	%r14,%r15
 	sra	%r14,PAGE_SHIFT
 	jz	0f
 	l	%r15,__LC_PANIC_STACK
-	j	mcck_skip
+	j	.Lmcck_skip
 0:	ahi	%r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
-	j	mcck_skip
+	j	.Lmcck_skip
 
 #
 # PSW restart interrupt handler
@@ -764,58 +764,58 @@ stack_overflow:
 1:	.long	kernel_stack_overflow
 #endif
 
-cleanup_table:
+.Lcleanup_table:
 	.long	system_call + 0x80000000
-	.long	sysc_do_svc + 0x80000000
-	.long	sysc_tif + 0x80000000
-	.long	sysc_restore + 0x80000000
-	.long	sysc_done + 0x80000000
-	.long	io_tif + 0x80000000
-	.long	io_restore + 0x80000000
-	.long	io_done + 0x80000000
+	.long	.Lsysc_do_svc + 0x80000000
+	.long	.Lsysc_tif + 0x80000000
+	.long	.Lsysc_restore + 0x80000000
+	.long	.Lsysc_done + 0x80000000
+	.long	.Lio_tif + 0x80000000
+	.long	.Lio_restore + 0x80000000
+	.long	.Lio_done + 0x80000000
 	.long	psw_idle + 0x80000000
-	.long	psw_idle_end + 0x80000000
+	.long	.Lpsw_idle_end + 0x80000000
 
 cleanup_critical:
-	cl	%r9,BASED(cleanup_table)	# system_call
+	cl	%r9,BASED(.Lcleanup_table)	# system_call
 	jl	0f
-	cl	%r9,BASED(cleanup_table+4)	# sysc_do_svc
-	jl	cleanup_system_call
-	cl	%r9,BASED(cleanup_table+8)	# sysc_tif
+	cl	%r9,BASED(.Lcleanup_table+4)	# .Lsysc_do_svc
+	jl	.Lcleanup_system_call
+	cl	%r9,BASED(.Lcleanup_table+8)	# .Lsysc_tif
 	jl	0f
-	cl	%r9,BASED(cleanup_table+12)	# sysc_restore
-	jl	cleanup_sysc_tif
-	cl	%r9,BASED(cleanup_table+16)	# sysc_done
-	jl	cleanup_sysc_restore
-	cl	%r9,BASED(cleanup_table+20)	# io_tif
+	cl	%r9,BASED(.Lcleanup_table+12)	# .Lsysc_restore
+	jl	.Lcleanup_sysc_tif
+	cl	%r9,BASED(.Lcleanup_table+16)	# .Lsysc_done
+	jl	.Lcleanup_sysc_restore
+	cl	%r9,BASED(.Lcleanup_table+20)	# .Lio_tif
 	jl	0f
-	cl	%r9,BASED(cleanup_table+24)	# io_restore
-	jl	cleanup_io_tif
-	cl	%r9,BASED(cleanup_table+28)	# io_done
-	jl	cleanup_io_restore
-	cl	%r9,BASED(cleanup_table+32)	# psw_idle
+	cl	%r9,BASED(.Lcleanup_table+24)	# .Lio_restore
+	jl	.Lcleanup_io_tif
+	cl	%r9,BASED(.Lcleanup_table+28)	# .Lio_done
+	jl	.Lcleanup_io_restore
+	cl	%r9,BASED(.Lcleanup_table+32)	# psw_idle
 	jl	0f
-	cl	%r9,BASED(cleanup_table+36)	# psw_idle_end
-	jl	cleanup_idle
+	cl	%r9,BASED(.Lcleanup_table+36)	# .Lpsw_idle_end
+	jl	.Lcleanup_idle
 0:	br	%r14
 
-cleanup_system_call:
+.Lcleanup_system_call:
 	# check if stpt has been executed
-	cl	%r9,BASED(cleanup_system_call_insn)
+	cl	%r9,BASED(.Lcleanup_system_call_insn)
 	jh	0f
 	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
 	chi	%r11,__LC_SAVE_AREA_ASYNC
 	je	0f
 	mvc	__LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
 0:	# check if stm has been executed
-	cl	%r9,BASED(cleanup_system_call_insn+4)
+	cl	%r9,BASED(.Lcleanup_system_call_insn+4)
 	jh	0f
 	mvc	__LC_SAVE_AREA_SYNC(32),0(%r11)
 0:	# set up saved registers r12, and r13
 	st	%r12,16(%r11)		# r12 thread-info pointer
 	st	%r13,20(%r11)		# r13 literal-pool pointer
 	# check if the user time calculation has been done
-	cl	%r9,BASED(cleanup_system_call_insn+8)
+	cl	%r9,BASED(.Lcleanup_system_call_insn+8)
 	jh	0f
 	l	%r10,__LC_EXIT_TIMER
 	l	%r15,__LC_EXIT_TIMER+4
@@ -824,7 +824,7 @@ cleanup_system_call:
 	st	%r10,__LC_USER_TIMER
 	st	%r15,__LC_USER_TIMER+4
 0:	# check if the system time calculation has been done
-	cl	%r9,BASED(cleanup_system_call_insn+12)
+	cl	%r9,BASED(.Lcleanup_system_call_insn+12)
 	jh	0f
 	l	%r10,__LC_LAST_UPDATE_TIMER
 	l	%r15,__LC_LAST_UPDATE_TIMER+4
@@ -848,20 +848,20 @@ cleanup_system_call:
848 # setup saved register 15 848 # setup saved register 15
849 st %r15,28(%r11) # r15 stack pointer 849 st %r15,28(%r11) # r15 stack pointer
850 # set new psw address and exit 850 # set new psw address and exit
851 l %r9,BASED(cleanup_table+4) # sysc_do_svc + 0x80000000 851 l %r9,BASED(.Lcleanup_table+4) # .Lsysc_do_svc + 0x80000000
852 br %r14 852 br %r14
853cleanup_system_call_insn: 853.Lcleanup_system_call_insn:
854 .long system_call + 0x80000000 854 .long system_call + 0x80000000
855 .long sysc_stm + 0x80000000 855 .long .Lsysc_stm + 0x80000000
856 .long sysc_vtime + 0x80000000 + 36 856 .long .Lsysc_vtime + 0x80000000 + 36
857 .long sysc_vtime + 0x80000000 + 76 857 .long .Lsysc_vtime + 0x80000000 + 76
858 858
859cleanup_sysc_tif: 859.Lcleanup_sysc_tif:
860 l %r9,BASED(cleanup_table+8) # sysc_tif + 0x80000000 860 l %r9,BASED(.Lcleanup_table+8) # .Lsysc_tif + 0x80000000
861 br %r14 861 br %r14
862 862
863cleanup_sysc_restore: 863.Lcleanup_sysc_restore:
864 cl %r9,BASED(cleanup_sysc_restore_insn) 864 cl %r9,BASED(.Lcleanup_sysc_restore_insn)
865 jhe 0f 865 jhe 0f
866 l %r9,12(%r11) # get saved pointer to pt_regs 866 l %r9,12(%r11) # get saved pointer to pt_regs
867 mvc __LC_RETURN_PSW(8),__PT_PSW(%r9) 867 mvc __LC_RETURN_PSW(8),__PT_PSW(%r9)
@@ -869,15 +869,15 @@ cleanup_sysc_restore:
869 lm %r0,%r7,__PT_R0(%r9) 869 lm %r0,%r7,__PT_R0(%r9)
8700: lm %r8,%r9,__LC_RETURN_PSW 8700: lm %r8,%r9,__LC_RETURN_PSW
871 br %r14 871 br %r14
872cleanup_sysc_restore_insn: 872.Lcleanup_sysc_restore_insn:
873 .long sysc_done - 4 + 0x80000000 873 .long .Lsysc_done - 4 + 0x80000000
874 874
875cleanup_io_tif: 875.Lcleanup_io_tif:
876 l %r9,BASED(cleanup_table+20) # io_tif + 0x80000000 876 l %r9,BASED(.Lcleanup_table+20) # .Lio_tif + 0x80000000
877 br %r14 877 br %r14
878 878
879cleanup_io_restore: 879.Lcleanup_io_restore:
880 cl %r9,BASED(cleanup_io_restore_insn) 880 cl %r9,BASED(.Lcleanup_io_restore_insn)
881 jhe 0f 881 jhe 0f
882 l %r9,12(%r11) # get saved r11 pointer to pt_regs 882 l %r9,12(%r11) # get saved r11 pointer to pt_regs
883 mvc __LC_RETURN_PSW(8),__PT_PSW(%r9) 883 mvc __LC_RETURN_PSW(8),__PT_PSW(%r9)
@@ -885,10 +885,10 @@ cleanup_io_restore:
885 lm %r0,%r7,__PT_R0(%r9) 885 lm %r0,%r7,__PT_R0(%r9)
8860: lm %r8,%r9,__LC_RETURN_PSW 8860: lm %r8,%r9,__LC_RETURN_PSW
887 br %r14 887 br %r14
888cleanup_io_restore_insn: 888.Lcleanup_io_restore_insn:
889 .long io_done - 4 + 0x80000000 889 .long .Lio_done - 4 + 0x80000000
890 890
891cleanup_idle: 891.Lcleanup_idle:
892 # copy interrupt clock & cpu timer 892 # copy interrupt clock & cpu timer
893 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK 893 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
894 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER 894 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
@@ -897,7 +897,7 @@ cleanup_idle:
897 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK 897 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
898 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER 898 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
8990: # check if stck has been executed 8990: # check if stck has been executed
900 cl %r9,BASED(cleanup_idle_insn) 900 cl %r9,BASED(.Lcleanup_idle_insn)
901 jhe 1f 901 jhe 1f
902 mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) 902 mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
903 mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r3) 903 mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r3)
@@ -913,12 +913,12 @@ cleanup_idle:
913 stm %r9,%r10,__LC_SYSTEM_TIMER 913 stm %r9,%r10,__LC_SYSTEM_TIMER
914 mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2) 914 mvc __LC_LAST_UPDATE_TIMER(8),__TIMER_IDLE_EXIT(%r2)
915 # prepare return psw 915 # prepare return psw
916 n %r8,BASED(cleanup_idle_wait) # clear irq & wait state bits 916 n %r8,BASED(.Lcleanup_idle_wait) # clear irq & wait state bits
917 l %r9,24(%r11) # return from psw_idle 917 l %r9,24(%r11) # return from psw_idle
918 br %r14 918 br %r14
919cleanup_idle_insn: 919.Lcleanup_idle_insn:
920 .long psw_idle_lpsw + 0x80000000 920 .long .Lpsw_idle_lpsw + 0x80000000
921cleanup_idle_wait: 921.Lcleanup_idle_wait:
922 .long 0xfcfdffff 922 .long 0xfcfdffff
923 923
924/* 924/*
@@ -933,30 +933,30 @@ cleanup_idle_wait:
933/* 933/*
934 * Symbol constants 934 * Symbol constants
935 */ 935 */
936.Ldo_machine_check: .long s390_do_machine_check 936.Lc_do_machine_check: .long s390_do_machine_check
937.Lhandle_mcck: .long s390_handle_mcck 937.Lc_handle_mcck: .long s390_handle_mcck
938.Ldo_IRQ: .long do_IRQ 938.Lc_do_IRQ: .long do_IRQ
939.Ldo_signal: .long do_signal 939.Lc_do_signal: .long do_signal
940.Ldo_notify_resume: .long do_notify_resume 940.Lc_do_notify_resume: .long do_notify_resume
941.Ldo_per_trap: .long do_per_trap 941.Lc_do_per_trap: .long do_per_trap
942.Ljump_table: .long pgm_check_table 942.Lc_jump_table: .long pgm_check_table
943.Lschedule: .long schedule 943.Lc_schedule: .long schedule
944#ifdef CONFIG_PREEMPT 944#ifdef CONFIG_PREEMPT
945.Lpreempt_irq: .long preempt_schedule_irq 945.Lc_preempt_irq: .long preempt_schedule_irq
946#endif 946#endif
947.Ltrace_enter: .long do_syscall_trace_enter 947.Lc_trace_enter: .long do_syscall_trace_enter
948.Ltrace_exit: .long do_syscall_trace_exit 948.Lc_trace_exit: .long do_syscall_trace_exit
949.Lschedule_tail: .long schedule_tail 949.Lc_schedule_tail: .long schedule_tail
950.Lsysc_per: .long sysc_per + 0x80000000 950.Lc_sysc_per: .long .Lsysc_per + 0x80000000
951#ifdef CONFIG_TRACE_IRQFLAGS 951#ifdef CONFIG_TRACE_IRQFLAGS
952.Lhardirqs_on: .long trace_hardirqs_on_caller 952.Lc_hardirqs_on: .long trace_hardirqs_on_caller
953.Lhardirqs_off: .long trace_hardirqs_off_caller 953.Lc_hardirqs_off: .long trace_hardirqs_off_caller
954#endif 954#endif
955#ifdef CONFIG_LOCKDEP 955#ifdef CONFIG_LOCKDEP
956.Llockdep_sys_exit: .long lockdep_sys_exit 956.Lc_lockdep_sys_exit: .long lockdep_sys_exit
957#endif 957#endif
958.Lcritical_start: .long __critical_start + 0x80000000 958.Lc_critical_start: .long .L__critical_start + 0x80000000
959.Lcritical_length: .long __critical_end - __critical_start 959.Lc_critical_length: .long .L__critical_end - .L__critical_start
960 960
961 .section .rodata, "a" 961 .section .rodata, "a"
962#define SYSCALL(esa,esame,emu) .long esa 962#define SYSCALL(esa,esame,emu) .long esa
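
Most of the entry.S churn above is a mechanical rename of branch targets such as io_restore to .Lio_restore. Labels carrying the .L prefix are local to the GNU assembler and never reach the object file's symbol table, so objdump, perf and the kernel's own backtracer attribute those addresses to the enclosing global entry points (system_call, io_int_handler, ...) rather than to internal jump targets. A minimal sketch of the effect, using file-scope asm() with made-up label names:

/* label_demo.c - compile with "gcc -c label_demo.c", then run "nm label_demo.o":
 * demo_entry and visible_label are listed, .Lhidden_label is not, so any
 * disassembly attributes its address to demo_entry instead.
 */
asm(
"	.text\n"
"	.globl	demo_entry\n"
"demo_entry:\n"
"visible_label:\n"		/* emitted as a local symbol, shows up in nm */
".Lhidden_label:\n"		/* assembler-local, never reaches the symbol table */
"	.long	.Lhidden_label - demo_entry\n"
);
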
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 0554b9771c9f..8e61393c8275 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -74,4 +74,6 @@ struct old_sigaction;
74long sys_s390_personality(unsigned int personality); 74long sys_s390_personality(unsigned int personality);
75long sys_s390_runtime_instr(int command, int signum); 75long sys_s390_runtime_instr(int command, int signum);
76 76
77long sys_s390_pci_mmio_write(unsigned long, const void __user *, size_t);
78long sys_s390_pci_mmio_read(unsigned long, void __user *, size_t);
77#endif /* _ENTRY_H */ 79#endif /* _ENTRY_H */
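
The two prototypes added above declare new s390-specific system calls for PCI MMIO access from user space. A hypothetical user-space wrapper follows; the syscall numbers 352 and 353 are an assumption based on the s390 syscall table of this kernel generation and do not appear in this hunk:

/* pci_mmio_demo.c - hypothetical wrappers for the syscalls declared above.
 * The numbers below are assumptions, not part of this hunk.
 */
#include <unistd.h>
#include <sys/syscall.h>

#define __NR_s390_pci_mmio_write	352	/* assumed syscall number */
#define __NR_s390_pci_mmio_read		353	/* assumed syscall number */

/* copy len bytes from buf to the mapped PCI MMIO address mmio_addr */
static long pci_mmio_write(unsigned long mmio_addr, const void *buf, size_t len)
{
	return syscall(__NR_s390_pci_mmio_write, mmio_addr, buf, len);
}

/* copy len bytes from the mapped PCI MMIO address mmio_addr into buf */
static long pci_mmio_read(unsigned long mmio_addr, void *buf, size_t len)
{
	return syscall(__NR_s390_pci_mmio_read, mmio_addr, buf, len);
}
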
diff --git a/arch/s390/kernel/entry64.S b/arch/s390/kernel/entry64.S
index 7b2e03afd017..c329446a951d 100644
--- a/arch/s390/kernel/entry64.S
+++ b/arch/s390/kernel/entry64.S
@@ -91,7 +91,7 @@ _PIF_WORK = (_PIF_PER_TRAP)
91 .if \reason==1 91 .if \reason==1
92 # Some program interrupts are suppressing (e.g. protection). 92 # Some program interrupts are suppressing (e.g. protection).
93 # We must also check the instruction after SIE in that case. 93 # We must also check the instruction after SIE in that case.
94 # do_protection_exception will rewind to rewind_pad 94 # do_protection_exception will rewind to .Lrewind_pad
95 jh .+42 95 jh .+42
96 .else 96 .else
97 jhe .+42 97 jhe .+42
@@ -192,7 +192,7 @@ ENTRY(__switch_to)
192 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 192 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
193 br %r14 193 br %r14
194 194
195__critical_start: 195.L__critical_start:
196/* 196/*
197 * SVC interrupt handler routine. System calls are synchronous events and 197 * SVC interrupt handler routine. System calls are synchronous events and
198 * are executed with interrupts enabled. 198 * are executed with interrupts enabled.
@@ -200,15 +200,15 @@ __critical_start:
200 200
201ENTRY(system_call) 201ENTRY(system_call)
202 stpt __LC_SYNC_ENTER_TIMER 202 stpt __LC_SYNC_ENTER_TIMER
203sysc_stmg: 203.Lsysc_stmg:
204 stmg %r8,%r15,__LC_SAVE_AREA_SYNC 204 stmg %r8,%r15,__LC_SAVE_AREA_SYNC
205 lg %r10,__LC_LAST_BREAK 205 lg %r10,__LC_LAST_BREAK
206 lg %r12,__LC_THREAD_INFO 206 lg %r12,__LC_THREAD_INFO
207 lghi %r14,_PIF_SYSCALL 207 lghi %r14,_PIF_SYSCALL
208sysc_per: 208.Lsysc_per:
209 lg %r15,__LC_KERNEL_STACK 209 lg %r15,__LC_KERNEL_STACK
210 la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs 210 la %r11,STACK_FRAME_OVERHEAD(%r15) # pointer to pt_regs
211sysc_vtime: 211.Lsysc_vtime:
212 UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER 212 UPDATE_VTIME %r13,__LC_SYNC_ENTER_TIMER
213 LAST_BREAK %r13 213 LAST_BREAK %r13
214 stmg %r0,%r7,__PT_R0(%r11) 214 stmg %r0,%r7,__PT_R0(%r11)
@@ -216,39 +216,39 @@ sysc_vtime:
216 mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW 216 mvc __PT_PSW(16,%r11),__LC_SVC_OLD_PSW
217 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC 217 mvc __PT_INT_CODE(4,%r11),__LC_SVC_ILC
218 stg %r14,__PT_FLAGS(%r11) 218 stg %r14,__PT_FLAGS(%r11)
219sysc_do_svc: 219.Lsysc_do_svc:
220 lg %r10,__TI_sysc_table(%r12) # address of system call table 220 lg %r10,__TI_sysc_table(%r12) # address of system call table
221 llgh %r8,__PT_INT_CODE+2(%r11) 221 llgh %r8,__PT_INT_CODE+2(%r11)
222 slag %r8,%r8,2 # shift and test for svc 0 222 slag %r8,%r8,2 # shift and test for svc 0
223 jnz sysc_nr_ok 223 jnz .Lsysc_nr_ok
224 # svc 0: system call number in %r1 224 # svc 0: system call number in %r1
225 llgfr %r1,%r1 # clear high word in r1 225 llgfr %r1,%r1 # clear high word in r1
226 cghi %r1,NR_syscalls 226 cghi %r1,NR_syscalls
227 jnl sysc_nr_ok 227 jnl .Lsysc_nr_ok
228 sth %r1,__PT_INT_CODE+2(%r11) 228 sth %r1,__PT_INT_CODE+2(%r11)
229 slag %r8,%r1,2 229 slag %r8,%r1,2
230sysc_nr_ok: 230.Lsysc_nr_ok:
231 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 231 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
232 stg %r2,__PT_ORIG_GPR2(%r11) 232 stg %r2,__PT_ORIG_GPR2(%r11)
233 stg %r7,STACK_FRAME_OVERHEAD(%r15) 233 stg %r7,STACK_FRAME_OVERHEAD(%r15)
 234 lgf %r9,0(%r8,%r10) # get system call addr. 234 lgf %r9,0(%r8,%r10) # get system call addr.
235 tm __TI_flags+7(%r12),_TIF_TRACE 235 tm __TI_flags+7(%r12),_TIF_TRACE
236 jnz sysc_tracesys 236 jnz .Lsysc_tracesys
237 basr %r14,%r9 # call sys_xxxx 237 basr %r14,%r9 # call sys_xxxx
238 stg %r2,__PT_R2(%r11) # store return value 238 stg %r2,__PT_R2(%r11) # store return value
239 239
240sysc_return: 240.Lsysc_return:
241 LOCKDEP_SYS_EXIT 241 LOCKDEP_SYS_EXIT
242sysc_tif: 242.Lsysc_tif:
243 tm __PT_PSW+1(%r11),0x01 # returning to user ? 243 tm __PT_PSW+1(%r11),0x01 # returning to user ?
244 jno sysc_restore 244 jno .Lsysc_restore
245 tm __PT_FLAGS+7(%r11),_PIF_WORK 245 tm __PT_FLAGS+7(%r11),_PIF_WORK
246 jnz sysc_work 246 jnz .Lsysc_work
247 tm __TI_flags+7(%r12),_TIF_WORK 247 tm __TI_flags+7(%r12),_TIF_WORK
248 jnz sysc_work # check for work 248 jnz .Lsysc_work # check for work
249 tm __LC_CPU_FLAGS+7,_CIF_WORK 249 tm __LC_CPU_FLAGS+7,_CIF_WORK
250 jnz sysc_work 250 jnz .Lsysc_work
251sysc_restore: 251.Lsysc_restore:
252 lg %r14,__LC_VDSO_PER_CPU 252 lg %r14,__LC_VDSO_PER_CPU
253 lmg %r0,%r10,__PT_R0(%r11) 253 lmg %r0,%r10,__PT_R0(%r11)
254 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 254 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
@@ -256,101 +256,101 @@ sysc_restore:
256 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 256 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
257 lmg %r11,%r15,__PT_R11(%r11) 257 lmg %r11,%r15,__PT_R11(%r11)
258 lpswe __LC_RETURN_PSW 258 lpswe __LC_RETURN_PSW
259sysc_done: 259.Lsysc_done:
260 260
261# 261#
262# One of the work bits is on. Find out which one. 262# One of the work bits is on. Find out which one.
263# 263#
264sysc_work: 264.Lsysc_work:
265 tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING 265 tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
266 jo sysc_mcck_pending 266 jo .Lsysc_mcck_pending
267 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED 267 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
268 jo sysc_reschedule 268 jo .Lsysc_reschedule
269#ifdef CONFIG_UPROBES 269#ifdef CONFIG_UPROBES
270 tm __TI_flags+7(%r12),_TIF_UPROBE 270 tm __TI_flags+7(%r12),_TIF_UPROBE
271 jo sysc_uprobe_notify 271 jo .Lsysc_uprobe_notify
272#endif 272#endif
273 tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP 273 tm __PT_FLAGS+7(%r11),_PIF_PER_TRAP
274 jo sysc_singlestep 274 jo .Lsysc_singlestep
275 tm __TI_flags+7(%r12),_TIF_SIGPENDING 275 tm __TI_flags+7(%r12),_TIF_SIGPENDING
276 jo sysc_sigpending 276 jo .Lsysc_sigpending
277 tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME 277 tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
278 jo sysc_notify_resume 278 jo .Lsysc_notify_resume
279 tm __LC_CPU_FLAGS+7,_CIF_ASCE 279 tm __LC_CPU_FLAGS+7,_CIF_ASCE
280 jo sysc_uaccess 280 jo .Lsysc_uaccess
281 j sysc_return # beware of critical section cleanup 281 j .Lsysc_return # beware of critical section cleanup
282 282
283# 283#
284# _TIF_NEED_RESCHED is set, call schedule 284# _TIF_NEED_RESCHED is set, call schedule
285# 285#
286sysc_reschedule: 286.Lsysc_reschedule:
287 larl %r14,sysc_return 287 larl %r14,.Lsysc_return
288 jg schedule 288 jg schedule
289 289
290# 290#
291# _CIF_MCCK_PENDING is set, call handler 291# _CIF_MCCK_PENDING is set, call handler
292# 292#
293sysc_mcck_pending: 293.Lsysc_mcck_pending:
294 larl %r14,sysc_return 294 larl %r14,.Lsysc_return
295 jg s390_handle_mcck # TIF bit will be cleared by handler 295 jg s390_handle_mcck # TIF bit will be cleared by handler
296 296
297# 297#
298# _CIF_ASCE is set, load user space asce 298# _CIF_ASCE is set, load user space asce
299# 299#
300sysc_uaccess: 300.Lsysc_uaccess:
301 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE 301 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
302 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 302 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
303 j sysc_return 303 j .Lsysc_return
304 304
305# 305#
306# _TIF_SIGPENDING is set, call do_signal 306# _TIF_SIGPENDING is set, call do_signal
307# 307#
308sysc_sigpending: 308.Lsysc_sigpending:
309 lgr %r2,%r11 # pass pointer to pt_regs 309 lgr %r2,%r11 # pass pointer to pt_regs
310 brasl %r14,do_signal 310 brasl %r14,do_signal
311 tm __PT_FLAGS+7(%r11),_PIF_SYSCALL 311 tm __PT_FLAGS+7(%r11),_PIF_SYSCALL
312 jno sysc_return 312 jno .Lsysc_return
313 lmg %r2,%r7,__PT_R2(%r11) # load svc arguments 313 lmg %r2,%r7,__PT_R2(%r11) # load svc arguments
314 lg %r10,__TI_sysc_table(%r12) # address of system call table 314 lg %r10,__TI_sysc_table(%r12) # address of system call table
315 lghi %r8,0 # svc 0 returns -ENOSYS 315 lghi %r8,0 # svc 0 returns -ENOSYS
316 llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number 316 llgh %r1,__PT_INT_CODE+2(%r11) # load new svc number
317 cghi %r1,NR_syscalls 317 cghi %r1,NR_syscalls
318 jnl sysc_nr_ok # invalid svc number -> do svc 0 318 jnl .Lsysc_nr_ok # invalid svc number -> do svc 0
319 slag %r8,%r1,2 319 slag %r8,%r1,2
320 j sysc_nr_ok # restart svc 320 j .Lsysc_nr_ok # restart svc
321 321
322# 322#
323# _TIF_NOTIFY_RESUME is set, call do_notify_resume 323# _TIF_NOTIFY_RESUME is set, call do_notify_resume
324# 324#
325sysc_notify_resume: 325.Lsysc_notify_resume:
326 lgr %r2,%r11 # pass pointer to pt_regs 326 lgr %r2,%r11 # pass pointer to pt_regs
327 larl %r14,sysc_return 327 larl %r14,.Lsysc_return
328 jg do_notify_resume 328 jg do_notify_resume
329 329
330# 330#
331# _TIF_UPROBE is set, call uprobe_notify_resume 331# _TIF_UPROBE is set, call uprobe_notify_resume
332# 332#
333#ifdef CONFIG_UPROBES 333#ifdef CONFIG_UPROBES
334sysc_uprobe_notify: 334.Lsysc_uprobe_notify:
335 lgr %r2,%r11 # pass pointer to pt_regs 335 lgr %r2,%r11 # pass pointer to pt_regs
336 larl %r14,sysc_return 336 larl %r14,.Lsysc_return
337 jg uprobe_notify_resume 337 jg uprobe_notify_resume
338#endif 338#endif
339 339
340# 340#
341# _PIF_PER_TRAP is set, call do_per_trap 341# _PIF_PER_TRAP is set, call do_per_trap
342# 342#
343sysc_singlestep: 343.Lsysc_singlestep:
344 ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP 344 ni __PT_FLAGS+7(%r11),255-_PIF_PER_TRAP
345 lgr %r2,%r11 # pass pointer to pt_regs 345 lgr %r2,%r11 # pass pointer to pt_regs
346 larl %r14,sysc_return 346 larl %r14,.Lsysc_return
347 jg do_per_trap 347 jg do_per_trap
348 348
349# 349#
350# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before 350# call tracehook_report_syscall_entry/tracehook_report_syscall_exit before
351# and after the system call 351# and after the system call
352# 352#
353sysc_tracesys: 353.Lsysc_tracesys:
354 lgr %r2,%r11 # pass pointer to pt_regs 354 lgr %r2,%r11 # pass pointer to pt_regs
355 la %r3,0 355 la %r3,0
356 llgh %r0,__PT_INT_CODE+2(%r11) 356 llgh %r0,__PT_INT_CODE+2(%r11)
@@ -358,20 +358,20 @@ sysc_tracesys:
358 brasl %r14,do_syscall_trace_enter 358 brasl %r14,do_syscall_trace_enter
359 lghi %r0,NR_syscalls 359 lghi %r0,NR_syscalls
360 clgr %r0,%r2 360 clgr %r0,%r2
361 jnh sysc_tracenogo 361 jnh .Lsysc_tracenogo
362 sllg %r8,%r2,2 362 sllg %r8,%r2,2
363 lgf %r9,0(%r8,%r10) 363 lgf %r9,0(%r8,%r10)
364sysc_tracego: 364.Lsysc_tracego:
365 lmg %r3,%r7,__PT_R3(%r11) 365 lmg %r3,%r7,__PT_R3(%r11)
366 stg %r7,STACK_FRAME_OVERHEAD(%r15) 366 stg %r7,STACK_FRAME_OVERHEAD(%r15)
367 lg %r2,__PT_ORIG_GPR2(%r11) 367 lg %r2,__PT_ORIG_GPR2(%r11)
368 basr %r14,%r9 # call sys_xxx 368 basr %r14,%r9 # call sys_xxx
369 stg %r2,__PT_R2(%r11) # store return value 369 stg %r2,__PT_R2(%r11) # store return value
370sysc_tracenogo: 370.Lsysc_tracenogo:
371 tm __TI_flags+7(%r12),_TIF_TRACE 371 tm __TI_flags+7(%r12),_TIF_TRACE
372 jz sysc_return 372 jz .Lsysc_return
373 lgr %r2,%r11 # pass pointer to pt_regs 373 lgr %r2,%r11 # pass pointer to pt_regs
374 larl %r14,sysc_return 374 larl %r14,.Lsysc_return
375 jg do_syscall_trace_exit 375 jg do_syscall_trace_exit
376 376
377# 377#
@@ -384,13 +384,13 @@ ENTRY(ret_from_fork)
384 TRACE_IRQS_ON 384 TRACE_IRQS_ON
385 ssm __LC_SVC_NEW_PSW # reenable interrupts 385 ssm __LC_SVC_NEW_PSW # reenable interrupts
386 tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ? 386 tm __PT_PSW+1(%r11),0x01 # forking a kernel thread ?
387 jne sysc_tracenogo 387 jne .Lsysc_tracenogo
388 # it's a kernel thread 388 # it's a kernel thread
389 lmg %r9,%r10,__PT_R9(%r11) # load gprs 389 lmg %r9,%r10,__PT_R9(%r11) # load gprs
390ENTRY(kernel_thread_starter) 390ENTRY(kernel_thread_starter)
391 la %r2,0(%r10) 391 la %r2,0(%r10)
392 basr %r14,%r9 392 basr %r14,%r9
393 j sysc_tracenogo 393 j .Lsysc_tracenogo
394 394
395/* 395/*
396 * Program check handler routine 396 * Program check handler routine
@@ -409,7 +409,7 @@ ENTRY(pgm_check_handler)
409 tmhh %r8,0x4000 # PER bit set in old PSW ? 409 tmhh %r8,0x4000 # PER bit set in old PSW ?
410 jnz 0f # -> enabled, can't be a double fault 410 jnz 0f # -> enabled, can't be a double fault
411 tm __LC_PGM_ILC+3,0x80 # check for per exception 411 tm __LC_PGM_ILC+3,0x80 # check for per exception
412 jnz pgm_svcper # -> single stepped svc 412 jnz .Lpgm_svcper # -> single stepped svc
4130: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC 4130: CHECK_STACK STACK_SIZE,__LC_SAVE_AREA_SYNC
414 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 414 aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
415 j 2f 415 j 2f
@@ -432,7 +432,7 @@ ENTRY(pgm_check_handler)
432 tm __LC_PGM_ILC+3,0x80 # check for per exception 432 tm __LC_PGM_ILC+3,0x80 # check for per exception
433 jz 0f 433 jz 0f
434 tmhh %r8,0x0001 # kernel per event ? 434 tmhh %r8,0x0001 # kernel per event ?
435 jz pgm_kprobe 435 jz .Lpgm_kprobe
436 oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP 436 oi __PT_FLAGS+7(%r11),_PIF_PER_TRAP
437 mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS 437 mvc __THREAD_per_address(8,%r14),__LC_PER_ADDRESS
438 mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE 438 mvc __THREAD_per_cause(2,%r14),__LC_PER_CODE
@@ -443,31 +443,31 @@ ENTRY(pgm_check_handler)
443 llgh %r10,__PT_INT_CODE+2(%r11) 443 llgh %r10,__PT_INT_CODE+2(%r11)
444 nill %r10,0x007f 444 nill %r10,0x007f
445 sll %r10,2 445 sll %r10,2
446 je sysc_return 446 je .Lsysc_return
447 lgf %r1,0(%r10,%r1) # load address of handler routine 447 lgf %r1,0(%r10,%r1) # load address of handler routine
448 lgr %r2,%r11 # pass pointer to pt_regs 448 lgr %r2,%r11 # pass pointer to pt_regs
449 basr %r14,%r1 # branch to interrupt-handler 449 basr %r14,%r1 # branch to interrupt-handler
450 j sysc_return 450 j .Lsysc_return
451 451
452# 452#
453# PER event in supervisor state, must be kprobes 453# PER event in supervisor state, must be kprobes
454# 454#
455pgm_kprobe: 455.Lpgm_kprobe:
456 REENABLE_IRQS 456 REENABLE_IRQS
457 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 457 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
458 lgr %r2,%r11 # pass pointer to pt_regs 458 lgr %r2,%r11 # pass pointer to pt_regs
459 brasl %r14,do_per_trap 459 brasl %r14,do_per_trap
460 j sysc_return 460 j .Lsysc_return
461 461
462# 462#
463# single stepped system call 463# single stepped system call
464# 464#
465pgm_svcper: 465.Lpgm_svcper:
466 mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW 466 mvc __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
467 larl %r14,sysc_per 467 larl %r14,.Lsysc_per
468 stg %r14,__LC_RETURN_PSW+8 468 stg %r14,__LC_RETURN_PSW+8
469 lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP 469 lghi %r14,_PIF_SYSCALL | _PIF_PER_TRAP
470 lpswe __LC_RETURN_PSW # branch to sysc_per and enable irqs 470 lpswe __LC_RETURN_PSW # branch to .Lsysc_per and enable irqs
471 471
472/* 472/*
473 * IO interrupt handler routine 473 * IO interrupt handler routine
@@ -483,10 +483,10 @@ ENTRY(io_int_handler)
483 HANDLE_SIE_INTERCEPT %r14,2 483 HANDLE_SIE_INTERCEPT %r14,2
484 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT 484 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
485 tmhh %r8,0x0001 # interrupting from user? 485 tmhh %r8,0x0001 # interrupting from user?
486 jz io_skip 486 jz .Lio_skip
487 UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER 487 UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
488 LAST_BREAK %r14 488 LAST_BREAK %r14
489io_skip: 489.Lio_skip:
490 stmg %r0,%r7,__PT_R0(%r11) 490 stmg %r0,%r7,__PT_R0(%r11)
491 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC 491 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
492 stmg %r8,%r9,__PT_PSW(%r11) 492 stmg %r8,%r9,__PT_PSW(%r11)
@@ -494,29 +494,29 @@ io_skip:
494 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11) 494 xc __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
495 TRACE_IRQS_OFF 495 TRACE_IRQS_OFF
496 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15) 496 xc __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
497io_loop: 497.Lio_loop:
498 lgr %r2,%r11 # pass pointer to pt_regs 498 lgr %r2,%r11 # pass pointer to pt_regs
499 lghi %r3,IO_INTERRUPT 499 lghi %r3,IO_INTERRUPT
500 tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ? 500 tm __PT_INT_CODE+8(%r11),0x80 # adapter interrupt ?
501 jz io_call 501 jz .Lio_call
502 lghi %r3,THIN_INTERRUPT 502 lghi %r3,THIN_INTERRUPT
503io_call: 503.Lio_call:
504 brasl %r14,do_IRQ 504 brasl %r14,do_IRQ
505 tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR 505 tm __LC_MACHINE_FLAGS+6,0x10 # MACHINE_FLAG_LPAR
506 jz io_return 506 jz .Lio_return
507 tpi 0 507 tpi 0
508 jz io_return 508 jz .Lio_return
509 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID 509 mvc __PT_INT_CODE(12,%r11),__LC_SUBCHANNEL_ID
510 j io_loop 510 j .Lio_loop
511io_return: 511.Lio_return:
512 LOCKDEP_SYS_EXIT 512 LOCKDEP_SYS_EXIT
513 TRACE_IRQS_ON 513 TRACE_IRQS_ON
514io_tif: 514.Lio_tif:
515 tm __TI_flags+7(%r12),_TIF_WORK 515 tm __TI_flags+7(%r12),_TIF_WORK
516 jnz io_work # there is work to do (signals etc.) 516 jnz .Lio_work # there is work to do (signals etc.)
517 tm __LC_CPU_FLAGS+7,_CIF_WORK 517 tm __LC_CPU_FLAGS+7,_CIF_WORK
518 jnz io_work 518 jnz .Lio_work
519io_restore: 519.Lio_restore:
520 lg %r14,__LC_VDSO_PER_CPU 520 lg %r14,__LC_VDSO_PER_CPU
521 lmg %r0,%r10,__PT_R0(%r11) 521 lmg %r0,%r10,__PT_R0(%r11)
522 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11) 522 mvc __LC_RETURN_PSW(16),__PT_PSW(%r11)
@@ -524,7 +524,7 @@ io_restore:
524 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER 524 mvc __VDSO_ECTG_BASE(16,%r14),__LC_EXIT_TIMER
525 lmg %r11,%r15,__PT_R11(%r11) 525 lmg %r11,%r15,__PT_R11(%r11)
526 lpswe __LC_RETURN_PSW 526 lpswe __LC_RETURN_PSW
527io_done: 527.Lio_done:
528 528
529# 529#
530# There is work to do, find out in which context we have been interrupted: 530# There is work to do, find out in which context we have been interrupted:
@@ -535,15 +535,15 @@ io_done:
535# the preemption counter and if it is zero call preempt_schedule_irq 535# the preemption counter and if it is zero call preempt_schedule_irq
536# Before any work can be done, a switch to the kernel stack is required. 536# Before any work can be done, a switch to the kernel stack is required.
537# 537#
538io_work: 538.Lio_work:
539 tm __PT_PSW+1(%r11),0x01 # returning to user ? 539 tm __PT_PSW+1(%r11),0x01 # returning to user ?
540 jo io_work_user # yes -> do resched & signal 540 jo .Lio_work_user # yes -> do resched & signal
541#ifdef CONFIG_PREEMPT 541#ifdef CONFIG_PREEMPT
542 # check for preemptive scheduling 542 # check for preemptive scheduling
543 icm %r0,15,__TI_precount(%r12) 543 icm %r0,15,__TI_precount(%r12)
544 jnz io_restore # preemption is disabled 544 jnz .Lio_restore # preemption is disabled
545 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED 545 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
546 jno io_restore 546 jno .Lio_restore
547 # switch to kernel stack 547 # switch to kernel stack
548 lg %r1,__PT_R15(%r11) 548 lg %r1,__PT_R15(%r11)
549 aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 549 aghi %r1,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
@@ -551,19 +551,19 @@ io_work:
551 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 551 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
552 la %r11,STACK_FRAME_OVERHEAD(%r1) 552 la %r11,STACK_FRAME_OVERHEAD(%r1)
553 lgr %r15,%r1 553 lgr %r15,%r1
554 # TRACE_IRQS_ON already done at io_return, call 554 # TRACE_IRQS_ON already done at .Lio_return, call
555 # TRACE_IRQS_OFF to keep things symmetrical 555 # TRACE_IRQS_OFF to keep things symmetrical
556 TRACE_IRQS_OFF 556 TRACE_IRQS_OFF
557 brasl %r14,preempt_schedule_irq 557 brasl %r14,preempt_schedule_irq
558 j io_return 558 j .Lio_return
559#else 559#else
560 j io_restore 560 j .Lio_restore
561#endif 561#endif
562 562
563# 563#
564# Need to do work before returning to userspace, switch to kernel stack 564# Need to do work before returning to userspace, switch to kernel stack
565# 565#
566io_work_user: 566.Lio_work_user:
567 lg %r1,__LC_KERNEL_STACK 567 lg %r1,__LC_KERNEL_STACK
568 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 568 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
569 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 569 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
@@ -573,70 +573,70 @@ io_work_user:
573# 573#
574# One of the work bits is on. Find out which one. 574# One of the work bits is on. Find out which one.
575# 575#
576io_work_tif: 576.Lio_work_tif:
577 tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING 577 tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
578 jo io_mcck_pending 578 jo .Lio_mcck_pending
579 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED 579 tm __TI_flags+7(%r12),_TIF_NEED_RESCHED
580 jo io_reschedule 580 jo .Lio_reschedule
581 tm __TI_flags+7(%r12),_TIF_SIGPENDING 581 tm __TI_flags+7(%r12),_TIF_SIGPENDING
582 jo io_sigpending 582 jo .Lio_sigpending
583 tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME 583 tm __TI_flags+7(%r12),_TIF_NOTIFY_RESUME
584 jo io_notify_resume 584 jo .Lio_notify_resume
585 tm __LC_CPU_FLAGS+7,_CIF_ASCE 585 tm __LC_CPU_FLAGS+7,_CIF_ASCE
586 jo io_uaccess 586 jo .Lio_uaccess
587 j io_return # beware of critical section cleanup 587 j .Lio_return # beware of critical section cleanup
588 588
589# 589#
590# _CIF_MCCK_PENDING is set, call handler 590# _CIF_MCCK_PENDING is set, call handler
591# 591#
592io_mcck_pending: 592.Lio_mcck_pending:
593 # TRACE_IRQS_ON already done at io_return 593 # TRACE_IRQS_ON already done at .Lio_return
594 brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler 594 brasl %r14,s390_handle_mcck # TIF bit will be cleared by handler
595 TRACE_IRQS_OFF 595 TRACE_IRQS_OFF
596 j io_return 596 j .Lio_return
597 597
598# 598#
599# _CIF_ASCE is set, load user space asce 599# _CIF_ASCE is set, load user space asce
600# 600#
601io_uaccess: 601.Lio_uaccess:
602 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE 602 ni __LC_CPU_FLAGS+7,255-_CIF_ASCE
603 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 603 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
604 j io_return 604 j .Lio_return
605 605
606# 606#
607# _TIF_NEED_RESCHED is set, call schedule 607# _TIF_NEED_RESCHED is set, call schedule
608# 608#
609io_reschedule: 609.Lio_reschedule:
610 # TRACE_IRQS_ON already done at io_return 610 # TRACE_IRQS_ON already done at .Lio_return
611 ssm __LC_SVC_NEW_PSW # reenable interrupts 611 ssm __LC_SVC_NEW_PSW # reenable interrupts
612 brasl %r14,schedule # call scheduler 612 brasl %r14,schedule # call scheduler
613 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 613 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
614 TRACE_IRQS_OFF 614 TRACE_IRQS_OFF
615 j io_return 615 j .Lio_return
616 616
617# 617#
618# _TIF_SIGPENDING is set, call do_signal 618# _TIF_SIGPENDING is set, call do_signal
619# 619#
620io_sigpending: 620.Lio_sigpending:
621 # TRACE_IRQS_ON already done at io_return 621 # TRACE_IRQS_ON already done at .Lio_return
622 ssm __LC_SVC_NEW_PSW # reenable interrupts 622 ssm __LC_SVC_NEW_PSW # reenable interrupts
623 lgr %r2,%r11 # pass pointer to pt_regs 623 lgr %r2,%r11 # pass pointer to pt_regs
624 brasl %r14,do_signal 624 brasl %r14,do_signal
625 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 625 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
626 TRACE_IRQS_OFF 626 TRACE_IRQS_OFF
627 j io_return 627 j .Lio_return
628 628
629# 629#
630# _TIF_NOTIFY_RESUME is set, call do_notify_resume 630# _TIF_NOTIFY_RESUME is set, call do_notify_resume
631# 631#
632io_notify_resume: 632.Lio_notify_resume:
633 # TRACE_IRQS_ON already done at io_return 633 # TRACE_IRQS_ON already done at .Lio_return
634 ssm __LC_SVC_NEW_PSW # reenable interrupts 634 ssm __LC_SVC_NEW_PSW # reenable interrupts
635 lgr %r2,%r11 # pass pointer to pt_regs 635 lgr %r2,%r11 # pass pointer to pt_regs
636 brasl %r14,do_notify_resume 636 brasl %r14,do_notify_resume
637 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts 637 ssm __LC_PGM_NEW_PSW # disable I/O and ext. interrupts
638 TRACE_IRQS_OFF 638 TRACE_IRQS_OFF
639 j io_return 639 j .Lio_return
640 640
641/* 641/*
642 * External interrupt handler routine 642 * External interrupt handler routine
@@ -652,10 +652,10 @@ ENTRY(ext_int_handler)
652 HANDLE_SIE_INTERCEPT %r14,3 652 HANDLE_SIE_INTERCEPT %r14,3
653 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT 653 SWITCH_ASYNC __LC_SAVE_AREA_ASYNC,__LC_ASYNC_STACK,STACK_SHIFT
654 tmhh %r8,0x0001 # interrupting from user ? 654 tmhh %r8,0x0001 # interrupting from user ?
655 jz ext_skip 655 jz .Lext_skip
656 UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER 656 UPDATE_VTIME %r14,__LC_ASYNC_ENTER_TIMER
657 LAST_BREAK %r14 657 LAST_BREAK %r14
658ext_skip: 658.Lext_skip:
659 stmg %r0,%r7,__PT_R0(%r11) 659 stmg %r0,%r7,__PT_R0(%r11)
660 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC 660 mvc __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
661 stmg %r8,%r9,__PT_PSW(%r11) 661 stmg %r8,%r9,__PT_PSW(%r11)
@@ -669,23 +669,23 @@ ext_skip:
669 lgr %r2,%r11 # pass pointer to pt_regs 669 lgr %r2,%r11 # pass pointer to pt_regs
670 lghi %r3,EXT_INTERRUPT 670 lghi %r3,EXT_INTERRUPT
671 brasl %r14,do_IRQ 671 brasl %r14,do_IRQ
672 j io_return 672 j .Lio_return
673 673
674/* 674/*
675 * Load idle PSW. The second "half" of this function is in cleanup_idle. 675 * Load idle PSW. The second "half" of this function is in .Lcleanup_idle.
676 */ 676 */
677ENTRY(psw_idle) 677ENTRY(psw_idle)
678 stg %r3,__SF_EMPTY(%r15) 678 stg %r3,__SF_EMPTY(%r15)
679 larl %r1,psw_idle_lpsw+4 679 larl %r1,.Lpsw_idle_lpsw+4
680 stg %r1,__SF_EMPTY+8(%r15) 680 stg %r1,__SF_EMPTY+8(%r15)
681 STCK __CLOCK_IDLE_ENTER(%r2) 681 STCK __CLOCK_IDLE_ENTER(%r2)
682 stpt __TIMER_IDLE_ENTER(%r2) 682 stpt __TIMER_IDLE_ENTER(%r2)
683psw_idle_lpsw: 683.Lpsw_idle_lpsw:
684 lpswe __SF_EMPTY(%r15) 684 lpswe __SF_EMPTY(%r15)
685 br %r14 685 br %r14
686psw_idle_end: 686.Lpsw_idle_end:
687 687
688__critical_end: 688.L__critical_end:
689 689
690/* 690/*
691 * Machine check handler routines 691 * Machine check handler routines
@@ -701,7 +701,7 @@ ENTRY(mcck_int_handler)
701 lmg %r8,%r9,__LC_MCK_OLD_PSW 701 lmg %r8,%r9,__LC_MCK_OLD_PSW
702 HANDLE_SIE_INTERCEPT %r14,4 702 HANDLE_SIE_INTERCEPT %r14,4
703 tm __LC_MCCK_CODE,0x80 # system damage? 703 tm __LC_MCCK_CODE,0x80 # system damage?
704 jo mcck_panic # yes -> rest of mcck code invalid 704 jo .Lmcck_panic # yes -> rest of mcck code invalid
705 lghi %r14,__LC_CPU_TIMER_SAVE_AREA 705 lghi %r14,__LC_CPU_TIMER_SAVE_AREA
706 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 706 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
707 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid? 707 tm __LC_MCCK_CODE+5,0x02 # stored cpu timer value valid?
@@ -719,13 +719,13 @@ ENTRY(mcck_int_handler)
7192: spt 0(%r14) 7192: spt 0(%r14)
720 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14) 720 mvc __LC_MCCK_ENTER_TIMER(8),0(%r14)
7213: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid? 7213: tm __LC_MCCK_CODE+2,0x09 # mwp + ia of old psw valid?
722 jno mcck_panic # no -> skip cleanup critical 722 jno .Lmcck_panic # no -> skip cleanup critical
723 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT 723 SWITCH_ASYNC __LC_GPREGS_SAVE_AREA+64,__LC_PANIC_STACK,PAGE_SHIFT
724 tm %r8,0x0001 # interrupting from user ? 724 tm %r8,0x0001 # interrupting from user ?
725 jz mcck_skip 725 jz .Lmcck_skip
726 UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER 726 UPDATE_VTIME %r14,__LC_MCCK_ENTER_TIMER
727 LAST_BREAK %r14 727 LAST_BREAK %r14
728mcck_skip: 728.Lmcck_skip:
729 lghi %r14,__LC_GPREGS_SAVE_AREA+64 729 lghi %r14,__LC_GPREGS_SAVE_AREA+64
730 stmg %r0,%r7,__PT_R0(%r11) 730 stmg %r0,%r7,__PT_R0(%r11)
731 mvc __PT_R8(64,%r11),0(%r14) 731 mvc __PT_R8(64,%r11),0(%r14)
@@ -735,7 +735,7 @@ mcck_skip:
735 lgr %r2,%r11 # pass pointer to pt_regs 735 lgr %r2,%r11 # pass pointer to pt_regs
736 brasl %r14,s390_do_machine_check 736 brasl %r14,s390_do_machine_check
737 tm __PT_PSW+1(%r11),0x01 # returning to user ? 737 tm __PT_PSW+1(%r11),0x01 # returning to user ?
738 jno mcck_return 738 jno .Lmcck_return
739 lg %r1,__LC_KERNEL_STACK # switch to kernel stack 739 lg %r1,__LC_KERNEL_STACK # switch to kernel stack
740 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11) 740 mvc STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
741 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1) 741 xc __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
@@ -743,11 +743,11 @@ mcck_skip:
743 lgr %r15,%r1 743 lgr %r15,%r1
744 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off 744 ssm __LC_PGM_NEW_PSW # turn dat on, keep irqs off
745 tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING 745 tm __LC_CPU_FLAGS+7,_CIF_MCCK_PENDING
746 jno mcck_return 746 jno .Lmcck_return
747 TRACE_IRQS_OFF 747 TRACE_IRQS_OFF
748 brasl %r14,s390_handle_mcck 748 brasl %r14,s390_handle_mcck
749 TRACE_IRQS_ON 749 TRACE_IRQS_ON
750mcck_return: 750.Lmcck_return:
751 lg %r14,__LC_VDSO_PER_CPU 751 lg %r14,__LC_VDSO_PER_CPU
752 lmg %r0,%r10,__PT_R0(%r11) 752 lmg %r0,%r10,__PT_R0(%r11)
753 mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW 753 mvc __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
@@ -758,14 +758,14 @@ mcck_return:
7580: lmg %r11,%r15,__PT_R11(%r11) 7580: lmg %r11,%r15,__PT_R11(%r11)
759 lpswe __LC_RETURN_MCCK_PSW 759 lpswe __LC_RETURN_MCCK_PSW
760 760
761mcck_panic: 761.Lmcck_panic:
762 lg %r14,__LC_PANIC_STACK 762 lg %r14,__LC_PANIC_STACK
763 slgr %r14,%r15 763 slgr %r14,%r15
764 srag %r14,%r14,PAGE_SHIFT 764 srag %r14,%r14,PAGE_SHIFT
765 jz 0f 765 jz 0f
766 lg %r15,__LC_PANIC_STACK 766 lg %r15,__LC_PANIC_STACK
7670: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE) 7670: aghi %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
768 j mcck_skip 768 j .Lmcck_skip
769 769
770# 770#
771# PSW restart interrupt handler 771# PSW restart interrupt handler
@@ -815,69 +815,69 @@ stack_overflow:
815#endif 815#endif
816 816
817 .align 8 817 .align 8
818cleanup_table: 818.Lcleanup_table:
819 .quad system_call 819 .quad system_call
820 .quad sysc_do_svc 820 .quad .Lsysc_do_svc
821 .quad sysc_tif 821 .quad .Lsysc_tif
822 .quad sysc_restore 822 .quad .Lsysc_restore
823 .quad sysc_done 823 .quad .Lsysc_done
824 .quad io_tif 824 .quad .Lio_tif
825 .quad io_restore 825 .quad .Lio_restore
826 .quad io_done 826 .quad .Lio_done
827 .quad psw_idle 827 .quad psw_idle
828 .quad psw_idle_end 828 .quad .Lpsw_idle_end
829 829
830cleanup_critical: 830cleanup_critical:
831 clg %r9,BASED(cleanup_table) # system_call 831 clg %r9,BASED(.Lcleanup_table) # system_call
832 jl 0f 832 jl 0f
833 clg %r9,BASED(cleanup_table+8) # sysc_do_svc 833 clg %r9,BASED(.Lcleanup_table+8) # .Lsysc_do_svc
834 jl cleanup_system_call 834 jl .Lcleanup_system_call
835 clg %r9,BASED(cleanup_table+16) # sysc_tif 835 clg %r9,BASED(.Lcleanup_table+16) # .Lsysc_tif
836 jl 0f 836 jl 0f
837 clg %r9,BASED(cleanup_table+24) # sysc_restore 837 clg %r9,BASED(.Lcleanup_table+24) # .Lsysc_restore
838 jl cleanup_sysc_tif 838 jl .Lcleanup_sysc_tif
839 clg %r9,BASED(cleanup_table+32) # sysc_done 839 clg %r9,BASED(.Lcleanup_table+32) # .Lsysc_done
840 jl cleanup_sysc_restore 840 jl .Lcleanup_sysc_restore
841 clg %r9,BASED(cleanup_table+40) # io_tif 841 clg %r9,BASED(.Lcleanup_table+40) # .Lio_tif
842 jl 0f 842 jl 0f
843 clg %r9,BASED(cleanup_table+48) # io_restore 843 clg %r9,BASED(.Lcleanup_table+48) # .Lio_restore
844 jl cleanup_io_tif 844 jl .Lcleanup_io_tif
845 clg %r9,BASED(cleanup_table+56) # io_done 845 clg %r9,BASED(.Lcleanup_table+56) # .Lio_done
846 jl cleanup_io_restore 846 jl .Lcleanup_io_restore
847 clg %r9,BASED(cleanup_table+64) # psw_idle 847 clg %r9,BASED(.Lcleanup_table+64) # psw_idle
848 jl 0f 848 jl 0f
849 clg %r9,BASED(cleanup_table+72) # psw_idle_end 849 clg %r9,BASED(.Lcleanup_table+72) # .Lpsw_idle_end
850 jl cleanup_idle 850 jl .Lcleanup_idle
8510: br %r14 8510: br %r14
852 852
853 853
854cleanup_system_call: 854.Lcleanup_system_call:
855 # check if stpt has been executed 855 # check if stpt has been executed
856 clg %r9,BASED(cleanup_system_call_insn) 856 clg %r9,BASED(.Lcleanup_system_call_insn)
857 jh 0f 857 jh 0f
858 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER 858 mvc __LC_SYNC_ENTER_TIMER(8),__LC_ASYNC_ENTER_TIMER
859 cghi %r11,__LC_SAVE_AREA_ASYNC 859 cghi %r11,__LC_SAVE_AREA_ASYNC
860 je 0f 860 je 0f
861 mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER 861 mvc __LC_SYNC_ENTER_TIMER(8),__LC_MCCK_ENTER_TIMER
8620: # check if stmg has been executed 8620: # check if stmg has been executed
863 clg %r9,BASED(cleanup_system_call_insn+8) 863 clg %r9,BASED(.Lcleanup_system_call_insn+8)
864 jh 0f 864 jh 0f
865 mvc __LC_SAVE_AREA_SYNC(64),0(%r11) 865 mvc __LC_SAVE_AREA_SYNC(64),0(%r11)
8660: # check if base register setup + TIF bit load has been done 8660: # check if base register setup + TIF bit load has been done
867 clg %r9,BASED(cleanup_system_call_insn+16) 867 clg %r9,BASED(.Lcleanup_system_call_insn+16)
868 jhe 0f 868 jhe 0f
869 # set up saved registers r10 and r12 869 # set up saved registers r10 and r12
870 stg %r10,16(%r11) # r10 last break 870 stg %r10,16(%r11) # r10 last break
871 stg %r12,32(%r11) # r12 thread-info pointer 871 stg %r12,32(%r11) # r12 thread-info pointer
8720: # check if the user time update has been done 8720: # check if the user time update has been done
873 clg %r9,BASED(cleanup_system_call_insn+24) 873 clg %r9,BASED(.Lcleanup_system_call_insn+24)
874 jh 0f 874 jh 0f
875 lg %r15,__LC_EXIT_TIMER 875 lg %r15,__LC_EXIT_TIMER
876 slg %r15,__LC_SYNC_ENTER_TIMER 876 slg %r15,__LC_SYNC_ENTER_TIMER
877 alg %r15,__LC_USER_TIMER 877 alg %r15,__LC_USER_TIMER
878 stg %r15,__LC_USER_TIMER 878 stg %r15,__LC_USER_TIMER
8790: # check if the system time update has been done 8790: # check if the system time update has been done
880 clg %r9,BASED(cleanup_system_call_insn+32) 880 clg %r9,BASED(.Lcleanup_system_call_insn+32)
881 jh 0f 881 jh 0f
882 lg %r15,__LC_LAST_UPDATE_TIMER 882 lg %r15,__LC_LAST_UPDATE_TIMER
883 slg %r15,__LC_EXIT_TIMER 883 slg %r15,__LC_EXIT_TIMER
@@ -904,21 +904,21 @@ cleanup_system_call:
904 # setup saved register r15 904 # setup saved register r15
905 stg %r15,56(%r11) # r15 stack pointer 905 stg %r15,56(%r11) # r15 stack pointer
906 # set new psw address and exit 906 # set new psw address and exit
907 larl %r9,sysc_do_svc 907 larl %r9,.Lsysc_do_svc
908 br %r14 908 br %r14
909cleanup_system_call_insn: 909.Lcleanup_system_call_insn:
910 .quad system_call 910 .quad system_call
911 .quad sysc_stmg 911 .quad .Lsysc_stmg
912 .quad sysc_per 912 .quad .Lsysc_per
913 .quad sysc_vtime+18 913 .quad .Lsysc_vtime+18
914 .quad sysc_vtime+42 914 .quad .Lsysc_vtime+42
915 915
916cleanup_sysc_tif: 916.Lcleanup_sysc_tif:
917 larl %r9,sysc_tif 917 larl %r9,.Lsysc_tif
918 br %r14 918 br %r14
919 919
920cleanup_sysc_restore: 920.Lcleanup_sysc_restore:
921 clg %r9,BASED(cleanup_sysc_restore_insn) 921 clg %r9,BASED(.Lcleanup_sysc_restore_insn)
922 je 0f 922 je 0f
923 lg %r9,24(%r11) # get saved pointer to pt_regs 923 lg %r9,24(%r11) # get saved pointer to pt_regs
924 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) 924 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
@@ -926,15 +926,15 @@ cleanup_sysc_restore:
926 lmg %r0,%r7,__PT_R0(%r9) 926 lmg %r0,%r7,__PT_R0(%r9)
9270: lmg %r8,%r9,__LC_RETURN_PSW 9270: lmg %r8,%r9,__LC_RETURN_PSW
928 br %r14 928 br %r14
929cleanup_sysc_restore_insn: 929.Lcleanup_sysc_restore_insn:
930 .quad sysc_done - 4 930 .quad .Lsysc_done - 4
931 931
932cleanup_io_tif: 932.Lcleanup_io_tif:
933 larl %r9,io_tif 933 larl %r9,.Lio_tif
934 br %r14 934 br %r14
935 935
936cleanup_io_restore: 936.Lcleanup_io_restore:
937 clg %r9,BASED(cleanup_io_restore_insn) 937 clg %r9,BASED(.Lcleanup_io_restore_insn)
938 je 0f 938 je 0f
939 lg %r9,24(%r11) # get saved r11 pointer to pt_regs 939 lg %r9,24(%r11) # get saved r11 pointer to pt_regs
940 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9) 940 mvc __LC_RETURN_PSW(16),__PT_PSW(%r9)
@@ -942,10 +942,10 @@ cleanup_io_restore:
942 lmg %r0,%r7,__PT_R0(%r9) 942 lmg %r0,%r7,__PT_R0(%r9)
9430: lmg %r8,%r9,__LC_RETURN_PSW 9430: lmg %r8,%r9,__LC_RETURN_PSW
944 br %r14 944 br %r14
945cleanup_io_restore_insn: 945.Lcleanup_io_restore_insn:
946 .quad io_done - 4 946 .quad .Lio_done - 4
947 947
948cleanup_idle: 948.Lcleanup_idle:
949 # copy interrupt clock & cpu timer 949 # copy interrupt clock & cpu timer
950 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK 950 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_INT_CLOCK
951 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER 951 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_ASYNC_ENTER_TIMER
@@ -954,7 +954,7 @@ cleanup_idle:
954 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK 954 mvc __CLOCK_IDLE_EXIT(8,%r2),__LC_MCCK_CLOCK
955 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER 955 mvc __TIMER_IDLE_EXIT(8,%r2),__LC_MCCK_ENTER_TIMER
9560: # check if stck & stpt have been executed 9560: # check if stck & stpt have been executed
957 clg %r9,BASED(cleanup_idle_insn) 957 clg %r9,BASED(.Lcleanup_idle_insn)
958 jhe 1f 958 jhe 1f
959 mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2) 959 mvc __CLOCK_IDLE_ENTER(8,%r2),__CLOCK_IDLE_EXIT(%r2)
960 mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2) 960 mvc __TIMER_IDLE_ENTER(8,%r2),__TIMER_IDLE_EXIT(%r2)
@@ -973,17 +973,17 @@ cleanup_idle:
973 nihh %r8,0xfcfd # clear irq & wait state bits 973 nihh %r8,0xfcfd # clear irq & wait state bits
974 lg %r9,48(%r11) # return from psw_idle 974 lg %r9,48(%r11) # return from psw_idle
975 br %r14 975 br %r14
976cleanup_idle_insn: 976.Lcleanup_idle_insn:
977 .quad psw_idle_lpsw 977 .quad .Lpsw_idle_lpsw
978 978
979/* 979/*
980 * Integer constants 980 * Integer constants
981 */ 981 */
982 .align 8 982 .align 8
983.Lcritical_start: 983.Lcritical_start:
984 .quad __critical_start 984 .quad .L__critical_start
985.Lcritical_length: 985.Lcritical_length:
986 .quad __critical_end - __critical_start 986 .quad .L__critical_end - .L__critical_start
987 987
988 988
989#if IS_ENABLED(CONFIG_KVM) 989#if IS_ENABLED(CONFIG_KVM)
@@ -1000,25 +1000,25 @@ ENTRY(sie64a)
1000 lmg %r0,%r13,0(%r3) # load guest gprs 0-13 1000 lmg %r0,%r13,0(%r3) # load guest gprs 0-13
1001 lg %r14,__LC_GMAP # get gmap pointer 1001 lg %r14,__LC_GMAP # get gmap pointer
1002 ltgr %r14,%r14 1002 ltgr %r14,%r14
1003 jz sie_gmap 1003 jz .Lsie_gmap
1004 lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce 1004 lctlg %c1,%c1,__GMAP_ASCE(%r14) # load primary asce
1005sie_gmap: 1005.Lsie_gmap:
1006 lg %r14,__SF_EMPTY(%r15) # get control block pointer 1006 lg %r14,__SF_EMPTY(%r15) # get control block pointer
1007 oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now 1007 oi __SIE_PROG0C+3(%r14),1 # we are going into SIE now
1008 tm __SIE_PROG20+3(%r14),1 # last exit... 1008 tm __SIE_PROG20+3(%r14),1 # last exit...
1009 jnz sie_done 1009 jnz .Lsie_done
1010 LPP __SF_EMPTY(%r15) # set guest id 1010 LPP __SF_EMPTY(%r15) # set guest id
1011 sie 0(%r14) 1011 sie 0(%r14)
1012sie_done: 1012.Lsie_done:
1013 LPP __SF_EMPTY+16(%r15) # set host id 1013 LPP __SF_EMPTY+16(%r15) # set host id
1014 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE 1014 ni __SIE_PROG0C+3(%r14),0xfe # no longer in SIE
1015 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce 1015 lctlg %c1,%c1,__LC_USER_ASCE # load primary asce
1016# some program checks are suppressing. C code (e.g. do_protection_exception) 1016# some program checks are suppressing. C code (e.g. do_protection_exception)
1017# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other 1017# will rewind the PSW by the ILC, which is 4 bytes in case of SIE. Other
1018# instructions between sie64a and sie_done should not cause program 1018# instructions between sie64a and .Lsie_done should not cause program
1019# interrupts. So let's use a nop (47 00 00 00) as a landing pad. 1019# interrupts. So let's use a nop (47 00 00 00) as a landing pad.
1020# See also HANDLE_SIE_INTERCEPT 1020# See also HANDLE_SIE_INTERCEPT
1021rewind_pad: 1021.Lrewind_pad:
1022 nop 0 1022 nop 0
1023 .globl sie_exit 1023 .globl sie_exit
1024sie_exit: 1024sie_exit:
@@ -1027,19 +1027,19 @@ sie_exit:
1027 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers 1027 lmg %r6,%r14,__SF_GPRS(%r15) # restore kernel registers
1028 lg %r2,__SF_EMPTY+24(%r15) # return exit reason code 1028 lg %r2,__SF_EMPTY+24(%r15) # return exit reason code
1029 br %r14 1029 br %r14
1030sie_fault: 1030.Lsie_fault:
1031 lghi %r14,-EFAULT 1031 lghi %r14,-EFAULT
1032 stg %r14,__SF_EMPTY+24(%r15) # set exit reason code 1032 stg %r14,__SF_EMPTY+24(%r15) # set exit reason code
1033 j sie_exit 1033 j sie_exit
1034 1034
1035 .align 8 1035 .align 8
1036.Lsie_critical: 1036.Lsie_critical:
1037 .quad sie_gmap 1037 .quad .Lsie_gmap
1038.Lsie_critical_length: 1038.Lsie_critical_length:
1039 .quad sie_done - sie_gmap 1039 .quad .Lsie_done - .Lsie_gmap
1040 1040
1041 EX_TABLE(rewind_pad,sie_fault) 1041 EX_TABLE(.Lrewind_pad,.Lsie_fault)
1042 EX_TABLE(sie_exit,sie_fault) 1042 EX_TABLE(sie_exit,.Lsie_fault)
1043#endif 1043#endif
1044 1044
1045 .section .rodata, "a" 1045 .section .rodata, "a"
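
cleanup_critical above is the critical-section cleanup dispatcher: the address at which the entry path was interrupted (%r9) is compared against .Lcleanup_table, an ascending list of checkpoints from system_call to .Lpsw_idle_end, and the interval it falls into selects the fixup routine that completes the half-finished entry or exit sequence. The same dispatch, reduced to a C sketch with hypothetical names:

#include <stddef.h>

typedef void (*cleanup_fn)(void);

struct checkpoint {
	unsigned long addr;	/* start of the next phase of the entry path */
	cleanup_fn fixup;	/* NULL: nothing to repair in this interval */
};

/* pc is the interrupted address (%r9); tbl mirrors .Lcleanup_table */
static void cleanup_critical_sketch(unsigned long pc,
				    const struct checkpoint *tbl, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		if (pc < tbl[i].addr) {
			if (tbl[i].fixup)
				tbl[i].fixup();	/* e.g. the system_call fixup */
			return;
		}
	}
	/* pc lies past the last checkpoint: not inside the critical section */
}
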
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index ca1cabb3a96c..b86bb8823f15 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -7,6 +7,7 @@
7 * Martin Schwidefsky <schwidefsky@de.ibm.com> 7 * Martin Schwidefsky <schwidefsky@de.ibm.com>
8 */ 8 */
9 9
10#include <linux/moduleloader.h>
10#include <linux/hardirq.h> 11#include <linux/hardirq.h>
11#include <linux/uaccess.h> 12#include <linux/uaccess.h>
12#include <linux/ftrace.h> 13#include <linux/ftrace.h>
@@ -15,60 +16,39 @@
15#include <linux/kprobes.h> 16#include <linux/kprobes.h>
16#include <trace/syscall.h> 17#include <trace/syscall.h>
17#include <asm/asm-offsets.h> 18#include <asm/asm-offsets.h>
19#include <asm/cacheflush.h>
18#include "entry.h" 20#include "entry.h"
19 21
20void mcount_replace_code(void);
21void ftrace_disable_code(void);
22void ftrace_enable_insn(void);
23
24/* 22/*
25 * The mcount code looks like this: 23 * The mcount code looks like this:
26 * stg %r14,8(%r15) # offset 0 24 * stg %r14,8(%r15) # offset 0
27 * larl %r1,<&counter> # offset 6 25 * larl %r1,<&counter> # offset 6
28 * brasl %r14,_mcount # offset 12 26 * brasl %r14,_mcount # offset 12
29 * lg %r14,8(%r15) # offset 18 27 * lg %r14,8(%r15) # offset 18
30 * Total length is 24 bytes. The complete mcount block initially gets replaced 28 * Total length is 24 bytes. Only the first instruction will be patched
31 * by ftrace_make_nop. Subsequent calls to ftrace_make_call / ftrace_make_nop 29 * by ftrace_make_call / ftrace_make_nop.
32 * only patch the jg/lg instruction within the block.
33 * Note: we do not patch the first instruction to an unconditional branch,
34 * since that would break kprobes/jprobes. It is easier to leave the larl
35 * instruction in and only modify the second instruction.
36 * The enabled ftrace code block looks like this: 30 * The enabled ftrace code block looks like this:
37 * larl %r0,.+24 # offset 0 31 * > brasl %r0,ftrace_caller # offset 0
38 * > lg %r1,__LC_FTRACE_FUNC # offset 6 32 * larl %r1,<&counter> # offset 6
39 * br %r1 # offset 12 33 * brasl %r14,_mcount # offset 12
40 * brcl 0,0 # offset 14 34 * lg %r14,8(%r15) # offset 18
41 * brc 0,0 # offset 20
42 * The ftrace function gets called with a non-standard C function call ABI 35 * The ftrace function gets called with a non-standard C function call ABI
43 * where r0 contains the return address. It is also expected that the called 36 * where r0 contains the return address. It is also expected that the called
44 * function only clobbers r0 and r1, but restores r2-r15. 37 * function only clobbers r0 and r1, but restores r2-r15.
 38 * For module code we can't jump directly to the ftrace caller, but need a
 39 * trampoline (ftrace_plt), which also clobbers r1.
45 * The return point of the ftrace function has offset 24, so execution 40 * The return point of the ftrace function has offset 24, so execution
46 * continues behind the mcount block. 41 * continues behind the mcount block.
47 * larl %r0,.+24 # offset 0 42 * The disabled ftrace code block looks like this:
48 * > jg .+18 # offset 6 43 * > jg .+24 # offset 0
49 * br %r1 # offset 12 44 * larl %r1,<&counter> # offset 6
50 * brcl 0,0 # offset 14 45 * brasl %r14,_mcount # offset 12
51 * brc 0,0 # offset 20 46 * lg %r14,8(%r15) # offset 18
52 * The jg instruction branches to offset 24 to skip as many instructions 47 * The jg instruction branches to offset 24 to skip as many instructions
53 * as possible. 48 * as possible.
54 */ 49 */
55asm( 50
56 " .align 4\n" 51unsigned long ftrace_plt;
57 "mcount_replace_code:\n"
58 " larl %r0,0f\n"
59 "ftrace_disable_code:\n"
60 " jg 0f\n"
61 " br %r1\n"
62 " brcl 0,0\n"
63 " brc 0,0\n"
64 "0:\n"
65 " .align 4\n"
66 "ftrace_enable_insn:\n"
67 " lg %r1,"__stringify(__LC_FTRACE_FUNC)"\n");
68
69#define MCOUNT_BLOCK_SIZE 24
70#define MCOUNT_INSN_OFFSET 6
71#define FTRACE_INSN_SIZE 6
72 52
73int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, 53int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
74 unsigned long addr) 54 unsigned long addr)
@@ -79,24 +59,62 @@ int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
79int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, 59int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
80 unsigned long addr) 60 unsigned long addr)
81{ 61{
82 /* Initial replacement of the whole mcount block */ 62 struct ftrace_insn insn;
83 if (addr == MCOUNT_ADDR) { 63 unsigned short op;
84 if (probe_kernel_write((void *) rec->ip - MCOUNT_INSN_OFFSET, 64 void *from, *to;
85 mcount_replace_code, 65 size_t size;
86 MCOUNT_BLOCK_SIZE)) 66
87 return -EPERM; 67 ftrace_generate_nop_insn(&insn);
88 return 0; 68 size = sizeof(insn);
69 from = &insn;
70 to = (void *) rec->ip;
71 if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
72 return -EFAULT;
73 /*
74 * If we find a breakpoint instruction, a kprobe has been placed
75 * at the beginning of the function. We write the constant
76 * KPROBE_ON_FTRACE_NOP into the remaining four bytes of the original
77 * instruction so that the kprobes handler can execute a nop, if it
78 * reaches this breakpoint.
79 */
80 if (op == BREAKPOINT_INSTRUCTION) {
81 size -= 2;
82 from += 2;
83 to += 2;
84 insn.disp = KPROBE_ON_FTRACE_NOP;
89 } 85 }
90 if (probe_kernel_write((void *) rec->ip, ftrace_disable_code, 86 if (probe_kernel_write(to, from, size))
91 MCOUNT_INSN_SIZE))
92 return -EPERM; 87 return -EPERM;
93 return 0; 88 return 0;
94} 89}
95 90
96int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 91int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
97{ 92{
98 if (probe_kernel_write((void *) rec->ip, ftrace_enable_insn, 93 struct ftrace_insn insn;
99 FTRACE_INSN_SIZE)) 94 unsigned short op;
95 void *from, *to;
96 size_t size;
97
98 ftrace_generate_call_insn(&insn, rec->ip);
99 size = sizeof(insn);
100 from = &insn;
101 to = (void *) rec->ip;
102 if (probe_kernel_read(&op, (void *) rec->ip, sizeof(op)))
103 return -EFAULT;
104 /*
105 * If we find a breakpoint instruction, a kprobe has been placed
106 * at the beginning of the function. We write the constant
107 * KPROBE_ON_FTRACE_CALL into the remaining four bytes of the original
108 * instruction so that the kprobes handler can execute a brasl if it
109 * reaches this breakpoint.
110 */
111 if (op == BREAKPOINT_INSTRUCTION) {
112 size -= 2;
113 from += 2;
114 to += 2;
115 insn.disp = KPROBE_ON_FTRACE_CALL;
116 }
117 if (probe_kernel_write(to, from, size))
100 return -EPERM; 118 return -EPERM;
101 return 0; 119 return 0;
102} 120}
@@ -111,13 +129,30 @@ int __init ftrace_dyn_arch_init(void)
111 return 0; 129 return 0;
112} 130}
113 131
132static int __init ftrace_plt_init(void)
133{
134 unsigned int *ip;
135
136 ftrace_plt = (unsigned long) module_alloc(PAGE_SIZE);
137 if (!ftrace_plt)
138 panic("cannot allocate ftrace plt\n");
139 ip = (unsigned int *) ftrace_plt;
140 ip[0] = 0x0d10e310; /* basr 1,0; lg 1,10(1); br 1 */
141 ip[1] = 0x100a0004;
142 ip[2] = 0x07f10000;
143 ip[3] = FTRACE_ADDR >> 32;
144 ip[4] = FTRACE_ADDR & 0xffffffff;
145 set_memory_ro(ftrace_plt, 1);
146 return 0;
147}
148device_initcall(ftrace_plt_init);
149
114#ifdef CONFIG_FUNCTION_GRAPH_TRACER 150#ifdef CONFIG_FUNCTION_GRAPH_TRACER
115/* 151/*
116 * Hook the return address and push it in the stack of return addresses 152 * Hook the return address and push it in the stack of return addresses
117 * in current thread info. 153 * in current thread info.
118 */ 154 */
119unsigned long __kprobes prepare_ftrace_return(unsigned long parent, 155unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
120 unsigned long ip)
121{ 156{
122 struct ftrace_graph_ent trace; 157 struct ftrace_graph_ent trace;
123 158
@@ -137,6 +172,7 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
137out: 172out:
138 return parent; 173 return parent;
139} 174}
175NOKPROBE_SYMBOL(prepare_ftrace_return);
140 176
141/* 177/*
142 * Patch the kernel code at ftrace_graph_caller location. The instruction 178 * Patch the kernel code at ftrace_graph_caller location. The instruction
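
With this rework, ftrace_make_nop and ftrace_make_call only ever touch the first 6-byte instruction of the 24-byte mcount block, toggling it between "jg .+24" and "brasl %r0,ftrace_caller"; if a kprobe breakpoint already owns the first two bytes, only the 4-byte displacement is rewritten so the breakpoint stays armed. A sketch of the two generated instructions follows, assuming the struct ftrace_insn layout { u16 opc; s32 disp; } implied by the insn.disp assignments above; the helper names mirror ftrace_generate_nop_insn/ftrace_generate_call_insn, but the encodings shown here are reconstructed, not quoted from the patch:

#include <stdint.h>

struct ftrace_insn {
	uint16_t opc;
	int32_t  disp;
} __attribute__((packed));

/* "jg .+24": a branch-relative-long over the whole 24-byte mcount block */
static void generate_nop_insn(struct ftrace_insn *insn)
{
	insn->opc  = 0xc0f4;		/* brcl 15,... == jg */
	insn->disp = 24 / 2;		/* s390 displacements count halfwords */
}

/* "brasl %r0,target": a call that leaves the return address in %r0 */
static void generate_call_insn(struct ftrace_insn *insn,
			       unsigned long ip, unsigned long target)
{
	insn->opc  = 0xc005;		/* brasl %r0,... */
	insn->disp = (int32_t)(((long)target - (long)ip) / 2);
}
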
diff --git a/arch/s390/kernel/idle.c b/arch/s390/kernel/idle.c
index 7559f1beab29..7a55c29b0b33 100644
--- a/arch/s390/kernel/idle.c
+++ b/arch/s390/kernel/idle.c
@@ -19,7 +19,7 @@
19 19
20static DEFINE_PER_CPU(struct s390_idle_data, s390_idle); 20static DEFINE_PER_CPU(struct s390_idle_data, s390_idle);
21 21
22void __kprobes enabled_wait(void) 22void enabled_wait(void)
23{ 23{
24 struct s390_idle_data *idle = this_cpu_ptr(&s390_idle); 24 struct s390_idle_data *idle = this_cpu_ptr(&s390_idle);
25 unsigned long long idle_time; 25 unsigned long long idle_time;
@@ -35,31 +35,32 @@ void __kprobes enabled_wait(void)
35 /* Call the assembler magic in entry.S */ 35 /* Call the assembler magic in entry.S */
36 psw_idle(idle, psw_mask); 36 psw_idle(idle, psw_mask);
37 37
38 trace_hardirqs_off();
39
38 /* Account time spent with enabled wait psw loaded as idle time. */ 40 /* Account time spent with enabled wait psw loaded as idle time. */
39 idle->sequence++; 41 write_seqcount_begin(&idle->seqcount);
40 smp_wmb();
41 idle_time = idle->clock_idle_exit - idle->clock_idle_enter; 42 idle_time = idle->clock_idle_exit - idle->clock_idle_enter;
42 idle->clock_idle_enter = idle->clock_idle_exit = 0ULL; 43 idle->clock_idle_enter = idle->clock_idle_exit = 0ULL;
43 idle->idle_time += idle_time; 44 idle->idle_time += idle_time;
44 idle->idle_count++; 45 idle->idle_count++;
45 account_idle_time(idle_time); 46 account_idle_time(idle_time);
46 smp_wmb(); 47 write_seqcount_end(&idle->seqcount);
47 idle->sequence++;
48} 48}
49NOKPROBE_SYMBOL(enabled_wait);
49 50
50static ssize_t show_idle_count(struct device *dev, 51static ssize_t show_idle_count(struct device *dev,
51 struct device_attribute *attr, char *buf) 52 struct device_attribute *attr, char *buf)
52{ 53{
53 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); 54 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
54 unsigned long long idle_count; 55 unsigned long long idle_count;
55 unsigned int sequence; 56 unsigned int seq;
56 57
57 do { 58 do {
58 sequence = ACCESS_ONCE(idle->sequence); 59 seq = read_seqcount_begin(&idle->seqcount);
59 idle_count = ACCESS_ONCE(idle->idle_count); 60 idle_count = ACCESS_ONCE(idle->idle_count);
60 if (ACCESS_ONCE(idle->clock_idle_enter)) 61 if (ACCESS_ONCE(idle->clock_idle_enter))
61 idle_count++; 62 idle_count++;
62 } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence)); 63 } while (read_seqcount_retry(&idle->seqcount, seq));
63 return sprintf(buf, "%llu\n", idle_count); 64 return sprintf(buf, "%llu\n", idle_count);
64} 65}
65DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL); 66DEVICE_ATTR(idle_count, 0444, show_idle_count, NULL);
@@ -69,15 +70,15 @@ static ssize_t show_idle_time(struct device *dev,
69{ 70{
70 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id); 71 struct s390_idle_data *idle = &per_cpu(s390_idle, dev->id);
71 unsigned long long now, idle_time, idle_enter, idle_exit; 72 unsigned long long now, idle_time, idle_enter, idle_exit;
72 unsigned int sequence; 73 unsigned int seq;
73 74
74 do { 75 do {
75 now = get_tod_clock(); 76 now = get_tod_clock();
76 sequence = ACCESS_ONCE(idle->sequence); 77 seq = read_seqcount_begin(&idle->seqcount);
77 idle_time = ACCESS_ONCE(idle->idle_time); 78 idle_time = ACCESS_ONCE(idle->idle_time);
78 idle_enter = ACCESS_ONCE(idle->clock_idle_enter); 79 idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
79 idle_exit = ACCESS_ONCE(idle->clock_idle_exit); 80 idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
80 } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence)); 81 } while (read_seqcount_retry(&idle->seqcount, seq));
81 idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0; 82 idle_time += idle_enter ? ((idle_exit ? : now) - idle_enter) : 0;
82 return sprintf(buf, "%llu\n", idle_time >> 12); 83 return sprintf(buf, "%llu\n", idle_time >> 12);
83} 84}
@@ -87,14 +88,14 @@ cputime64_t arch_cpu_idle_time(int cpu)
87{ 88{
88 struct s390_idle_data *idle = &per_cpu(s390_idle, cpu); 89 struct s390_idle_data *idle = &per_cpu(s390_idle, cpu);
89 unsigned long long now, idle_enter, idle_exit; 90 unsigned long long now, idle_enter, idle_exit;
90 unsigned int sequence; 91 unsigned int seq;
91 92
92 do { 93 do {
93 now = get_tod_clock(); 94 now = get_tod_clock();
94 sequence = ACCESS_ONCE(idle->sequence); 95 seq = read_seqcount_begin(&idle->seqcount);
95 idle_enter = ACCESS_ONCE(idle->clock_idle_enter); 96 idle_enter = ACCESS_ONCE(idle->clock_idle_enter);
96 idle_exit = ACCESS_ONCE(idle->clock_idle_exit); 97 idle_exit = ACCESS_ONCE(idle->clock_idle_exit);
97 } while ((sequence & 1) || (ACCESS_ONCE(idle->sequence) != sequence)); 98 } while (read_seqcount_retry(&idle->seqcount, seq));
98 return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0; 99 return idle_enter ? ((idle_exit ?: now) - idle_enter) : 0;
99} 100}
100 101
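
The conversion above retires the hand-rolled odd/even counter
(idle->sequence bracketed by smp_wmb()) in favour of the generic seqcount
API. For reference, the reader side reduces to the following minimal
sketch; the names are illustrative, not part of the patch:

	/* requires <linux/seqlock.h> */
	static u64 read_sample(seqcount_t *sc, const u64 *value)
	{
		unsigned int seq;
		u64 val;

		do {
			seq = read_seqcount_begin(sc);	/* even snapshot */
			val = *value;			/* may race the writer */
		} while (read_seqcount_retry(sc, seq));	/* retry if it ran */
		return val;
	}
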
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 1b8a38ab7861..f238720690f3 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -127,13 +127,10 @@ int show_interrupts(struct seq_file *p, void *v)
127 for_each_online_cpu(cpu) 127 for_each_online_cpu(cpu)
128 seq_printf(p, "CPU%d ", cpu); 128 seq_printf(p, "CPU%d ", cpu);
129 seq_putc(p, '\n'); 129 seq_putc(p, '\n');
130 goto out;
131 } 130 }
132 if (index < NR_IRQS) { 131 if (index < NR_IRQS) {
133 if (index >= NR_IRQS_BASE) 132 if (index >= NR_IRQS_BASE)
134 goto out; 133 goto out;
135 /* Adjust index to process irqclass_main_desc array entries */
136 index--;
137 seq_printf(p, "%s: ", irqclass_main_desc[index].name); 134 seq_printf(p, "%s: ", irqclass_main_desc[index].name);
138 irq = irqclass_main_desc[index].irq; 135 irq = irqclass_main_desc[index].irq;
139 for_each_online_cpu(cpu) 136 for_each_online_cpu(cpu)
@@ -158,7 +155,7 @@ out:
158 155
159unsigned int arch_dynirq_lower_bound(unsigned int from) 156unsigned int arch_dynirq_lower_bound(unsigned int from)
160{ 157{
161 return from < THIN_INTERRUPT ? THIN_INTERRUPT : from; 158 return from < NR_IRQS_BASE ? NR_IRQS_BASE : from;
162} 159}
163 160
164/* 161/*
diff --git a/arch/s390/kernel/kprobes.c b/arch/s390/kernel/kprobes.c
index 014d4729b134..1e4c710dfb92 100644
--- a/arch/s390/kernel/kprobes.c
+++ b/arch/s390/kernel/kprobes.c
@@ -29,6 +29,7 @@
29#include <linux/module.h> 29#include <linux/module.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31#include <linux/hardirq.h> 31#include <linux/hardirq.h>
32#include <linux/ftrace.h>
32#include <asm/cacheflush.h> 33#include <asm/cacheflush.h>
33#include <asm/sections.h> 34#include <asm/sections.h>
34#include <asm/dis.h> 35#include <asm/dis.h>
@@ -58,12 +59,23 @@ struct kprobe_insn_cache kprobe_dmainsn_slots = {
58 .insn_size = MAX_INSN_SIZE, 59 .insn_size = MAX_INSN_SIZE,
59}; 60};
60 61
61static void __kprobes copy_instruction(struct kprobe *p) 62static void copy_instruction(struct kprobe *p)
62{ 63{
64 unsigned long ip = (unsigned long) p->addr;
63 s64 disp, new_disp; 65 s64 disp, new_disp;
64 u64 addr, new_addr; 66 u64 addr, new_addr;
65 67
66 memcpy(p->ainsn.insn, p->addr, insn_length(p->opcode >> 8)); 68 if (ftrace_location(ip) == ip) {
69 /*
70 * If kprobes patches the instruction that is morphed by
 71 * ftrace, make sure that kprobes always sees the branch
72 * "jg .+24" that skips the mcount block
73 */
74 ftrace_generate_nop_insn((struct ftrace_insn *)p->ainsn.insn);
75 p->ainsn.is_ftrace_insn = 1;
76 } else
77 memcpy(p->ainsn.insn, p->addr, insn_length(*p->addr >> 8));
78 p->opcode = p->ainsn.insn[0];
67 if (!probe_is_insn_relative_long(p->ainsn.insn)) 79 if (!probe_is_insn_relative_long(p->ainsn.insn))
68 return; 80 return;
69 /* 81 /*
@@ -79,25 +91,14 @@ static void __kprobes copy_instruction(struct kprobe *p)
79 new_disp = ((addr + (disp * 2)) - new_addr) / 2; 91 new_disp = ((addr + (disp * 2)) - new_addr) / 2;
80 *(s32 *)&p->ainsn.insn[1] = new_disp; 92 *(s32 *)&p->ainsn.insn[1] = new_disp;
81} 93}
94NOKPROBE_SYMBOL(copy_instruction);
82 95
83static inline int is_kernel_addr(void *addr) 96static inline int is_kernel_addr(void *addr)
84{ 97{
85 return addr < (void *)_end; 98 return addr < (void *)_end;
86} 99}
87 100
88static inline int is_module_addr(void *addr) 101static int s390_get_insn_slot(struct kprobe *p)
89{
90#ifdef CONFIG_64BIT
91 BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
92 if (addr < (void *)MODULES_VADDR)
93 return 0;
94 if (addr > (void *)MODULES_END)
95 return 0;
96#endif
97 return 1;
98}
99
100static int __kprobes s390_get_insn_slot(struct kprobe *p)
101{ 102{
102 /* 103 /*
103 * Get an insn slot that is within the same 2GB area as the original 104 * Get an insn slot that is within the same 2GB area as the original
@@ -111,8 +112,9 @@ static int __kprobes s390_get_insn_slot(struct kprobe *p)
111 p->ainsn.insn = get_insn_slot(); 112 p->ainsn.insn = get_insn_slot();
112 return p->ainsn.insn ? 0 : -ENOMEM; 113 return p->ainsn.insn ? 0 : -ENOMEM;
113} 114}
115NOKPROBE_SYMBOL(s390_get_insn_slot);
114 116
115static void __kprobes s390_free_insn_slot(struct kprobe *p) 117static void s390_free_insn_slot(struct kprobe *p)
116{ 118{
117 if (!p->ainsn.insn) 119 if (!p->ainsn.insn)
118 return; 120 return;
@@ -122,8 +124,9 @@ static void __kprobes s390_free_insn_slot(struct kprobe *p)
122 free_insn_slot(p->ainsn.insn, 0); 124 free_insn_slot(p->ainsn.insn, 0);
123 p->ainsn.insn = NULL; 125 p->ainsn.insn = NULL;
124} 126}
127NOKPROBE_SYMBOL(s390_free_insn_slot);
125 128
126int __kprobes arch_prepare_kprobe(struct kprobe *p) 129int arch_prepare_kprobe(struct kprobe *p)
127{ 130{
128 if ((unsigned long) p->addr & 0x01) 131 if ((unsigned long) p->addr & 0x01)
129 return -EINVAL; 132 return -EINVAL;
@@ -132,54 +135,79 @@ int __kprobes arch_prepare_kprobe(struct kprobe *p)
132 return -EINVAL; 135 return -EINVAL;
133 if (s390_get_insn_slot(p)) 136 if (s390_get_insn_slot(p))
134 return -ENOMEM; 137 return -ENOMEM;
135 p->opcode = *p->addr;
136 copy_instruction(p); 138 copy_instruction(p);
137 return 0; 139 return 0;
138} 140}
141NOKPROBE_SYMBOL(arch_prepare_kprobe);
139 142
140struct ins_replace_args { 143int arch_check_ftrace_location(struct kprobe *p)
141 kprobe_opcode_t *ptr; 144{
142 kprobe_opcode_t opcode; 145 return 0;
146}
147
148struct swap_insn_args {
149 struct kprobe *p;
150 unsigned int arm_kprobe : 1;
143}; 151};
144 152
145static int __kprobes swap_instruction(void *aref) 153static int swap_instruction(void *data)
146{ 154{
147 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 155 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
148 unsigned long status = kcb->kprobe_status; 156 unsigned long status = kcb->kprobe_status;
149 struct ins_replace_args *args = aref; 157 struct swap_insn_args *args = data;
150 158 struct ftrace_insn new_insn, *insn;
159 struct kprobe *p = args->p;
160 size_t len;
161
162 new_insn.opc = args->arm_kprobe ? BREAKPOINT_INSTRUCTION : p->opcode;
163 len = sizeof(new_insn.opc);
164 if (!p->ainsn.is_ftrace_insn)
165 goto skip_ftrace;
166 len = sizeof(new_insn);
167 insn = (struct ftrace_insn *) p->addr;
168 if (args->arm_kprobe) {
169 if (is_ftrace_nop(insn))
170 new_insn.disp = KPROBE_ON_FTRACE_NOP;
171 else
172 new_insn.disp = KPROBE_ON_FTRACE_CALL;
173 } else {
174 ftrace_generate_call_insn(&new_insn, (unsigned long)p->addr);
175 if (insn->disp == KPROBE_ON_FTRACE_NOP)
176 ftrace_generate_nop_insn(&new_insn);
177 }
178skip_ftrace:
151 kcb->kprobe_status = KPROBE_SWAP_INST; 179 kcb->kprobe_status = KPROBE_SWAP_INST;
152 probe_kernel_write(args->ptr, &args->opcode, sizeof(args->opcode)); 180 probe_kernel_write(p->addr, &new_insn, len);
153 kcb->kprobe_status = status; 181 kcb->kprobe_status = status;
154 return 0; 182 return 0;
155} 183}
184NOKPROBE_SYMBOL(swap_instruction);
156 185
157void __kprobes arch_arm_kprobe(struct kprobe *p) 186void arch_arm_kprobe(struct kprobe *p)
158{ 187{
159 struct ins_replace_args args; 188 struct swap_insn_args args = {.p = p, .arm_kprobe = 1};
160 189
161 args.ptr = p->addr;
162 args.opcode = BREAKPOINT_INSTRUCTION;
163 stop_machine(swap_instruction, &args, NULL); 190 stop_machine(swap_instruction, &args, NULL);
164} 191}
192NOKPROBE_SYMBOL(arch_arm_kprobe);
165 193
166void __kprobes arch_disarm_kprobe(struct kprobe *p) 194void arch_disarm_kprobe(struct kprobe *p)
167{ 195{
168 struct ins_replace_args args; 196 struct swap_insn_args args = {.p = p, .arm_kprobe = 0};
169 197
170 args.ptr = p->addr;
171 args.opcode = p->opcode;
172 stop_machine(swap_instruction, &args, NULL); 198 stop_machine(swap_instruction, &args, NULL);
173} 199}
200NOKPROBE_SYMBOL(arch_disarm_kprobe);
174 201
175void __kprobes arch_remove_kprobe(struct kprobe *p) 202void arch_remove_kprobe(struct kprobe *p)
176{ 203{
177 s390_free_insn_slot(p); 204 s390_free_insn_slot(p);
178} 205}
206NOKPROBE_SYMBOL(arch_remove_kprobe);
179 207
180static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb, 208static void enable_singlestep(struct kprobe_ctlblk *kcb,
181 struct pt_regs *regs, 209 struct pt_regs *regs,
182 unsigned long ip) 210 unsigned long ip)
183{ 211{
184 struct per_regs per_kprobe; 212 struct per_regs per_kprobe;
185 213
@@ -199,10 +227,11 @@ static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
199 regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT); 227 regs->psw.mask &= ~(PSW_MASK_IO | PSW_MASK_EXT);
200 regs->psw.addr = ip | PSW_ADDR_AMODE; 228 regs->psw.addr = ip | PSW_ADDR_AMODE;
201} 229}
230NOKPROBE_SYMBOL(enable_singlestep);
202 231
203static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb, 232static void disable_singlestep(struct kprobe_ctlblk *kcb,
204 struct pt_regs *regs, 233 struct pt_regs *regs,
205 unsigned long ip) 234 unsigned long ip)
206{ 235{
207 /* Restore control regs and psw mask, set new psw address */ 236 /* Restore control regs and psw mask, set new psw address */
208 __ctl_load(kcb->kprobe_saved_ctl, 9, 11); 237 __ctl_load(kcb->kprobe_saved_ctl, 9, 11);
@@ -210,41 +239,43 @@ static void __kprobes disable_singlestep(struct kprobe_ctlblk *kcb,
210 regs->psw.mask |= kcb->kprobe_saved_imask; 239 regs->psw.mask |= kcb->kprobe_saved_imask;
211 regs->psw.addr = ip | PSW_ADDR_AMODE; 240 regs->psw.addr = ip | PSW_ADDR_AMODE;
212} 241}
242NOKPROBE_SYMBOL(disable_singlestep);
213 243
214/* 244/*
215 * Activate a kprobe by storing its pointer in current_kprobe. The 245 * Activate a kprobe by storing its pointer in current_kprobe. The
216 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to 246 * previous kprobe is stored in kcb->prev_kprobe. A stack of up to
217 * two kprobes can be active, see KPROBE_REENTER. 247 * two kprobes can be active, see KPROBE_REENTER.
218 */ 248 */
219static void __kprobes push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p) 249static void push_kprobe(struct kprobe_ctlblk *kcb, struct kprobe *p)
220{ 250{
221 kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe); 251 kcb->prev_kprobe.kp = __this_cpu_read(current_kprobe);
222 kcb->prev_kprobe.status = kcb->kprobe_status; 252 kcb->prev_kprobe.status = kcb->kprobe_status;
223 __this_cpu_write(current_kprobe, p); 253 __this_cpu_write(current_kprobe, p);
224} 254}
255NOKPROBE_SYMBOL(push_kprobe);
225 256
226/* 257/*
227 * Deactivate a kprobe by backing up to the previous state. If the 258 * Deactivate a kprobe by backing up to the previous state. If the
228 * current state is KPROBE_REENTER, prev_kprobe.kp will be non-NULL; 259 * current state is KPROBE_REENTER, prev_kprobe.kp will be non-NULL;
229 * for any other state prev_kprobe.kp will be NULL. 260 * for any other state prev_kprobe.kp will be NULL.
230 */ 261 */
231static void __kprobes pop_kprobe(struct kprobe_ctlblk *kcb) 262static void pop_kprobe(struct kprobe_ctlblk *kcb)
232{ 263{
233 __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); 264 __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
234 kcb->kprobe_status = kcb->prev_kprobe.status; 265 kcb->kprobe_status = kcb->prev_kprobe.status;
235} 266}
267NOKPROBE_SYMBOL(pop_kprobe);
236 268
237void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, 269void arch_prepare_kretprobe(struct kretprobe_instance *ri, struct pt_regs *regs)
238 struct pt_regs *regs)
239{ 270{
240 ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14]; 271 ri->ret_addr = (kprobe_opcode_t *) regs->gprs[14];
241 272
242 /* Replace the return addr with trampoline addr */ 273 /* Replace the return addr with trampoline addr */
243 regs->gprs[14] = (unsigned long) &kretprobe_trampoline; 274 regs->gprs[14] = (unsigned long) &kretprobe_trampoline;
244} 275}
276NOKPROBE_SYMBOL(arch_prepare_kretprobe);
245 277
246static void __kprobes kprobe_reenter_check(struct kprobe_ctlblk *kcb, 278static void kprobe_reenter_check(struct kprobe_ctlblk *kcb, struct kprobe *p)
247 struct kprobe *p)
248{ 279{
249 switch (kcb->kprobe_status) { 280 switch (kcb->kprobe_status) {
250 case KPROBE_HIT_SSDONE: 281 case KPROBE_HIT_SSDONE:
@@ -264,8 +295,9 @@ static void __kprobes kprobe_reenter_check(struct kprobe_ctlblk *kcb,
264 BUG(); 295 BUG();
265 } 296 }
266} 297}
298NOKPROBE_SYMBOL(kprobe_reenter_check);
267 299
268static int __kprobes kprobe_handler(struct pt_regs *regs) 300static int kprobe_handler(struct pt_regs *regs)
269{ 301{
270 struct kprobe_ctlblk *kcb; 302 struct kprobe_ctlblk *kcb;
271 struct kprobe *p; 303 struct kprobe *p;
@@ -339,6 +371,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
339 preempt_enable_no_resched(); 371 preempt_enable_no_resched();
340 return 0; 372 return 0;
341} 373}
374NOKPROBE_SYMBOL(kprobe_handler);
342 375
343/* 376/*
344 * Function return probe trampoline: 377 * Function return probe trampoline:
@@ -355,8 +388,7 @@ static void __used kretprobe_trampoline_holder(void)
355/* 388/*
356 * Called when the probe at kretprobe trampoline is hit 389 * Called when the probe at kretprobe trampoline is hit
357 */ 390 */
358static int __kprobes trampoline_probe_handler(struct kprobe *p, 391static int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
359 struct pt_regs *regs)
360{ 392{
361 struct kretprobe_instance *ri; 393 struct kretprobe_instance *ri;
362 struct hlist_head *head, empty_rp; 394 struct hlist_head *head, empty_rp;
@@ -444,6 +476,7 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
444 */ 476 */
445 return 1; 477 return 1;
446} 478}
479NOKPROBE_SYMBOL(trampoline_probe_handler);
447 480
448/* 481/*
449 * Called after single-stepping. p->addr is the address of the 482 * Called after single-stepping. p->addr is the address of the
@@ -453,12 +486,30 @@ static int __kprobes trampoline_probe_handler(struct kprobe *p,
453 * single-stepped a copy of the instruction. The address of this 486 * single-stepped a copy of the instruction. The address of this
454 * copy is p->ainsn.insn. 487 * copy is p->ainsn.insn.
455 */ 488 */
456static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs) 489static void resume_execution(struct kprobe *p, struct pt_regs *regs)
457{ 490{
458 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 491 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
459 unsigned long ip = regs->psw.addr & PSW_ADDR_INSN; 492 unsigned long ip = regs->psw.addr & PSW_ADDR_INSN;
460 int fixup = probe_get_fixup_type(p->ainsn.insn); 493 int fixup = probe_get_fixup_type(p->ainsn.insn);
461 494
495 /* Check if the kprobes location is an enabled ftrace caller */
496 if (p->ainsn.is_ftrace_insn) {
497 struct ftrace_insn *insn = (struct ftrace_insn *) p->addr;
498 struct ftrace_insn call_insn;
499
500 ftrace_generate_call_insn(&call_insn, (unsigned long) p->addr);
501 /*
 502 * A kprobe on an enabled ftrace call site actually single-
 503 * stepped an unconditional branch (the ftrace nop equivalent).
 504 * Now we need to fix things up and pretend that a brasl r0,...
505 * was executed instead.
506 */
507 if (insn->disp == KPROBE_ON_FTRACE_CALL) {
508 ip += call_insn.disp * 2 - MCOUNT_INSN_SIZE;
509 regs->gprs[0] = (unsigned long)p->addr + sizeof(*insn);
510 }
511 }
512
462 if (fixup & FIXUP_PSW_NORMAL) 513 if (fixup & FIXUP_PSW_NORMAL)
463 ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn; 514 ip += (unsigned long) p->addr - (unsigned long) p->ainsn.insn;
464 515
@@ -476,8 +527,9 @@ static void __kprobes resume_execution(struct kprobe *p, struct pt_regs *regs)
476 527
477 disable_singlestep(kcb, regs, ip); 528 disable_singlestep(kcb, regs, ip);
478} 529}
530NOKPROBE_SYMBOL(resume_execution);
479 531
480static int __kprobes post_kprobe_handler(struct pt_regs *regs) 532static int post_kprobe_handler(struct pt_regs *regs)
481{ 533{
482 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 534 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
483 struct kprobe *p = kprobe_running(); 535 struct kprobe *p = kprobe_running();
@@ -504,8 +556,9 @@ static int __kprobes post_kprobe_handler(struct pt_regs *regs)
504 556
505 return 1; 557 return 1;
506} 558}
559NOKPROBE_SYMBOL(post_kprobe_handler);
507 560
508static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr) 561static int kprobe_trap_handler(struct pt_regs *regs, int trapnr)
509{ 562{
510 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 563 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
511 struct kprobe *p = kprobe_running(); 564 struct kprobe *p = kprobe_running();
@@ -567,8 +620,9 @@ static int __kprobes kprobe_trap_handler(struct pt_regs *regs, int trapnr)
567 } 620 }
568 return 0; 621 return 0;
569} 622}
623NOKPROBE_SYMBOL(kprobe_trap_handler);
570 624
571int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr) 625int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
572{ 626{
573 int ret; 627 int ret;
574 628
@@ -579,12 +633,13 @@ int __kprobes kprobe_fault_handler(struct pt_regs *regs, int trapnr)
579 local_irq_restore(regs->psw.mask & ~PSW_MASK_PER); 633 local_irq_restore(regs->psw.mask & ~PSW_MASK_PER);
580 return ret; 634 return ret;
581} 635}
636NOKPROBE_SYMBOL(kprobe_fault_handler);
582 637
583/* 638/*
584 * Wrapper routine to for handling exceptions. 639 * Wrapper routine to for handling exceptions.
585 */ 640 */
586int __kprobes kprobe_exceptions_notify(struct notifier_block *self, 641int kprobe_exceptions_notify(struct notifier_block *self,
587 unsigned long val, void *data) 642 unsigned long val, void *data)
588{ 643{
589 struct die_args *args = (struct die_args *) data; 644 struct die_args *args = (struct die_args *) data;
590 struct pt_regs *regs = args->regs; 645 struct pt_regs *regs = args->regs;
@@ -616,8 +671,9 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
616 671
617 return ret; 672 return ret;
618} 673}
674NOKPROBE_SYMBOL(kprobe_exceptions_notify);
619 675
620int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) 676int setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
621{ 677{
622 struct jprobe *jp = container_of(p, struct jprobe, kp); 678 struct jprobe *jp = container_of(p, struct jprobe, kp);
623 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 679 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
@@ -635,13 +691,15 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
635 memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack)); 691 memcpy(kcb->jprobes_stack, (void *) stack, MIN_STACK_SIZE(stack));
636 return 1; 692 return 1;
637} 693}
694NOKPROBE_SYMBOL(setjmp_pre_handler);
638 695
639void __kprobes jprobe_return(void) 696void jprobe_return(void)
640{ 697{
641 asm volatile(".word 0x0002"); 698 asm volatile(".word 0x0002");
642} 699}
700NOKPROBE_SYMBOL(jprobe_return);
643 701
644int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) 702int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
645{ 703{
646 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); 704 struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
647 unsigned long stack; 705 unsigned long stack;
@@ -655,6 +713,7 @@ int __kprobes longjmp_break_handler(struct kprobe *p, struct pt_regs *regs)
655 preempt_enable_no_resched(); 713 preempt_enable_no_resched();
656 return 1; 714 return 1;
657} 715}
716NOKPROBE_SYMBOL(longjmp_break_handler);
658 717
659static struct kprobe trampoline = { 718static struct kprobe trampoline = {
660 .addr = (kprobe_opcode_t *) &kretprobe_trampoline, 719 .addr = (kprobe_opcode_t *) &kretprobe_trampoline,
@@ -666,7 +725,8 @@ int __init arch_init_kprobes(void)
666 return register_kprobe(&trampoline); 725 return register_kprobe(&trampoline);
667} 726}
668 727
669int __kprobes arch_trampoline_kprobe(struct kprobe *p) 728int arch_trampoline_kprobe(struct kprobe *p)
670{ 729{
671 return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline; 730 return p->addr == (kprobe_opcode_t *) &kretprobe_trampoline;
672} 731}
732NOKPROBE_SYMBOL(arch_trampoline_kprobe);
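
Nearly every hunk in this file repeats one transformation: drop the
__kprobes attribute, which placed the function in the .kprobes.text
section, and blacklist the symbol after its definition instead. A minimal
sketch of the resulting style, with an illustrative function name:

	static int my_handler(struct pt_regs *regs)
	{
		/* runs in kprobe context and must not be probed itself */
		return 0;
	}
	NOKPROBE_SYMBOL(my_handler);
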
diff --git a/arch/s390/kernel/mcount.S b/arch/s390/kernel/mcount.S
index 4300ea374826..b6dfc5bfcb89 100644
--- a/arch/s390/kernel/mcount.S
+++ b/arch/s390/kernel/mcount.S
@@ -27,6 +27,7 @@ ENTRY(ftrace_caller)
27 .globl ftrace_regs_caller 27 .globl ftrace_regs_caller
28 .set ftrace_regs_caller,ftrace_caller 28 .set ftrace_regs_caller,ftrace_caller
29 lgr %r1,%r15 29 lgr %r1,%r15
30 aghi %r0,MCOUNT_RETURN_FIXUP
30 aghi %r15,-STACK_FRAME_SIZE 31 aghi %r15,-STACK_FRAME_SIZE
31 stg %r1,__SF_BACKCHAIN(%r15) 32 stg %r1,__SF_BACKCHAIN(%r15)
32 stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15) 33 stg %r1,(STACK_PTREGS_GPRS+15*8)(%r15)
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index b878f12a9597..c3f8d157cb0d 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1383,7 +1383,6 @@ static int cpumsf_pmu_add(struct perf_event *event, int flags)
1383 cpuhw->lsctl.ed = 1; 1383 cpuhw->lsctl.ed = 1;
1384 1384
1385 /* Set in_use flag and store event */ 1385 /* Set in_use flag and store event */
1386 event->hw.idx = 0; /* only one sampling event per CPU supported */
1387 cpuhw->event = event; 1386 cpuhw->event = event;
1388 cpuhw->flags |= PMU_F_IN_USE; 1387 cpuhw->flags |= PMU_F_IN_USE;
1389 1388
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index ed84cc224899..aa7a83948c7b 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -61,7 +61,7 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
61 return sf->gprs[8]; 61 return sf->gprs[8];
62} 62}
63 63
64extern void __kprobes kernel_thread_starter(void); 64extern void kernel_thread_starter(void);
65 65
66/* 66/*
67 * Free current thread data structures etc.. 67 * Free current thread data structures etc..
@@ -153,6 +153,7 @@ int copy_thread(unsigned long clone_flags, unsigned long new_stackp,
153 save_fp_ctl(&p->thread.fp_regs.fpc); 153 save_fp_ctl(&p->thread.fp_regs.fpc);
154 save_fp_regs(p->thread.fp_regs.fprs); 154 save_fp_regs(p->thread.fp_regs.fprs);
155 p->thread.fp_regs.pad = 0; 155 p->thread.fp_regs.pad = 0;
156 p->thread.vxrs = NULL;
156 /* Set a new TLS ? */ 157 /* Set a new TLS ? */
157 if (clone_flags & CLONE_SETTLS) { 158 if (clone_flags & CLONE_SETTLS) {
158 unsigned long tls = frame->childregs.gprs[6]; 159 unsigned long tls = frame->childregs.gprs[6];
diff --git a/arch/s390/kernel/ptrace.c b/arch/s390/kernel/ptrace.c
index 99a567b70d16..eabfb4594517 100644
--- a/arch/s390/kernel/ptrace.c
+++ b/arch/s390/kernel/ptrace.c
@@ -248,14 +248,27 @@ static unsigned long __peek_user(struct task_struct *child, addr_t addr)
248 */ 248 */
249 tmp = 0; 249 tmp = 0;
250 250
251 } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
252 /*
253 * floating point control reg. is in the thread structure
254 */
255 tmp = child->thread.fp_regs.fpc;
256 tmp <<= BITS_PER_LONG - 32;
257
251 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { 258 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
252 /* 259 /*
253 * floating point regs. are stored in the thread structure 260 * floating point regs. are either in child->thread.fp_regs
261 * or the child->thread.vxrs array
254 */ 262 */
255 offset = addr - (addr_t) &dummy->regs.fp_regs; 263 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
256 tmp = *(addr_t *)((addr_t) &child->thread.fp_regs + offset); 264#ifdef CONFIG_64BIT
257 if (addr == (addr_t) &dummy->regs.fp_regs.fpc) 265 if (child->thread.vxrs)
258 tmp <<= BITS_PER_LONG - 32; 266 tmp = *(addr_t *)
267 ((addr_t) child->thread.vxrs + 2*offset);
268 else
269#endif
270 tmp = *(addr_t *)
271 ((addr_t) &child->thread.fp_regs.fprs + offset);
259 272
260 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { 273 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
261 /* 274 /*
@@ -383,16 +396,29 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
383 */ 396 */
384 return 0; 397 return 0;
385 398
399 } else if (addr == (addr_t) &dummy->regs.fp_regs.fpc) {
400 /*
401 * floating point control reg. is in the thread structure
402 */
403 if ((unsigned int) data != 0 ||
404 test_fp_ctl(data >> (BITS_PER_LONG - 32)))
405 return -EINVAL;
406 child->thread.fp_regs.fpc = data >> (BITS_PER_LONG - 32);
407
386 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) { 408 } else if (addr < (addr_t) (&dummy->regs.fp_regs + 1)) {
387 /* 409 /*
388 * floating point regs. are stored in the thread structure 410 * floating point regs. are either in child->thread.fp_regs
411 * or the child->thread.vxrs array
389 */ 412 */
390 if (addr == (addr_t) &dummy->regs.fp_regs.fpc) 413 offset = addr - (addr_t) &dummy->regs.fp_regs.fprs;
391 if ((unsigned int) data != 0 || 414#ifdef CONFIG_64BIT
392 test_fp_ctl(data >> (BITS_PER_LONG - 32))) 415 if (child->thread.vxrs)
393 return -EINVAL; 416 *(addr_t *)((addr_t)
394 offset = addr - (addr_t) &dummy->regs.fp_regs; 417 child->thread.vxrs + 2*offset) = data;
395 *(addr_t *)((addr_t) &child->thread.fp_regs + offset) = data; 418 else
419#endif
420 *(addr_t *)((addr_t)
421 &child->thread.fp_regs.fprs + offset) = data;
396 422
397 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) { 423 } else if (addr < (addr_t) (&dummy->regs.per_info + 1)) {
398 /* 424 /*
@@ -611,12 +637,26 @@ static u32 __peek_user_compat(struct task_struct *child, addr_t addr)
611 */ 637 */
612 tmp = 0; 638 tmp = 0;
613 639
640 } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
641 /*
642 * floating point control reg. is in the thread structure
643 */
644 tmp = child->thread.fp_regs.fpc;
645
614 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { 646 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
615 /* 647 /*
616 * floating point regs. are stored in the thread structure 648 * floating point regs. are either in child->thread.fp_regs
649 * or the child->thread.vxrs array
617 */ 650 */
618 offset = addr - (addr_t) &dummy32->regs.fp_regs; 651 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
619 tmp = *(__u32 *)((addr_t) &child->thread.fp_regs + offset); 652#ifdef CONFIG_64BIT
653 if (child->thread.vxrs)
654 tmp = *(__u32 *)
655 ((addr_t) child->thread.vxrs + 2*offset);
656 else
657#endif
658 tmp = *(__u32 *)
659 ((addr_t) &child->thread.fp_regs.fprs + offset);
620 660
621 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { 661 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
622 /* 662 /*
@@ -722,15 +762,28 @@ static int __poke_user_compat(struct task_struct *child,
722 */ 762 */
723 return 0; 763 return 0;
724 764
725 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) { 765 } else if (addr == (addr_t) &dummy32->regs.fp_regs.fpc) {
726 /* 766 /*
727 * floating point regs. are stored in the thread structure 767 * floating point control reg. is in the thread structure
728 */ 768 */
729 if (addr == (addr_t) &dummy32->regs.fp_regs.fpc && 769 if (test_fp_ctl(tmp))
730 test_fp_ctl(tmp))
731 return -EINVAL; 770 return -EINVAL;
732 offset = addr - (addr_t) &dummy32->regs.fp_regs; 771 child->thread.fp_regs.fpc = data;
733 *(__u32 *)((addr_t) &child->thread.fp_regs + offset) = tmp; 772
773 } else if (addr < (addr_t) (&dummy32->regs.fp_regs + 1)) {
774 /*
775 * floating point regs. are either in child->thread.fp_regs
776 * or the child->thread.vxrs array
777 */
778 offset = addr - (addr_t) &dummy32->regs.fp_regs.fprs;
779#ifdef CONFIG_64BIT
780 if (child->thread.vxrs)
781 *(__u32 *)((addr_t)
782 child->thread.vxrs + 2*offset) = tmp;
783 else
784#endif
785 *(__u32 *)((addr_t)
786 &child->thread.fp_regs.fprs + offset) = tmp;
734 787
735 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) { 788 } else if (addr < (addr_t) (&dummy32->regs.per_info + 1)) {
736 /* 789 /*
@@ -1038,12 +1091,6 @@ static int s390_tdb_set(struct task_struct *target,
1038 return 0; 1091 return 0;
1039} 1092}
1040 1093
1041static int s390_vxrs_active(struct task_struct *target,
1042 const struct user_regset *regset)
1043{
1044 return !!target->thread.vxrs;
1045}
1046
1047static int s390_vxrs_low_get(struct task_struct *target, 1094static int s390_vxrs_low_get(struct task_struct *target,
1048 const struct user_regset *regset, 1095 const struct user_regset *regset,
1049 unsigned int pos, unsigned int count, 1096 unsigned int pos, unsigned int count,
@@ -1052,6 +1099,8 @@ static int s390_vxrs_low_get(struct task_struct *target,
1052 __u64 vxrs[__NUM_VXRS_LOW]; 1099 __u64 vxrs[__NUM_VXRS_LOW];
1053 int i; 1100 int i;
1054 1101
1102 if (!MACHINE_HAS_VX)
1103 return -ENODEV;
1055 if (target->thread.vxrs) { 1104 if (target->thread.vxrs) {
1056 if (target == current) 1105 if (target == current)
1057 save_vx_regs(target->thread.vxrs); 1106 save_vx_regs(target->thread.vxrs);
@@ -1070,6 +1119,8 @@ static int s390_vxrs_low_set(struct task_struct *target,
1070 __u64 vxrs[__NUM_VXRS_LOW]; 1119 __u64 vxrs[__NUM_VXRS_LOW];
1071 int i, rc; 1120 int i, rc;
1072 1121
1122 if (!MACHINE_HAS_VX)
1123 return -ENODEV;
1073 if (!target->thread.vxrs) { 1124 if (!target->thread.vxrs) {
1074 rc = alloc_vector_registers(target); 1125 rc = alloc_vector_registers(target);
1075 if (rc) 1126 if (rc)
@@ -1095,6 +1146,8 @@ static int s390_vxrs_high_get(struct task_struct *target,
1095{ 1146{
1096 __vector128 vxrs[__NUM_VXRS_HIGH]; 1147 __vector128 vxrs[__NUM_VXRS_HIGH];
1097 1148
1149 if (!MACHINE_HAS_VX)
1150 return -ENODEV;
1098 if (target->thread.vxrs) { 1151 if (target->thread.vxrs) {
1099 if (target == current) 1152 if (target == current)
1100 save_vx_regs(target->thread.vxrs); 1153 save_vx_regs(target->thread.vxrs);
@@ -1112,6 +1165,8 @@ static int s390_vxrs_high_set(struct task_struct *target,
1112{ 1165{
1113 int rc; 1166 int rc;
1114 1167
1168 if (!MACHINE_HAS_VX)
1169 return -ENODEV;
1115 if (!target->thread.vxrs) { 1170 if (!target->thread.vxrs) {
1116 rc = alloc_vector_registers(target); 1171 rc = alloc_vector_registers(target);
1117 if (rc) 1172 if (rc)
@@ -1196,7 +1251,6 @@ static const struct user_regset s390_regsets[] = {
1196 .n = __NUM_VXRS_LOW, 1251 .n = __NUM_VXRS_LOW,
1197 .size = sizeof(__u64), 1252 .size = sizeof(__u64),
1198 .align = sizeof(__u64), 1253 .align = sizeof(__u64),
1199 .active = s390_vxrs_active,
1200 .get = s390_vxrs_low_get, 1254 .get = s390_vxrs_low_get,
1201 .set = s390_vxrs_low_set, 1255 .set = s390_vxrs_low_set,
1202 }, 1256 },
@@ -1205,7 +1259,6 @@ static const struct user_regset s390_regsets[] = {
1205 .n = __NUM_VXRS_HIGH, 1259 .n = __NUM_VXRS_HIGH,
1206 .size = sizeof(__vector128), 1260 .size = sizeof(__vector128),
1207 .align = sizeof(__vector128), 1261 .align = sizeof(__vector128),
1208 .active = s390_vxrs_active,
1209 .get = s390_vxrs_high_get, 1262 .get = s390_vxrs_high_get,
1210 .set = s390_vxrs_high_set, 1263 .set = s390_vxrs_high_set,
1211 }, 1264 },
@@ -1419,7 +1472,6 @@ static const struct user_regset s390_compat_regsets[] = {
1419 .n = __NUM_VXRS_LOW, 1472 .n = __NUM_VXRS_LOW,
1420 .size = sizeof(__u64), 1473 .size = sizeof(__u64),
1421 .align = sizeof(__u64), 1474 .align = sizeof(__u64),
1422 .active = s390_vxrs_active,
1423 .get = s390_vxrs_low_get, 1475 .get = s390_vxrs_low_get,
1424 .set = s390_vxrs_low_set, 1476 .set = s390_vxrs_low_set,
1425 }, 1477 },
@@ -1428,7 +1480,6 @@ static const struct user_regset s390_compat_regsets[] = {
1428 .n = __NUM_VXRS_HIGH, 1480 .n = __NUM_VXRS_HIGH,
1429 .size = sizeof(__vector128), 1481 .size = sizeof(__vector128),
1430 .align = sizeof(__vector128), 1482 .align = sizeof(__vector128),
1431 .active = s390_vxrs_active,
1432 .get = s390_vxrs_high_get, 1483 .get = s390_vxrs_high_get,
1433 .set = s390_vxrs_high_set, 1484 .set = s390_vxrs_high_set,
1434 }, 1485 },
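
The recurring "2*offset" arithmetic in the four peek/poke paths above
encodes the register aliasing on machines with the vector facility:
floating point register n occupies the leftmost 8 bytes of the 16-byte
vector register n, so a byte offset into the fprs array doubles when
redirected into the vxrs array. A hypothetical helper capturing the common
case (not part of the patch):

	static inline void *fpr_slot(struct task_struct *child, addr_t offset)
	{
		if (child->thread.vxrs)		/* vector registers allocated */
			return (void *)((addr_t) child->thread.vxrs + 2 * offset);
		return (void *)((addr_t) &child->thread.fp_regs.fprs + offset);
	}
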
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index e80d9ff9a56d..4e532c67832f 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -41,7 +41,6 @@
41#include <linux/ctype.h> 41#include <linux/ctype.h>
42#include <linux/reboot.h> 42#include <linux/reboot.h>
43#include <linux/topology.h> 43#include <linux/topology.h>
44#include <linux/ftrace.h>
45#include <linux/kexec.h> 44#include <linux/kexec.h>
46#include <linux/crash_dump.h> 45#include <linux/crash_dump.h>
47#include <linux/memory.h> 46#include <linux/memory.h>
@@ -356,7 +355,6 @@ static void __init setup_lowcore(void)
356 lc->steal_timer = S390_lowcore.steal_timer; 355 lc->steal_timer = S390_lowcore.steal_timer;
357 lc->last_update_timer = S390_lowcore.last_update_timer; 356 lc->last_update_timer = S390_lowcore.last_update_timer;
358 lc->last_update_clock = S390_lowcore.last_update_clock; 357 lc->last_update_clock = S390_lowcore.last_update_clock;
359 lc->ftrace_func = S390_lowcore.ftrace_func;
360 358
361 restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0); 359 restart_stack = __alloc_bootmem(ASYNC_SIZE, ASYNC_SIZE, 0);
362 restart_stack += ASYNC_SIZE; 360 restart_stack += ASYNC_SIZE;
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index 0c1a0ff0a558..6a2ac257d98f 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -371,7 +371,7 @@ static int setup_frame(int sig, struct k_sigaction *ka,
371 restorer = (unsigned long) ka->sa.sa_restorer | PSW_ADDR_AMODE; 371 restorer = (unsigned long) ka->sa.sa_restorer | PSW_ADDR_AMODE;
372 } else { 372 } else {
373 /* Signal frames without vector registers are short! */ 373 /* Signal frames without vector registers are short! */
374 __u16 __user *svc = (void *) frame + frame_size - 2; 374 __u16 __user *svc = (void __user *) frame + frame_size - 2;
375 if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc)) 375 if (__put_user(S390_SYSCALL_OPCODE | __NR_sigreturn, svc))
376 return -EFAULT; 376 return -EFAULT;
377 restorer = (unsigned long) svc | PSW_ADDR_AMODE; 377 restorer = (unsigned long) svc | PSW_ADDR_AMODE;
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 6fd9e60101f1..0b499f5cbe19 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -236,7 +236,6 @@ static void pcpu_prepare_secondary(struct pcpu *pcpu, int cpu)
236 lc->percpu_offset = __per_cpu_offset[cpu]; 236 lc->percpu_offset = __per_cpu_offset[cpu];
237 lc->kernel_asce = S390_lowcore.kernel_asce; 237 lc->kernel_asce = S390_lowcore.kernel_asce;
238 lc->machine_flags = S390_lowcore.machine_flags; 238 lc->machine_flags = S390_lowcore.machine_flags;
239 lc->ftrace_func = S390_lowcore.ftrace_func;
240 lc->user_timer = lc->system_timer = lc->steal_timer = 0; 239 lc->user_timer = lc->system_timer = lc->steal_timer = 0;
241 __ctl_store(lc->cregs_save_area, 0, 15); 240 __ctl_store(lc->cregs_save_area, 0, 15);
242 save_access_regs((unsigned int *) lc->access_regs_save_area); 241 save_access_regs((unsigned int *) lc->access_regs_save_area);
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 9f7087fd58de..a2987243bc76 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -360,3 +360,5 @@ SYSCALL(sys_seccomp,sys_seccomp,compat_sys_seccomp)
360SYSCALL(sys_getrandom,sys_getrandom,compat_sys_getrandom) 360SYSCALL(sys_getrandom,sys_getrandom,compat_sys_getrandom)
361SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */ 361SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */
362SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf) 362SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf)
363SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_write,compat_sys_s390_pci_mmio_write)
364SYSCALL(sys_ni_syscall,sys_s390_pci_mmio_read,compat_sys_s390_pci_mmio_read)
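
From the numbering in this table (memfd_create is marked 350 and sys_bpf
follows it), the two new entries land at 352 and 353. A user-space sketch;
the syscall numbers are an inference from the table and should be checked
against <asm/unistd.h> before use:

	#include <unistd.h>
	#include <sys/syscall.h>

	#define __NR_s390_pci_mmio_write 352	/* inferred from the table */
	#define __NR_s390_pci_mmio_read  353	/* inferred from the table */

	static long pci_mmio_read(unsigned long mmio_addr, void *buf,
				  size_t len)
	{
		return syscall(__NR_s390_pci_mmio_read, mmio_addr, buf, len);
	}
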
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 005d665fe4a5..20660dddb2d6 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -61,10 +61,11 @@ static DEFINE_PER_CPU(struct clock_event_device, comparators);
61/* 61/*
62 * Scheduler clock - returns current time in nanosec units. 62 * Scheduler clock - returns current time in nanosec units.
63 */ 63 */
64unsigned long long notrace __kprobes sched_clock(void) 64unsigned long long notrace sched_clock(void)
65{ 65{
66 return tod_to_ns(get_tod_clock_monotonic()); 66 return tod_to_ns(get_tod_clock_monotonic());
67} 67}
68NOKPROBE_SYMBOL(sched_clock);
68 69
69/* 70/*
70 * Monotonic_clock - returns # of nanoseconds passed since time_init() 71 * Monotonic_clock - returns # of nanoseconds passed since time_init()
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 9ff5ecba26ab..f081cf1157c3 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -49,7 +49,8 @@ static inline void report_user_fault(struct pt_regs *regs, int signr)
49 return; 49 return;
50 if (!printk_ratelimit()) 50 if (!printk_ratelimit())
51 return; 51 return;
52 printk("User process fault: interruption code 0x%X ", regs->int_code); 52 printk("User process fault: interruption code %04x ilc:%d ",
53 regs->int_code & 0xffff, regs->int_code >> 17);
53 print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN); 54 print_vma_addr("in ", regs->psw.addr & PSW_ADDR_INSN);
54 printk("\n"); 55 printk("\n");
55 show_regs(regs); 56 show_regs(regs);
@@ -87,16 +88,16 @@ void do_report_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
87 } 88 }
88} 89}
89 90
90static void __kprobes do_trap(struct pt_regs *regs, int si_signo, int si_code, 91static void do_trap(struct pt_regs *regs, int si_signo, int si_code, char *str)
91 char *str)
92{ 92{
93 if (notify_die(DIE_TRAP, str, regs, 0, 93 if (notify_die(DIE_TRAP, str, regs, 0,
94 regs->int_code, si_signo) == NOTIFY_STOP) 94 regs->int_code, si_signo) == NOTIFY_STOP)
95 return; 95 return;
96 do_report_trap(regs, si_signo, si_code, str); 96 do_report_trap(regs, si_signo, si_code, str);
97} 97}
98NOKPROBE_SYMBOL(do_trap);
98 99
99void __kprobes do_per_trap(struct pt_regs *regs) 100void do_per_trap(struct pt_regs *regs)
100{ 101{
101 siginfo_t info; 102 siginfo_t info;
102 103
@@ -111,6 +112,7 @@ void __kprobes do_per_trap(struct pt_regs *regs)
111 (void __force __user *) current->thread.per_event.address; 112 (void __force __user *) current->thread.per_event.address;
112 force_sig_info(SIGTRAP, &info, current); 113 force_sig_info(SIGTRAP, &info, current);
113} 114}
115NOKPROBE_SYMBOL(do_per_trap);
114 116
115void default_trap_handler(struct pt_regs *regs) 117void default_trap_handler(struct pt_regs *regs)
116{ 118{
@@ -151,8 +153,6 @@ DO_ERROR_INFO(privileged_op, SIGILL, ILL_PRVOPC,
151 "privileged operation") 153 "privileged operation")
152DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN, 154DO_ERROR_INFO(special_op_exception, SIGILL, ILL_ILLOPN,
153 "special operation exception") 155 "special operation exception")
154DO_ERROR_INFO(translation_exception, SIGILL, ILL_ILLOPN,
155 "translation exception")
156 156
157#ifdef CONFIG_64BIT 157#ifdef CONFIG_64BIT
158DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN, 158DO_ERROR_INFO(transaction_exception, SIGILL, ILL_ILLOPN,
@@ -179,7 +179,13 @@ static inline void do_fp_trap(struct pt_regs *regs, int fpc)
179 do_trap(regs, SIGFPE, si_code, "floating point exception"); 179 do_trap(regs, SIGFPE, si_code, "floating point exception");
180} 180}
181 181
182void __kprobes illegal_op(struct pt_regs *regs) 182void translation_exception(struct pt_regs *regs)
183{
184 /* May never happen. */
185 die(regs, "Translation exception");
186}
187
188void illegal_op(struct pt_regs *regs)
183{ 189{
184 siginfo_t info; 190 siginfo_t info;
185 __u8 opcode[6]; 191 __u8 opcode[6];
@@ -252,7 +258,7 @@ void __kprobes illegal_op(struct pt_regs *regs)
252 if (signal) 258 if (signal)
253 do_trap(regs, signal, ILL_ILLOPC, "illegal operation"); 259 do_trap(regs, signal, ILL_ILLOPC, "illegal operation");
254} 260}
255 261NOKPROBE_SYMBOL(illegal_op);
256 262
257#ifdef CONFIG_MATHEMU 263#ifdef CONFIG_MATHEMU
258void specification_exception(struct pt_regs *regs) 264void specification_exception(struct pt_regs *regs)
@@ -469,7 +475,7 @@ void space_switch_exception(struct pt_regs *regs)
469 do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event"); 475 do_trap(regs, SIGILL, ILL_PRVOPC, "space switch event");
470} 476}
471 477
472void __kprobes kernel_stack_overflow(struct pt_regs * regs) 478void kernel_stack_overflow(struct pt_regs *regs)
473{ 479{
474 bust_spinlocks(1); 480 bust_spinlocks(1);
475 printk("Kernel stack overflow.\n"); 481 printk("Kernel stack overflow.\n");
@@ -477,6 +483,7 @@ void __kprobes kernel_stack_overflow(struct pt_regs * regs)
477 bust_spinlocks(0); 483 bust_spinlocks(0);
478 panic("Corrupt kernel stack, can't continue."); 484 panic("Corrupt kernel stack, can't continue.");
479} 485}
486NOKPROBE_SYMBOL(kernel_stack_overflow);
480 487
481void __init trap_init(void) 488void __init trap_init(void)
482{ 489{
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 55aade49b6d1..6b049ee75a56 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -271,7 +271,7 @@ static int kvm_s390_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
271 case KVM_S390_VM_MEM_CLR_CMMA: 271 case KVM_S390_VM_MEM_CLR_CMMA:
272 mutex_lock(&kvm->lock); 272 mutex_lock(&kvm->lock);
273 idx = srcu_read_lock(&kvm->srcu); 273 idx = srcu_read_lock(&kvm->srcu);
274 page_table_reset_pgste(kvm->arch.gmap->mm, 0, TASK_SIZE, false); 274 s390_reset_cmma(kvm->arch.gmap->mm);
275 srcu_read_unlock(&kvm->srcu, idx); 275 srcu_read_unlock(&kvm->srcu, idx);
276 mutex_unlock(&kvm->lock); 276 mutex_unlock(&kvm->lock);
277 ret = 0; 277 ret = 0;
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index 72bb2dd8b9cd..f47cb0c6d906 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -156,21 +156,25 @@ static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
156 return 0; 156 return 0;
157} 157}
158 158
159static void __skey_check_enable(struct kvm_vcpu *vcpu) 159static int __skey_check_enable(struct kvm_vcpu *vcpu)
160{ 160{
161 int rc = 0;
161 if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE))) 162 if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
162 return; 163 return rc;
163 164
164 s390_enable_skey(); 165 rc = s390_enable_skey();
165 trace_kvm_s390_skey_related_inst(vcpu); 166 trace_kvm_s390_skey_related_inst(vcpu);
166 vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE); 167 vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
168 return rc;
167} 169}
168 170
169 171
170static int handle_skey(struct kvm_vcpu *vcpu) 172static int handle_skey(struct kvm_vcpu *vcpu)
171{ 173{
172 __skey_check_enable(vcpu); 174 int rc = __skey_check_enable(vcpu);
173 175
176 if (rc)
177 return rc;
174 vcpu->stat.instruction_storage_key++; 178 vcpu->stat.instruction_storage_key++;
175 179
176 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) 180 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
@@ -683,7 +687,10 @@ static int handle_pfmf(struct kvm_vcpu *vcpu)
683 } 687 }
684 688
685 if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) { 689 if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
686 __skey_check_enable(vcpu); 690 int rc = __skey_check_enable(vcpu);
691
692 if (rc)
693 return rc;
687 if (set_guest_storage_key(current->mm, useraddr, 694 if (set_guest_storage_key(current->mm, useraddr,
688 vcpu->run->s.regs.gprs[reg1] & PFMF_KEY, 695 vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
689 vcpu->run->s.regs.gprs[reg1] & PFMF_NQ)) 696 vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
index a2b81d6ce8a5..811937bb90be 100644
--- a/arch/s390/mm/fault.c
+++ b/arch/s390/mm/fault.c
@@ -261,8 +261,8 @@ static inline void report_user_fault(struct pt_regs *regs, long signr)
261 return; 261 return;
262 if (!printk_ratelimit()) 262 if (!printk_ratelimit())
263 return; 263 return;
264 printk(KERN_ALERT "User process fault: interruption code 0x%X ", 264 printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ",
265 regs->int_code); 265 regs->int_code & 0xffff, regs->int_code >> 17);
266 print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN); 266 print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN);
267 printk(KERN_CONT "\n"); 267 printk(KERN_CONT "\n");
268 printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n", 268 printk(KERN_ALERT "failing address: %016lx TEID: %016lx\n",
@@ -548,7 +548,7 @@ out:
548 return fault; 548 return fault;
549} 549}
550 550
551void __kprobes do_protection_exception(struct pt_regs *regs) 551void do_protection_exception(struct pt_regs *regs)
552{ 552{
553 unsigned long trans_exc_code; 553 unsigned long trans_exc_code;
554 int fault; 554 int fault;
@@ -574,8 +574,9 @@ void __kprobes do_protection_exception(struct pt_regs *regs)
574 if (unlikely(fault)) 574 if (unlikely(fault))
575 do_fault_error(regs, fault); 575 do_fault_error(regs, fault);
576} 576}
577NOKPROBE_SYMBOL(do_protection_exception);
577 578
578void __kprobes do_dat_exception(struct pt_regs *regs) 579void do_dat_exception(struct pt_regs *regs)
579{ 580{
580 int access, fault; 581 int access, fault;
581 582
@@ -584,6 +585,7 @@ void __kprobes do_dat_exception(struct pt_regs *regs)
584 if (unlikely(fault)) 585 if (unlikely(fault))
585 do_fault_error(regs, fault); 586 do_fault_error(regs, fault);
586} 587}
588NOKPROBE_SYMBOL(do_dat_exception);
587 589
588#ifdef CONFIG_PFAULT 590#ifdef CONFIG_PFAULT
589/* 591/*
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 1b79ca67392f..71c7eff2c89f 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -18,6 +18,8 @@
18#include <linux/rcupdate.h> 18#include <linux/rcupdate.h>
19#include <linux/slab.h> 19#include <linux/slab.h>
20#include <linux/swapops.h> 20#include <linux/swapops.h>
21#include <linux/ksm.h>
22#include <linux/mman.h>
21 23
22#include <asm/pgtable.h> 24#include <asm/pgtable.h>
23#include <asm/pgalloc.h> 25#include <asm/pgalloc.h>
@@ -750,8 +752,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
750 break; 752 break;
751 /* Walk the process page table, lock and get pte pointer */ 753 /* Walk the process page table, lock and get pte pointer */
752 ptep = get_locked_pte(gmap->mm, addr, &ptl); 754 ptep = get_locked_pte(gmap->mm, addr, &ptl);
753 if (unlikely(!ptep)) 755 VM_BUG_ON(!ptep);
754 continue;
755 /* Set notification bit in the pgste of the pte */ 756 /* Set notification bit in the pgste of the pte */
756 entry = *ptep; 757 entry = *ptep;
757 if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) { 758 if ((pte_val(entry) & (_PAGE_INVALID | _PAGE_PROTECT)) == 0) {
@@ -761,7 +762,7 @@ int gmap_ipte_notify(struct gmap *gmap, unsigned long gaddr, unsigned long len)
761 gaddr += PAGE_SIZE; 762 gaddr += PAGE_SIZE;
762 len -= PAGE_SIZE; 763 len -= PAGE_SIZE;
763 } 764 }
764 spin_unlock(ptl); 765 pte_unmap_unlock(ptep, ptl);
765 } 766 }
766 up_read(&gmap->mm->mmap_sem); 767 up_read(&gmap->mm->mmap_sem);
767 return rc; 768 return rc;
@@ -834,99 +835,6 @@ static inline void page_table_free_pgste(unsigned long *table)
834 __free_page(page); 835 __free_page(page);
835} 836}
836 837
837static inline unsigned long page_table_reset_pte(struct mm_struct *mm, pmd_t *pmd,
838 unsigned long addr, unsigned long end, bool init_skey)
839{
840 pte_t *start_pte, *pte;
841 spinlock_t *ptl;
842 pgste_t pgste;
843
844 start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
845 pte = start_pte;
846 do {
847 pgste = pgste_get_lock(pte);
848 pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
849 if (init_skey) {
850 unsigned long address;
851
852 pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
853 PGSTE_GR_BIT | PGSTE_GC_BIT);
854
855 /* skip invalid and not writable pages */
856 if (pte_val(*pte) & _PAGE_INVALID ||
857 !(pte_val(*pte) & _PAGE_WRITE)) {
858 pgste_set_unlock(pte, pgste);
859 continue;
860 }
861
862 address = pte_val(*pte) & PAGE_MASK;
863 page_set_storage_key(address, PAGE_DEFAULT_KEY, 1);
864 }
865 pgste_set_unlock(pte, pgste);
866 } while (pte++, addr += PAGE_SIZE, addr != end);
867 pte_unmap_unlock(start_pte, ptl);
868
869 return addr;
870}
871
872static inline unsigned long page_table_reset_pmd(struct mm_struct *mm, pud_t *pud,
873 unsigned long addr, unsigned long end, bool init_skey)
874{
875 unsigned long next;
876 pmd_t *pmd;
877
878 pmd = pmd_offset(pud, addr);
879 do {
880 next = pmd_addr_end(addr, end);
881 if (pmd_none_or_clear_bad(pmd))
882 continue;
883 next = page_table_reset_pte(mm, pmd, addr, next, init_skey);
884 } while (pmd++, addr = next, addr != end);
885
886 return addr;
887}
888
889static inline unsigned long page_table_reset_pud(struct mm_struct *mm, pgd_t *pgd,
890 unsigned long addr, unsigned long end, bool init_skey)
891{
892 unsigned long next;
893 pud_t *pud;
894
895 pud = pud_offset(pgd, addr);
896 do {
897 next = pud_addr_end(addr, end);
898 if (pud_none_or_clear_bad(pud))
899 continue;
900 next = page_table_reset_pmd(mm, pud, addr, next, init_skey);
901 } while (pud++, addr = next, addr != end);
902
903 return addr;
904}
905
906void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
907 unsigned long end, bool init_skey)
908{
909 unsigned long addr, next;
910 pgd_t *pgd;
911
912 down_write(&mm->mmap_sem);
913 if (init_skey && mm_use_skey(mm))
914 goto out_up;
915 addr = start;
916 pgd = pgd_offset(mm, addr);
917 do {
918 next = pgd_addr_end(addr, end);
919 if (pgd_none_or_clear_bad(pgd))
920 continue;
921 next = page_table_reset_pud(mm, pgd, addr, next, init_skey);
922 } while (pgd++, addr = next, addr != end);
923 if (init_skey)
924 current->mm->context.use_skey = 1;
925out_up:
926 up_write(&mm->mmap_sem);
927}
928EXPORT_SYMBOL(page_table_reset_pgste);
929
930int set_guest_storage_key(struct mm_struct *mm, unsigned long addr, 838int set_guest_storage_key(struct mm_struct *mm, unsigned long addr,
931 unsigned long key, bool nq) 839 unsigned long key, bool nq)
932{ 840{
@@ -992,11 +900,6 @@ static inline unsigned long *page_table_alloc_pgste(struct mm_struct *mm)
992 return NULL; 900 return NULL;
993} 901}
994 902
995void page_table_reset_pgste(struct mm_struct *mm, unsigned long start,
996 unsigned long end, bool init_skey)
997{
998}
999
1000static inline void page_table_free_pgste(unsigned long *table) 903static inline void page_table_free_pgste(unsigned long *table)
1001{ 904{
1002} 905}
@@ -1347,13 +1250,89 @@ EXPORT_SYMBOL_GPL(s390_enable_sie);
1347 * Enable storage key handling from now on and initialize the storage 1250 * Enable storage key handling from now on and initialize the storage
1348 * keys with the default key. 1251 * keys with the default key.
1349 */ 1252 */
1350void s390_enable_skey(void) 1253static int __s390_enable_skey(pte_t *pte, unsigned long addr,
1254 unsigned long next, struct mm_walk *walk)
1351{ 1255{
1352 page_table_reset_pgste(current->mm, 0, TASK_SIZE, true); 1256 unsigned long ptev;
1257 pgste_t pgste;
1258
1259 pgste = pgste_get_lock(pte);
1260 /*
 1261 * Remove all zero page mappings; after establishing a policy
 1262 * that forbids zero page mappings, subsequent faults for those
 1263 * pages will get fresh anonymous pages.
1264 */
1265 if (is_zero_pfn(pte_pfn(*pte))) {
1266 ptep_flush_direct(walk->mm, addr, pte);
1267 pte_val(*pte) = _PAGE_INVALID;
1268 }
1269 /* Clear storage key */
1270 pgste_val(pgste) &= ~(PGSTE_ACC_BITS | PGSTE_FP_BIT |
1271 PGSTE_GR_BIT | PGSTE_GC_BIT);
1272 ptev = pte_val(*pte);
1273 if (!(ptev & _PAGE_INVALID) && (ptev & _PAGE_WRITE))
1274 page_set_storage_key(ptev & PAGE_MASK, PAGE_DEFAULT_KEY, 1);
1275 pgste_set_unlock(pte, pgste);
1276 return 0;
1277}
1278
1279int s390_enable_skey(void)
1280{
1281 struct mm_walk walk = { .pte_entry = __s390_enable_skey };
1282 struct mm_struct *mm = current->mm;
1283 struct vm_area_struct *vma;
1284 int rc = 0;
1285
1286 down_write(&mm->mmap_sem);
1287 if (mm_use_skey(mm))
1288 goto out_up;
1289
1290 mm->context.use_skey = 1;
1291 for (vma = mm->mmap; vma; vma = vma->vm_next) {
1292 if (ksm_madvise(vma, vma->vm_start, vma->vm_end,
1293 MADV_UNMERGEABLE, &vma->vm_flags)) {
1294 mm->context.use_skey = 0;
1295 rc = -ENOMEM;
1296 goto out_up;
1297 }
1298 }
1299 mm->def_flags &= ~VM_MERGEABLE;
1300
1301 walk.mm = mm;
1302 walk_page_range(0, TASK_SIZE, &walk);
1303
1304out_up:
1305 up_write(&mm->mmap_sem);
1306 return rc;
1353} 1307}
1354EXPORT_SYMBOL_GPL(s390_enable_skey); 1308EXPORT_SYMBOL_GPL(s390_enable_skey);
1355 1309
1356/* 1310/*
1311 * Reset CMMA state, make all pages stable again.
1312 */
1313static int __s390_reset_cmma(pte_t *pte, unsigned long addr,
1314 unsigned long next, struct mm_walk *walk)
1315{
1316 pgste_t pgste;
1317
1318 pgste = pgste_get_lock(pte);
1319 pgste_val(pgste) &= ~_PGSTE_GPS_USAGE_MASK;
1320 pgste_set_unlock(pte, pgste);
1321 return 0;
1322}
1323
1324void s390_reset_cmma(struct mm_struct *mm)
1325{
1326 struct mm_walk walk = { .pte_entry = __s390_reset_cmma };
1327
1328 down_write(&mm->mmap_sem);
1329 walk.mm = mm;
1330 walk_page_range(0, TASK_SIZE, &walk);
1331 up_write(&mm->mmap_sem);
1332}
1333EXPORT_SYMBOL_GPL(s390_reset_cmma);
1334
1335/*
1357 * Test and reset if a guest page is dirty 1336 * Test and reset if a guest page is dirty
1358 */ 1337 */
1359bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap) 1338bool gmap_test_and_clear_dirty(unsigned long address, struct gmap *gmap)
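
Both new helpers follow the same pattern: fill a struct mm_walk with a
.pte_entry callback, take mmap_sem, and hand the address range to
walk_page_range(), which invokes the callback for each pte. A minimal
sketch of that pattern with illustrative names (counting present ptes):

	static int count_pte(pte_t *pte, unsigned long addr,
			     unsigned long next, struct mm_walk *walk)
	{
		if (pte_present(*pte))
			(*(unsigned long *) walk->private)++;
		return 0;	/* non-zero would abort the walk */
	}

	static unsigned long count_present_ptes(struct mm_struct *mm)
	{
		unsigned long n = 0;
		struct mm_walk walk = {
			.pte_entry = count_pte,
			.mm = mm,
			.private = &n,
		};

		down_read(&mm->mmap_sem);
		walk_page_range(0, TASK_SIZE, &walk);
		up_read(&mm->mmap_sem);
		return n;
	}
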
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile
index a9e1dc4ae442..805d8b29193a 100644
--- a/arch/s390/pci/Makefile
+++ b/arch/s390/pci/Makefile
@@ -3,4 +3,4 @@
3# 3#
4 4
5obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_sysfs.o \ 5obj-$(CONFIG_PCI) += pci.o pci_dma.o pci_clp.o pci_sysfs.o \
6 pci_event.o pci_debug.o pci_insn.o 6 pci_event.o pci_debug.o pci_insn.o pci_mmio.o
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index d59c82569750..3290f11ae1d9 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -369,8 +369,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
369 369
370 if (type == PCI_CAP_ID_MSI && nvec > 1) 370 if (type == PCI_CAP_ID_MSI && nvec > 1)
371 return 1; 371 return 1;
372 msi_vecs = min(nvec, ZPCI_MSI_VEC_MAX); 372 msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);
373 msi_vecs = min_t(unsigned int, msi_vecs, CONFIG_PCI_NR_MSI);
374 373
375 /* Allocate adapter summary indicator bit */ 374 /* Allocate adapter summary indicator bit */
376 rc = -EIO; 375 rc = -EIO;
@@ -474,7 +473,8 @@ static void zpci_map_resources(struct zpci_dev *zdev)
474 len = pci_resource_len(pdev, i); 473 len = pci_resource_len(pdev, i);
475 if (!len) 474 if (!len)
476 continue; 475 continue;
477 pdev->resource[i].start = (resource_size_t) pci_iomap(pdev, i, 0); 476 pdev->resource[i].start =
477 (resource_size_t __force) pci_iomap(pdev, i, 0);
478 pdev->resource[i].end = pdev->resource[i].start + len - 1; 478 pdev->resource[i].end = pdev->resource[i].start + len - 1;
479 } 479 }
480} 480}
@@ -489,7 +489,8 @@ static void zpci_unmap_resources(struct zpci_dev *zdev)
489 len = pci_resource_len(pdev, i); 489 len = pci_resource_len(pdev, i);
490 if (!len) 490 if (!len)
491 continue; 491 continue;
492 pci_iounmap(pdev, (void *) pdev->resource[i].start); 492 pci_iounmap(pdev, (void __iomem __force *)
493 pdev->resource[i].start);
493 } 494 }
494} 495}
495 496
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
index 6e22a247de9b..d6e411ed8b1f 100644
--- a/arch/s390/pci/pci_clp.c
+++ b/arch/s390/pci/pci_clp.c
@@ -62,6 +62,7 @@ static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
62 zdev->tlb_refresh = response->refresh; 62 zdev->tlb_refresh = response->refresh;
63 zdev->dma_mask = response->dasm; 63 zdev->dma_mask = response->dasm;
64 zdev->msi_addr = response->msia; 64 zdev->msi_addr = response->msia;
65 zdev->max_msi = response->noi;
65 zdev->fmb_update = response->mui; 66 zdev->fmb_update = response->mui;
66 67
67 switch (response->version) { 68 switch (response->version) {
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
index eec598c5939f..3229a2e570df 100644
--- a/arch/s390/pci/pci_debug.c
+++ b/arch/s390/pci/pci_debug.c
@@ -158,10 +158,7 @@ int __init zpci_debug_init(void)
158 158
159void zpci_debug_exit(void) 159void zpci_debug_exit(void)
160{ 160{
161 if (pci_debug_msg_id) 161 debug_unregister(pci_debug_msg_id);
162 debug_unregister(pci_debug_msg_id); 162 debug_unregister(pci_debug_err_id);
163 if (pci_debug_err_id)
164 debug_unregister(pci_debug_err_id);
165
166 debugfs_remove(debugfs_root); 163 debugfs_remove(debugfs_root);
167} 164}
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
new file mode 100644
index 000000000000..62c5ea6d8682
--- /dev/null
+++ b/arch/s390/pci/pci_mmio.c
@@ -0,0 +1,115 @@
1/*
2 * Access to PCI I/O memory from user space programs.
3 *
4 * Copyright IBM Corp. 2014
5 * Author(s): Alexey Ishchuk <aishchuk@linux.vnet.ibm.com>
6 */
7#include <linux/kernel.h>
8#include <linux/syscalls.h>
9#include <linux/init.h>
10#include <linux/mm.h>
11#include <linux/errno.h>
12#include <linux/pci.h>
13
14static long get_pfn(unsigned long user_addr, unsigned long access,
15 unsigned long *pfn)
16{
17 struct vm_area_struct *vma;
18 long ret;
19
20 down_read(&current->mm->mmap_sem);
21 ret = -EINVAL;
22 vma = find_vma(current->mm, user_addr);
23 if (!vma)
24 goto out;
25 ret = -EACCES;
26 if (!(vma->vm_flags & access))
27 goto out;
28 ret = follow_pfn(vma, user_addr, pfn);
29out:
30 up_read(&current->mm->mmap_sem);
31 return ret;
32}
33
34SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
35 const void __user *, user_buffer, size_t, length)
36{
37 u8 local_buf[64];
38 void __iomem *io_addr;
39 void *buf;
40 unsigned long pfn;
41 long ret;
42
43 if (!zpci_is_enabled())
44 return -ENODEV;
45
46 if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
47 return -EINVAL;
48 if (length > 64) {
49 buf = kmalloc(length, GFP_KERNEL);
50 if (!buf)
51 return -ENOMEM;
52 } else
53 buf = local_buf;
54
55 ret = get_pfn(mmio_addr, VM_WRITE, &pfn);
56 if (ret)
57 goto out;
58 io_addr = (void *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
59
60 ret = -EFAULT;
61 if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
62 goto out;
63
64 if (copy_from_user(buf, user_buffer, length))
65 goto out;
66
67 memcpy_toio(io_addr, buf, length);
68 ret = 0;
69out:
70 if (buf != local_buf)
71 kfree(buf);
72 return ret;
73}
74
75SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
76 void __user *, user_buffer, size_t, length)
77{
78 u8 local_buf[64];
79 void __iomem *io_addr;
80 void *buf;
81 unsigned long pfn;
82 long ret;
83
84 if (!zpci_is_enabled())
85 return -ENODEV;
86
87 if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
88 return -EINVAL;
89 if (length > 64) {
90 buf = kmalloc(length, GFP_KERNEL);
91 if (!buf)
92 return -ENOMEM;
93 } else
94 buf = local_buf;
95
96 ret = get_pfn(mmio_addr, VM_READ, &pfn);
97 if (ret)
98 goto out;
99 io_addr = (void *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
100
101 ret = -EFAULT;
102 if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
103 goto out;
104
105 memcpy_fromio(buf, io_addr, length);
106
107 if (copy_to_user(user_buffer, buf, length))
108 goto out;
109
110 ret = 0;
111out:
112 if (buf != local_buf)
113 kfree(buf);
114 return ret;
115}
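
The two syscalls above give user space access to PCI I/O memory on s390, where ordinary loads and stores cannot reach MMIO space. A sketch of how a user-space driver might wrap them, assuming the __NR_s390_pci_mmio_write/__NR_s390_pci_mmio_read numbers exported by the s390 unistd.h once this patch lands, and a libc without dedicated wrappers:

/* User-space sketch (illustrative only). bar points into an
 * mmap()ed PCI BAR; the kernel translates the user address back to
 * the I/O address via get_pfn() above. */
#define _GNU_SOURCE
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

static long pci_mmio_write32(volatile void *bar, uint32_t val)
{
	return syscall(__NR_s390_pci_mmio_write,
		       (unsigned long)bar, &val, sizeof(val));
}

static long pci_mmio_read32(volatile void *bar, uint32_t *val)
{
	return syscall(__NR_s390_pci_mmio_read,
		       (unsigned long)bar, val, sizeof(val));
}
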
diff --git a/arch/x86/include/asm/xen/cpuid.h b/arch/x86/include/asm/xen/cpuid.h
new file mode 100644
index 000000000000..0d809e9fc975
--- /dev/null
+++ b/arch/x86/include/asm/xen/cpuid.h
@@ -0,0 +1,91 @@
1/******************************************************************************
2 * arch-x86/cpuid.h
3 *
4 * CPUID interface to Xen.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to
8 * deal in the Software without restriction, including without limitation the
9 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
10 * sell copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
21 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
22 * DEALINGS IN THE SOFTWARE.
23 *
24 * Copyright (c) 2007 Citrix Systems, Inc.
25 *
26 * Authors:
27 * Keir Fraser <keir@xen.org>
28 */
29
30#ifndef __XEN_PUBLIC_ARCH_X86_CPUID_H__
31#define __XEN_PUBLIC_ARCH_X86_CPUID_H__
32
33/*
34 * For compatibility with other hypervisor interfaces, the Xen cpuid leaves
35 * can be found at the first otherwise unused 0x100 aligned boundary starting
36 * from 0x40000000.
37 *
38 * e.g. if viridian extensions are enabled for an HVM domain, the Xen cpuid
39 * leaves will start at 0x40000100.
40 */
41
42#define XEN_CPUID_FIRST_LEAF 0x40000000
43#define XEN_CPUID_LEAF(i) (XEN_CPUID_FIRST_LEAF + (i))
44
45/*
46 * Leaf 1 (0x40000x00)
47 * EAX: Largest Xen-information leaf. All leaves up to and including @EAX
48 * are supported by the Xen host.
49 * EBX-EDX: "XenVMMXenVMM" signature, allowing positive identification
50 * of a Xen host.
51 */
52#define XEN_CPUID_SIGNATURE_EBX 0x566e6558 /* "XenV" */
53#define XEN_CPUID_SIGNATURE_ECX 0x65584d4d /* "MMXe" */
54#define XEN_CPUID_SIGNATURE_EDX 0x4d4d566e /* "nVMM" */
55
56/*
57 * Leaf 2 (0x40000x01)
58 * EAX[31:16]: Xen major version.
59 * EAX[15: 0]: Xen minor version.
60 * EBX-EDX: Reserved (currently all zeroes).
61 */
62
63/*
64 * Leaf 3 (0x40000x02)
65 * EAX: Number of hypercall transfer pages. This register is always guaranteed
66 * to specify one hypercall page.
67 * EBX: Base address of Xen-specific MSRs.
68 * ECX: Features 1. Unused bits are set to zero.
69 * EDX: Features 2. Unused bits are set to zero.
70 */
71
72/* Does the host support MMU_PT_UPDATE_PRESERVE_AD for this guest? */
73#define _XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD 0
74#define XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD (1u<<0)
75
76/*
77 * Leaf 5 (0x40000x04)
78 * HVM-specific features
79 */
80
81/* EAX Features */
82/* Virtualized APIC registers */
83#define XEN_HVM_CPUID_APIC_ACCESS_VIRT (1u << 0)
84/* Virtualized x2APIC accesses */
85#define XEN_HVM_CPUID_X2APIC_VIRT (1u << 1)
86/* Memory mapped from other domains has valid IOMMU entries */
87#define XEN_HVM_CPUID_IOMMU_MAPPINGS (1u << 2)
88
89#define XEN_CPUID_MAX_NUM_LEAVES 4
90
91#endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */
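
Detection follows directly from the signature constants above. An illustrative user-space probe (not part of this header) that scans the 0x100-aligned window for the Xen signature; GCC's <cpuid.h> supplies the __cpuid() macro, and the XEN_CPUID_* constants are the ones defined in this file:

#include <cpuid.h>
#include <stdbool.h>

static bool running_on_xen(void)
{
	unsigned int eax, ebx, ecx, edx, base;

	/* Scan each 0x100-aligned candidate base, per the comment above. */
	for (base = XEN_CPUID_FIRST_LEAF; base < 0x40010000; base += 0x100) {
		__cpuid(base, eax, ebx, ecx, edx);
		if (ebx == XEN_CPUID_SIGNATURE_EBX &&
		    ecx == XEN_CPUID_SIGNATURE_ECX &&
		    edx == XEN_CPUID_SIGNATURE_EDX)
			return true;	/* eax holds the largest leaf */
	}
	return false;
}
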
diff --git a/arch/x86/include/asm/xen/page-coherent.h b/arch/x86/include/asm/xen/page-coherent.h
index 7f02fe4e2c7b..acd844c017d3 100644
--- a/arch/x86/include/asm/xen/page-coherent.h
+++ b/arch/x86/include/asm/xen/page-coherent.h
@@ -22,8 +22,8 @@ static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
22} 22}
23 23
24static inline void xen_dma_map_page(struct device *hwdev, struct page *page, 24static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
25 unsigned long offset, size_t size, enum dma_data_direction dir, 25 dma_addr_t dev_addr, unsigned long offset, size_t size,
26 struct dma_attrs *attrs) { } 26 enum dma_data_direction dir, struct dma_attrs *attrs) { }
27 27
28static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle, 28static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
29 size_t size, enum dma_data_direction dir, 29 size_t size, enum dma_data_direction dir,
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index c949923a5668..f58ef6c0613b 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -236,4 +236,11 @@ void make_lowmem_page_readwrite(void *vaddr);
236#define xen_remap(cookie, size) ioremap((cookie), (size)); 236#define xen_remap(cookie, size) ioremap((cookie), (size));
237#define xen_unmap(cookie) iounmap((cookie)) 237#define xen_unmap(cookie) iounmap((cookie))
238 238
239static inline bool xen_arch_need_swiotlb(struct device *dev,
240 unsigned long pfn,
241 unsigned long mfn)
242{
243 return false;
244}
245
239#endif /* _ASM_X86_XEN_PAGE_H */ 246#endif /* _ASM_X86_XEN_PAGE_H */
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 1819a91bbb9f..c489ef2c1a39 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -23,6 +23,8 @@
23#include <xen/features.h> 23#include <xen/features.h>
24#include <xen/events.h> 24#include <xen/events.h>
25#include <asm/xen/pci.h> 25#include <asm/xen/pci.h>
26#include <asm/xen/cpuid.h>
27#include <asm/apic.h>
26#include <asm/i8259.h> 28#include <asm/i8259.h>
27 29
28static int xen_pcifront_enable_irq(struct pci_dev *dev) 30static int xen_pcifront_enable_irq(struct pci_dev *dev)
@@ -423,6 +425,28 @@ int __init pci_xen_init(void)
423 return 0; 425 return 0;
424} 426}
425 427
428#ifdef CONFIG_PCI_MSI
429void __init xen_msi_init(void)
430{
431 if (!disable_apic) {
432 /*
433 * If hardware supports (x2)APIC virtualization (as indicated
434 * by the hypervisor's leaf 4), then we don't need to use pirqs/
435 * event channels for MSI handling and can instead use regular
436 * APIC processing.
437 */
438 uint32_t eax = cpuid_eax(xen_cpuid_base() + 4);
439
440 if (((eax & XEN_HVM_CPUID_X2APIC_VIRT) && x2apic_mode) ||
441 ((eax & XEN_HVM_CPUID_APIC_ACCESS_VIRT) && cpu_has_apic))
442 return;
443 }
444
445 x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs;
446 x86_msi.teardown_msi_irq = xen_teardown_msi_irq;
447}
448#endif
449
426int __init pci_xen_hvm_init(void) 450int __init pci_xen_hvm_init(void)
427{ 451{
428 if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs)) 452 if (!xen_have_vector_callback || !xen_feature(XENFEAT_hvm_pirqs))
@@ -437,8 +461,11 @@ int __init pci_xen_hvm_init(void)
437#endif 461#endif
438 462
439#ifdef CONFIG_PCI_MSI 463#ifdef CONFIG_PCI_MSI
440 x86_msi.setup_msi_irqs = xen_hvm_setup_msi_irqs; 464 /*
441 x86_msi.teardown_msi_irq = xen_teardown_msi_irq; 465 * We need to wait until after x2apic is initialized
466 * before we can set MSI IRQ ops.
467 */
468 x86_platform.apic_post_init = xen_msi_init;
442#endif 469#endif
443 return 0; 470 return 0;
444} 471}
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index cd4de7e038ea..c6bcb8c719d8 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -33,6 +33,7 @@
33#include <linux/pci.h> 33#include <linux/pci.h>
34#include <linux/pci-acpi.h> 34#include <linux/pci-acpi.h>
35#include <linux/pci-aspm.h> 35#include <linux/pci-aspm.h>
36#include <linux/dmar.h>
36#include <linux/acpi.h> 37#include <linux/acpi.h>
37#include <linux/slab.h> 38#include <linux/slab.h>
38#include <linux/dmi.h> 39#include <linux/dmi.h>
@@ -525,6 +526,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
525 struct acpi_pci_root *root; 526 struct acpi_pci_root *root;
526 acpi_handle handle = device->handle; 527 acpi_handle handle = device->handle;
527 int no_aspm = 0, clear_aspm = 0; 528 int no_aspm = 0, clear_aspm = 0;
529 bool hotadd = system_state != SYSTEM_BOOTING;
528 530
529 root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL); 531 root = kzalloc(sizeof(struct acpi_pci_root), GFP_KERNEL);
530 if (!root) 532 if (!root)
@@ -571,6 +573,11 @@ static int acpi_pci_root_add(struct acpi_device *device,
571 strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS); 573 strcpy(acpi_device_class(device), ACPI_PCI_ROOT_CLASS);
572 device->driver_data = root; 574 device->driver_data = root;
573 575
576 if (hotadd && dmar_device_add(handle)) {
577 result = -ENXIO;
578 goto end;
579 }
580
574 pr_info(PREFIX "%s [%s] (domain %04x %pR)\n", 581 pr_info(PREFIX "%s [%s] (domain %04x %pR)\n",
575 acpi_device_name(device), acpi_device_bid(device), 582 acpi_device_name(device), acpi_device_bid(device),
576 root->segment, &root->secondary); 583 root->segment, &root->secondary);
@@ -597,7 +604,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
597 root->segment, (unsigned int)root->secondary.start); 604 root->segment, (unsigned int)root->secondary.start);
598 device->driver_data = NULL; 605 device->driver_data = NULL;
599 result = -ENODEV; 606 result = -ENODEV;
600 goto end; 607 goto remove_dmar;
601 } 608 }
602 609
603 if (clear_aspm) { 610 if (clear_aspm) {
@@ -611,7 +618,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
611 if (device->wakeup.flags.run_wake) 618 if (device->wakeup.flags.run_wake)
612 device_set_run_wake(root->bus->bridge, true); 619 device_set_run_wake(root->bus->bridge, true);
613 620
614 if (system_state != SYSTEM_BOOTING) { 621 if (hotadd) {
615 pcibios_resource_survey_bus(root->bus); 622 pcibios_resource_survey_bus(root->bus);
616 pci_assign_unassigned_root_bus_resources(root->bus); 623 pci_assign_unassigned_root_bus_resources(root->bus);
617 } 624 }
@@ -621,6 +628,9 @@ static int acpi_pci_root_add(struct acpi_device *device,
621 pci_unlock_rescan_remove(); 628 pci_unlock_rescan_remove();
622 return 1; 629 return 1;
623 630
631remove_dmar:
632 if (hotadd)
633 dmar_device_remove(handle);
624end: 634end:
625 kfree(root); 635 kfree(root);
626 return result; 636 return result;
@@ -639,6 +649,8 @@ static void acpi_pci_root_remove(struct acpi_device *device)
639 649
640 pci_remove_root_bus(root->bus); 650 pci_remove_root_bus(root->bus);
641 651
652 dmar_device_remove(device->handle);
653
642 pci_unlock_rescan_remove(); 654 pci_unlock_rescan_remove();
643 655
644 kfree(root); 656 kfree(root);
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 973a3332a85f..80f4de729a86 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -95,8 +95,12 @@ static int amba_pm_runtime_suspend(struct device *dev)
95 struct amba_device *pcdev = to_amba_device(dev); 95 struct amba_device *pcdev = to_amba_device(dev);
96 int ret = pm_generic_runtime_suspend(dev); 96 int ret = pm_generic_runtime_suspend(dev);
97 97
98 if (ret == 0 && dev->driver) 98 if (ret == 0 && dev->driver) {
99 clk_disable_unprepare(pcdev->pclk); 99 if (pm_runtime_is_irq_safe(dev))
100 clk_disable(pcdev->pclk);
101 else
102 clk_disable_unprepare(pcdev->pclk);
103 }
100 104
101 return ret; 105 return ret;
102} 106}
@@ -107,7 +111,10 @@ static int amba_pm_runtime_resume(struct device *dev)
107 int ret; 111 int ret;
108 112
109 if (dev->driver) { 113 if (dev->driver) {
110 ret = clk_prepare_enable(pcdev->pclk); 114 if (pm_runtime_is_irq_safe(dev))
115 ret = clk_enable(pcdev->pclk);
116 else
117 ret = clk_prepare_enable(pcdev->pclk);
111 /* Failure is probably fatal to the system, but... */ 118 /* Failure is probably fatal to the system, but... */
112 if (ret) 119 if (ret)
113 return ret; 120 return ret;
@@ -115,7 +122,7 @@ static int amba_pm_runtime_resume(struct device *dev)
115 122
116 return pm_generic_runtime_resume(dev); 123 return pm_generic_runtime_resume(dev);
117} 124}
118#endif 125#endif /* CONFIG_PM */
119 126
120static const struct dev_pm_ops amba_pm = { 127static const struct dev_pm_ops amba_pm = {
121 .suspend = pm_generic_suspend, 128 .suspend = pm_generic_suspend,
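
The reason for the split above: clk_prepare() and clk_unprepare() may sleep, while the runtime-PM callbacks of a device marked IRQ-safe can run in atomic context, so only the atomic clk_enable()/clk_disable() halves are allowed there. A hypothetical consumer (not part of this patch) prepares the clock once at probe time so the bus code only has to toggle the atomic halves:

#include <linux/amba/bus.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>

static int foo_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;

	ret = clk_prepare(adev->pclk);		/* may sleep: only legal here */
	if (ret)
		return ret;

	pm_runtime_irq_safe(&adev->dev);	/* callbacks may run atomically */
	pm_runtime_enable(&adev->dev);
	return 0;
}
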
diff --git a/drivers/ata/ahci_sunxi.c b/drivers/ata/ahci_sunxi.c
index e44d675a30ec..b5aedca5ea3c 100644
--- a/drivers/ata/ahci_sunxi.c
+++ b/drivers/ata/ahci_sunxi.c
@@ -27,6 +27,12 @@
27#include <linux/regulator/consumer.h> 27#include <linux/regulator/consumer.h>
28#include "ahci.h" 28#include "ahci.h"
29 29
30/* Insmod parameters */
31static bool enable_pmp;
32module_param(enable_pmp, bool, 0);
33MODULE_PARM_DESC(enable_pmp,
34 "Enable support for sata port multipliers, only use if you use a pmp!");
35
30#define AHCI_BISTAFR 0x00a0 36#define AHCI_BISTAFR 0x00a0
31#define AHCI_BISTCR 0x00a4 37#define AHCI_BISTCR 0x00a4
32#define AHCI_BISTFCTR 0x00a8 38#define AHCI_BISTFCTR 0x00a8
@@ -184,7 +190,15 @@ static int ahci_sunxi_probe(struct platform_device *pdev)
184 goto disable_resources; 190 goto disable_resources;
185 191
186 hpriv->flags = AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI | 192 hpriv->flags = AHCI_HFLAG_32BIT_ONLY | AHCI_HFLAG_NO_MSI |
187 AHCI_HFLAG_NO_PMP | AHCI_HFLAG_YES_NCQ; 193 AHCI_HFLAG_YES_NCQ;
194
195 /*
196 * The sunxi sata controller seems to be unable to successfully do a
197 * soft reset if no pmp is attached, so disable pmp use unless
198 * requested; otherwise, directly attached disks do not work.
199 */
200 if (!enable_pmp)
201 hpriv->flags |= AHCI_HFLAG_NO_PMP;
188 202
189 rc = ahci_platform_init_host(pdev, hpriv, &ahci_sunxi_port_info); 203 rc = ahci_platform_init_host(pdev, hpriv, &ahci_sunxi_port_info);
190 if (rc) 204 if (rc)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index c5ba15af87d3..5c84fb5c3372 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -1043,8 +1043,8 @@ const char *sata_spd_string(unsigned int spd)
1043 * None. 1043 * None.
1044 * 1044 *
1045 * RETURNS: 1045 * RETURNS:
1046 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or 1046 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP,
1047 * %ATA_DEV_UNKNOWN the event of failure. 1047 * %ATA_DEV_ZAC, or %ATA_DEV_UNKNOWN in the event of failure.
1048 */ 1048 */
1049unsigned int ata_dev_classify(const struct ata_taskfile *tf) 1049unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1050{ 1050{
@@ -1089,6 +1089,11 @@ unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1089 return ATA_DEV_SEMB; 1089 return ATA_DEV_SEMB;
1090 } 1090 }
1091 1091
1092 if ((tf->lbam == 0xcd) && (tf->lbah == 0xab)) {
1093 DPRINTK("found ZAC device by sig\n");
1094 return ATA_DEV_ZAC;
1095 }
1096
1092 DPRINTK("unknown device\n"); 1097 DPRINTK("unknown device\n");
1093 return ATA_DEV_UNKNOWN; 1098 return ATA_DEV_UNKNOWN;
1094} 1099}
@@ -1329,7 +1334,7 @@ static int ata_hpa_resize(struct ata_device *dev)
1329 int rc; 1334 int rc;
1330 1335
1331 /* do we need to do it? */ 1336 /* do we need to do it? */
1332 if (dev->class != ATA_DEV_ATA || 1337 if ((dev->class != ATA_DEV_ATA && dev->class != ATA_DEV_ZAC) ||
1333 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) || 1338 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1334 (dev->horkage & ATA_HORKAGE_BROKEN_HPA)) 1339 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
1335 return 0; 1340 return 0;
@@ -1889,6 +1894,7 @@ retry:
1889 case ATA_DEV_SEMB: 1894 case ATA_DEV_SEMB:
1890 class = ATA_DEV_ATA; /* some hard drives report SEMB sig */ 1895 class = ATA_DEV_ATA; /* some hard drives report SEMB sig */
1891 case ATA_DEV_ATA: 1896 case ATA_DEV_ATA:
1897 case ATA_DEV_ZAC:
1892 tf.command = ATA_CMD_ID_ATA; 1898 tf.command = ATA_CMD_ID_ATA;
1893 break; 1899 break;
1894 case ATA_DEV_ATAPI: 1900 case ATA_DEV_ATAPI:
@@ -1980,7 +1986,7 @@ retry:
1980 rc = -EINVAL; 1986 rc = -EINVAL;
1981 reason = "device reports invalid type"; 1987 reason = "device reports invalid type";
1982 1988
1983 if (class == ATA_DEV_ATA) { 1989 if (class == ATA_DEV_ATA || class == ATA_DEV_ZAC) {
1984 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id)) 1990 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1985 goto err_out; 1991 goto err_out;
1986 if (ap->host->flags & ATA_HOST_IGNORE_ATA && 1992 if (ap->host->flags & ATA_HOST_IGNORE_ATA &&
@@ -2015,7 +2021,8 @@ retry:
2015 goto retry; 2021 goto retry;
2016 } 2022 }
2017 2023
2018 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) { 2024 if ((flags & ATA_READID_POSTRESET) &&
2025 (class == ATA_DEV_ATA || class == ATA_DEV_ZAC)) {
2019 /* 2026 /*
2020 * The exact sequence expected by certain pre-ATA4 drives is: 2027 * The exact sequence expected by certain pre-ATA4 drives is:
2021 * SRST RESET 2028 * SRST RESET
@@ -2280,7 +2287,7 @@ int ata_dev_configure(struct ata_device *dev)
2280 sizeof(modelbuf)); 2287 sizeof(modelbuf));
2281 2288
2282 /* ATA-specific feature tests */ 2289 /* ATA-specific feature tests */
2283 if (dev->class == ATA_DEV_ATA) { 2290 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
2284 if (ata_id_is_cfa(id)) { 2291 if (ata_id_is_cfa(id)) {
2285 /* CPRM may make this media unusable */ 2292 /* CPRM may make this media unusable */
2286 if (id[ATA_ID_CFA_KEY_MGMT] & 1) 2293 if (id[ATA_ID_CFA_KEY_MGMT] & 1)
@@ -4033,6 +4040,7 @@ int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4033 if (ata_class_enabled(new_class) && 4040 if (ata_class_enabled(new_class) &&
4034 new_class != ATA_DEV_ATA && 4041 new_class != ATA_DEV_ATA &&
4035 new_class != ATA_DEV_ATAPI && 4042 new_class != ATA_DEV_ATAPI &&
4043 new_class != ATA_DEV_ZAC &&
4036 new_class != ATA_DEV_SEMB) { 4044 new_class != ATA_DEV_SEMB) {
4037 ata_dev_info(dev, "class mismatch %u != %u\n", 4045 ata_dev_info(dev, "class mismatch %u != %u\n",
4038 dev->class, new_class); 4046 dev->class, new_class);
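
For reference, these are the reset signatures ata_dev_classify() distinguishes after this change; the 0xcd/0xab pair is the ZAC addition. A sketch, with values per ATA8-ACS and ZAC, assuming <linux/types.h> for u8:

static const struct {
	u8 lbam, lbah;
	const char *class;
} ata_sigs[] = {
	{ 0x00, 0x00, "ATA_DEV_ATA" },		/* plain ATA disk */
	{ 0x14, 0xeb, "ATA_DEV_ATAPI" },	/* packet device */
	{ 0x69, 0x96, "ATA_DEV_PMP" },		/* port multiplier */
	{ 0x3c, 0xc3, "ATA_DEV_SEMB" },		/* SEMB device */
	{ 0xcd, 0xab, "ATA_DEV_ZAC" },		/* new: zoned (ZAC) device */
};
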
diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
index dad83df555c4..3dbec8954c86 100644
--- a/drivers/ata/libata-eh.c
+++ b/drivers/ata/libata-eh.c
@@ -1809,6 +1809,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
1809 1809
1810 switch (qc->dev->class) { 1810 switch (qc->dev->class) {
1811 case ATA_DEV_ATA: 1811 case ATA_DEV_ATA:
1812 case ATA_DEV_ZAC:
1812 if (err & ATA_ICRC) 1813 if (err & ATA_ICRC)
1813 qc->err_mask |= AC_ERR_ATA_BUS; 1814 qc->err_mask |= AC_ERR_ATA_BUS;
1814 if (err & (ATA_UNC | ATA_AMNF)) 1815 if (err & (ATA_UNC | ATA_AMNF))
@@ -3792,7 +3793,8 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3792 struct ata_eh_context *ehc = &link->eh_context; 3793 struct ata_eh_context *ehc = &link->eh_context;
3793 unsigned long tmp; 3794 unsigned long tmp;
3794 3795
3795 if (dev->class != ATA_DEV_ATA) 3796 if (dev->class != ATA_DEV_ATA &&
3797 dev->class != ATA_DEV_ZAC)
3796 continue; 3798 continue;
3797 if (!(ehc->i.dev_action[dev->devno] & 3799 if (!(ehc->i.dev_action[dev->devno] &
3798 ATA_EH_PARK)) 3800 ATA_EH_PARK))
@@ -3873,7 +3875,8 @@ int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3873 3875
3874 /* retry flush if necessary */ 3876 /* retry flush if necessary */
3875 ata_for_each_dev(dev, link, ALL) { 3877 ata_for_each_dev(dev, link, ALL) {
3876 if (dev->class != ATA_DEV_ATA) 3878 if (dev->class != ATA_DEV_ATA &&
3879 dev->class != ATA_DEV_ZAC)
3877 continue; 3880 continue;
3878 rc = ata_eh_maybe_retry_flush(dev); 3881 rc = ata_eh_maybe_retry_flush(dev);
3879 if (rc) 3882 if (rc)
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index dd45c6a03e5d..e364e86e84d7 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -235,7 +235,8 @@ static ssize_t ata_scsi_park_store(struct device *device,
235 rc = -ENODEV; 235 rc = -ENODEV;
236 goto unlock; 236 goto unlock;
237 } 237 }
238 if (dev->class != ATA_DEV_ATA) { 238 if (dev->class != ATA_DEV_ATA &&
239 dev->class != ATA_DEV_ZAC) {
239 rc = -EOPNOTSUPP; 240 rc = -EOPNOTSUPP;
240 goto unlock; 241 goto unlock;
241 } 242 }
@@ -1961,6 +1962,7 @@ static void ata_scsi_rbuf_fill(struct ata_scsi_args *args,
1961static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf) 1962static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
1962{ 1963{
1963 const u8 versions[] = { 1964 const u8 versions[] = {
1965 0x00,
1964 0x60, /* SAM-3 (no version claimed) */ 1966 0x60, /* SAM-3 (no version claimed) */
1965 1967
1966 0x03, 1968 0x03,
@@ -1969,6 +1971,20 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
1969 0x02, 1971 0x02,
1970 0x60 /* SPC-3 (no version claimed) */ 1972 0x60 /* SPC-3 (no version claimed) */
1971 }; 1973 };
1974 const u8 versions_zbc[] = {
1975 0x00,
1976 0xA0, /* SAM-5 (no version claimed) */
1977
1978 0x04,
1979 0xC0, /* SBC-3 (no version claimed) */
1980
1981 0x04,
1982 0x60, /* SPC-4 (no version claimed) */
1983
1984 0x60,
1985 0x20, /* ZBC (no version claimed) */
1986 };
1987
1972 u8 hdr[] = { 1988 u8 hdr[] = {
1973 TYPE_DISK, 1989 TYPE_DISK,
1974 0, 1990 0,
@@ -1983,6 +1999,11 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
1983 if (ata_id_removeable(args->id)) 1999 if (ata_id_removeable(args->id))
1984 hdr[1] |= (1 << 7); 2000 hdr[1] |= (1 << 7);
1985 2001
2002 if (args->dev->class == ATA_DEV_ZAC) {
2003 hdr[0] = TYPE_ZBC;
2004 hdr[2] = 0x6; /* ZBC is defined in SPC-4 */
2005 }
2006
1986 memcpy(rbuf, hdr, sizeof(hdr)); 2007 memcpy(rbuf, hdr, sizeof(hdr));
1987 memcpy(&rbuf[8], "ATA ", 8); 2008 memcpy(&rbuf[8], "ATA ", 8);
1988 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16); 2009 ata_id_string(args->id, &rbuf[16], ATA_ID_PROD, 16);
@@ -1995,7 +2016,10 @@ static unsigned int ata_scsiop_inq_std(struct ata_scsi_args *args, u8 *rbuf)
1995 if (rbuf[32] == 0 || rbuf[32] == ' ') 2016 if (rbuf[32] == 0 || rbuf[32] == ' ')
1996 memcpy(&rbuf[32], "n/a ", 4); 2017 memcpy(&rbuf[32], "n/a ", 4);
1997 2018
1998 memcpy(rbuf + 59, versions, sizeof(versions)); 2019 if (args->dev->class == ATA_DEV_ZAC)
2020 memcpy(rbuf + 58, versions_zbc, sizeof(versions_zbc));
2021 else
2022 memcpy(rbuf + 58, versions, sizeof(versions));
1999 2023
2000 return 0; 2024 return 0;
2001} 2025}
@@ -2564,7 +2588,6 @@ static void atapi_request_sense(struct ata_queued_cmd *qc)
2564 2588
2565 DPRINTK("ATAPI request sense\n"); 2589 DPRINTK("ATAPI request sense\n");
2566 2590
2567 /* FIXME: is this needed? */
2568 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); 2591 memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
2569 2592
2570#ifdef CONFIG_ATA_SFF 2593#ifdef CONFIG_ATA_SFF
@@ -3405,7 +3428,7 @@ static inline int __ata_scsi_queuecmd(struct scsi_cmnd *scmd,
3405 ata_xlat_func_t xlat_func; 3428 ata_xlat_func_t xlat_func;
3406 int rc = 0; 3429 int rc = 0;
3407 3430
3408 if (dev->class == ATA_DEV_ATA) { 3431 if (dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ZAC) {
3409 if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len)) 3432 if (unlikely(!scmd->cmd_len || scmd->cmd_len > dev->cdb_len))
3410 goto bad_cdb_len; 3433 goto bad_cdb_len;
3411 3434
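
From the consumer side, a ZAC disk now reports peripheral device type TYPE_ZBC (0x14), claims SPC-4 conformance, and carries the ZBC version descriptor in the standard INQUIRY data built above. A hypothetical check, with offsets per SPC-4 (not part of this patch):

#include <stdbool.h>
#include <stddef.h>

static bool inquiry_is_zbc(const unsigned char *inq, size_t len)
{
	if (len < 66)
		return false;
	return (inq[0] & 0x1f) == 0x14 &&	/* TYPE_ZBC */
	       inq[2] == 0x06 &&		/* version: SPC-4 */
	       inq[64] == 0x60 && inq[65] == 0x20; /* ZBC descriptor */
}
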
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index e37413228228..3227b7c8a05f 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -143,6 +143,7 @@ static struct {
143 { ATA_DEV_PMP_UNSUP, "pmp" }, 143 { ATA_DEV_PMP_UNSUP, "pmp" },
144 { ATA_DEV_SEMB, "semb" }, 144 { ATA_DEV_SEMB, "semb" },
145 { ATA_DEV_SEMB_UNSUP, "semb" }, 145 { ATA_DEV_SEMB_UNSUP, "semb" },
146 { ATA_DEV_ZAC, "zac" },
146 { ATA_DEV_NONE, "none" } 147 { ATA_DEV_NONE, "none" }
147}; 148};
148ata_bitfield_name_search(class, ata_class_names) 149ata_bitfield_name_search(class, ata_class_names)
diff --git a/drivers/bcma/driver_mips.c b/drivers/bcma/driver_mips.c
index 5ec69c3d409d..04faf6df959f 100644
--- a/drivers/bcma/driver_mips.c
+++ b/drivers/bcma/driver_mips.c
@@ -20,6 +20,9 @@
20#include <linux/serial_core.h> 20#include <linux/serial_core.h>
21#include <linux/serial_reg.h> 21#include <linux/serial_reg.h>
22#include <linux/time.h> 22#include <linux/time.h>
23#ifdef CONFIG_BCM47XX
24#include <bcm47xx_nvram.h>
25#endif
23 26
24enum bcma_boot_dev { 27enum bcma_boot_dev {
25 BCMA_BOOT_DEV_UNK = 0, 28 BCMA_BOOT_DEV_UNK = 0,
@@ -316,10 +319,16 @@ static void bcma_core_mips_flash_detect(struct bcma_drv_mips *mcore)
316 switch (boot_dev) { 319 switch (boot_dev) {
317 case BCMA_BOOT_DEV_PARALLEL: 320 case BCMA_BOOT_DEV_PARALLEL:
318 case BCMA_BOOT_DEV_SERIAL: 321 case BCMA_BOOT_DEV_SERIAL:
319 /* TODO: Init NVRAM using BCMA_SOC_FLASH2 window */ 322#ifdef CONFIG_BCM47XX
323 bcm47xx_nvram_init_from_mem(BCMA_SOC_FLASH2,
324 BCMA_SOC_FLASH2_SZ);
325#endif
320 break; 326 break;
321 case BCMA_BOOT_DEV_NAND: 327 case BCMA_BOOT_DEV_NAND:
322 /* TODO: Init NVRAM using BCMA_SOC_FLASH1 window */ 328#ifdef CONFIG_BCM47XX
329 bcm47xx_nvram_init_from_mem(BCMA_SOC_FLASH1,
330 BCMA_SOC_FLASH1_SZ);
331#endif
323 break; 332 break;
324 default: 333 default:
325 break; 334 break;
diff --git a/drivers/char/ipmi/Kconfig b/drivers/char/ipmi/Kconfig
index db1c9b7adaa6..6ed9e9fe5233 100644
--- a/drivers/char/ipmi/Kconfig
+++ b/drivers/char/ipmi/Kconfig
@@ -62,6 +62,20 @@ config IPMI_SI_PROBE_DEFAULTS
62 only be available on older systems if the "ipmi_si_intf.trydefaults=1" 62 only be available on older systems if the "ipmi_si_intf.trydefaults=1"
63 boot argument is passed. 63 boot argument is passed.
64 64
65config IPMI_SSIF
66 tristate 'IPMI SMBus handler (SSIF)'
67 select I2C
68 help
69 Provides a driver for an SMBus interface to a BMC, meaning that you
70 have a driver that must be accessed over an I2C bus instead of a
71 standard interface. This module requires I2C support.
72
73config IPMI_POWERNV
74 depends on PPC_POWERNV
75 tristate 'POWERNV (OPAL firmware) IPMI interface'
76 help
77 Provides a driver for OPAL firmware-based IPMI interfaces.
78
65config IPMI_WATCHDOG 79config IPMI_WATCHDOG
66 tristate 'IPMI Watchdog Timer' 80 tristate 'IPMI Watchdog Timer'
67 help 81 help
diff --git a/drivers/char/ipmi/Makefile b/drivers/char/ipmi/Makefile
index 16a93648d54e..f3ffde1f5f1f 100644
--- a/drivers/char/ipmi/Makefile
+++ b/drivers/char/ipmi/Makefile
@@ -7,5 +7,7 @@ ipmi_si-y := ipmi_si_intf.o ipmi_kcs_sm.o ipmi_smic_sm.o ipmi_bt_sm.o
7obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o 7obj-$(CONFIG_IPMI_HANDLER) += ipmi_msghandler.o
8obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o 8obj-$(CONFIG_IPMI_DEVICE_INTERFACE) += ipmi_devintf.o
9obj-$(CONFIG_IPMI_SI) += ipmi_si.o 9obj-$(CONFIG_IPMI_SI) += ipmi_si.o
10obj-$(CONFIG_IPMI_SSIF) += ipmi_ssif.o
11obj-$(CONFIG_IPMI_POWERNV) += ipmi_powernv.o
10obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o 12obj-$(CONFIG_IPMI_WATCHDOG) += ipmi_watchdog.o
11obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o 13obj-$(CONFIG_IPMI_POWEROFF) += ipmi_poweroff.o
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index f816211f062f..5fa83f751378 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -56,6 +56,8 @@ static int ipmi_init_msghandler(void);
56static void smi_recv_tasklet(unsigned long); 56static void smi_recv_tasklet(unsigned long);
57static void handle_new_recv_msgs(ipmi_smi_t intf); 57static void handle_new_recv_msgs(ipmi_smi_t intf);
58static void need_waiter(ipmi_smi_t intf); 58static void need_waiter(ipmi_smi_t intf);
59static int handle_one_recv_msg(ipmi_smi_t intf,
60 struct ipmi_smi_msg *msg);
59 61
60static int initialized; 62static int initialized;
61 63
@@ -191,12 +193,12 @@ struct ipmi_proc_entry {
191#endif 193#endif
192 194
193struct bmc_device { 195struct bmc_device {
194 struct platform_device *dev; 196 struct platform_device pdev;
195 struct ipmi_device_id id; 197 struct ipmi_device_id id;
196 unsigned char guid[16]; 198 unsigned char guid[16];
197 int guid_set; 199 int guid_set;
198 200 char name[16];
199 struct kref refcount; 201 struct kref usecount;
200 202
201 /* bmc device attributes */ 203 /* bmc device attributes */
202 struct device_attribute device_id_attr; 204 struct device_attribute device_id_attr;
@@ -210,6 +212,7 @@ struct bmc_device {
210 struct device_attribute guid_attr; 212 struct device_attribute guid_attr;
211 struct device_attribute aux_firmware_rev_attr; 213 struct device_attribute aux_firmware_rev_attr;
212}; 214};
215#define to_bmc_device(x) container_of((x), struct bmc_device, pdev.dev)
213 216
214/* 217/*
215 * Various statistics for IPMI, these index stats[] in the ipmi_smi 218 * Various statistics for IPMI, these index stats[] in the ipmi_smi
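
The conversion above is the usual embed-and-container_of lifetime pattern: the platform_device now lives inside the bmc_device, so the driver core's release callback can free the whole object, replacing the old dev_get_drvdata() lookups. A generic sketch with a hypothetical struct (not from this patch):

#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_dev {
	struct platform_device pdev;	/* embedded, not a pointer */
	int state;
};
#define to_foo_dev(d) container_of((d), struct foo_dev, pdev.dev)

static void foo_dev_release(struct device *dev)
{
	kfree(to_foo_dev(dev));		/* frees pdev and state together */
}
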
@@ -323,6 +326,9 @@ struct ipmi_smi {
323 326
324 struct kref refcount; 327 struct kref refcount;
325 328
329 /* Set when the interface is being unregistered. */
330 bool in_shutdown;
331
326 /* Used for a list of interfaces. */ 332 /* Used for a list of interfaces. */
327 struct list_head link; 333 struct list_head link;
328 334
@@ -341,7 +347,6 @@ struct ipmi_smi {
341 347
342 struct bmc_device *bmc; 348 struct bmc_device *bmc;
343 char *my_dev_name; 349 char *my_dev_name;
344 char *sysfs_name;
345 350
346 /* 351 /*
347 * This is the lower-layer's sender routine. Note that you 352 * This is the lower-layer's sender routine. Note that you
@@ -377,11 +382,16 @@ struct ipmi_smi {
377 * periodic timer interrupt. The tasklet is for handling received 382 * periodic timer interrupt. The tasklet is for handling received
378 * messages directly from the handler. 383 * messages directly from the handler.
379 */ 384 */
380 spinlock_t waiting_msgs_lock; 385 spinlock_t waiting_rcv_msgs_lock;
381 struct list_head waiting_msgs; 386 struct list_head waiting_rcv_msgs;
382 atomic_t watchdog_pretimeouts_to_deliver; 387 atomic_t watchdog_pretimeouts_to_deliver;
383 struct tasklet_struct recv_tasklet; 388 struct tasklet_struct recv_tasklet;
384 389
390 spinlock_t xmit_msgs_lock;
391 struct list_head xmit_msgs;
392 struct ipmi_smi_msg *curr_msg;
393 struct list_head hp_xmit_msgs;
394
385 /* 395 /*
386 * The list of command receivers that are registered for commands 396 * The list of command receivers that are registered for commands
387 * on this interface. 397 * on this interface.
@@ -474,6 +484,18 @@ static DEFINE_MUTEX(smi_watchers_mutex);
474#define ipmi_get_stat(intf, stat) \ 484#define ipmi_get_stat(intf, stat) \
475 ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat])) 485 ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
476 486
487static char *addr_src_to_str[] = { "invalid", "hotmod", "hardcoded", "SPMI",
488 "ACPI", "SMBIOS", "PCI",
489 "device-tree", "default" };
490
491const char *ipmi_addr_src_to_str(enum ipmi_addr_src src)
492{
493 if (src > SI_DEFAULT)
494 src = 0; /* Invalid */
495 return addr_src_to_str[src];
496}
497EXPORT_SYMBOL(ipmi_addr_src_to_str);
498
477static int is_lan_addr(struct ipmi_addr *addr) 499static int is_lan_addr(struct ipmi_addr *addr)
478{ 500{
479 return addr->addr_type == IPMI_LAN_ADDR_TYPE; 501 return addr->addr_type == IPMI_LAN_ADDR_TYPE;
@@ -517,7 +539,7 @@ static void clean_up_interface_data(ipmi_smi_t intf)
517 539
518 tasklet_kill(&intf->recv_tasklet); 540 tasklet_kill(&intf->recv_tasklet);
519 541
520 free_smi_msg_list(&intf->waiting_msgs); 542 free_smi_msg_list(&intf->waiting_rcv_msgs);
521 free_recv_msg_list(&intf->waiting_events); 543 free_recv_msg_list(&intf->waiting_events);
522 544
523 /* 545 /*
@@ -1473,6 +1495,30 @@ static inline void format_lan_msg(struct ipmi_smi_msg *smi_msg,
1473 smi_msg->msgid = msgid; 1495 smi_msg->msgid = msgid;
1474} 1496}
1475 1497
1498static void smi_send(ipmi_smi_t intf, struct ipmi_smi_handlers *handlers,
1499 struct ipmi_smi_msg *smi_msg, int priority)
1500{
1501 int run_to_completion = intf->run_to_completion;
1502 unsigned long flags;
1503
1504 if (!run_to_completion)
1505 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
1506 if (intf->curr_msg) {
1507 if (priority > 0)
1508 list_add_tail(&smi_msg->link, &intf->hp_xmit_msgs);
1509 else
1510 list_add_tail(&smi_msg->link, &intf->xmit_msgs);
1511 smi_msg = NULL;
1512 } else {
1513 intf->curr_msg = smi_msg;
1514 }
1515 if (!run_to_completion)
1516 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
1517
1518 if (smi_msg)
1519 handlers->sender(intf->send_info, smi_msg);
1520}
1521
1476/* 1522/*
1477 * Separate from ipmi_request so that the user does not have to be 1523 * Separate from ipmi_request so that the user does not have to be
1478 * supplied in certain circumstances (mainly at panic time). If 1524 * supplied in certain circumstances (mainly at panic time). If
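
smi_send() above serializes transmission: if a message is already in flight (curr_msg), new work is queued, with priority > 0 going to a separate high-priority list. A hypothetical completion-side helper (not in this hunk) sketching how the next message would be chosen under the same lock; locking is shown unconditionally, while the real code skips it in run-to-completion mode:

static struct ipmi_smi_msg *next_xmit_msg(ipmi_smi_t intf)
{
	struct ipmi_smi_msg *msg = NULL;
	unsigned long flags;

	spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
	if (!list_empty(&intf->hp_xmit_msgs))	/* drain high-priority first */
		msg = list_first_entry(&intf->hp_xmit_msgs,
				       struct ipmi_smi_msg, link);
	else if (!list_empty(&intf->xmit_msgs))
		msg = list_first_entry(&intf->xmit_msgs,
				       struct ipmi_smi_msg, link);
	if (msg)
		list_del(&msg->link);
	intf->curr_msg = msg;			/* NULL means the line is idle */
	spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
	return msg;
}
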
@@ -1497,7 +1543,6 @@ static int i_ipmi_request(ipmi_user_t user,
1497 struct ipmi_smi_msg *smi_msg; 1543 struct ipmi_smi_msg *smi_msg;
1498 struct ipmi_recv_msg *recv_msg; 1544 struct ipmi_recv_msg *recv_msg;
1499 unsigned long flags; 1545 unsigned long flags;
1500 struct ipmi_smi_handlers *handlers;
1501 1546
1502 1547
1503 if (supplied_recv) 1548 if (supplied_recv)
@@ -1520,8 +1565,7 @@ static int i_ipmi_request(ipmi_user_t user,
1520 } 1565 }
1521 1566
1522 rcu_read_lock(); 1567 rcu_read_lock();
1523 handlers = intf->handlers; 1568 if (intf->in_shutdown) {
1524 if (!handlers) {
1525 rv = -ENODEV; 1569 rv = -ENODEV;
1526 goto out_err; 1570 goto out_err;
1527 } 1571 }
@@ -1856,7 +1900,7 @@ static int i_ipmi_request(ipmi_user_t user,
1856 } 1900 }
1857#endif 1901#endif
1858 1902
1859 handlers->sender(intf->send_info, smi_msg, priority); 1903 smi_send(intf, intf->handlers, smi_msg, priority);
1860 rcu_read_unlock(); 1904 rcu_read_unlock();
1861 1905
1862 return 0; 1906 return 0;
@@ -2153,7 +2197,7 @@ static void remove_proc_entries(ipmi_smi_t smi)
2153static int __find_bmc_guid(struct device *dev, void *data) 2197static int __find_bmc_guid(struct device *dev, void *data)
2154{ 2198{
2155 unsigned char *id = data; 2199 unsigned char *id = data;
2156 struct bmc_device *bmc = dev_get_drvdata(dev); 2200 struct bmc_device *bmc = to_bmc_device(dev);
2157 return memcmp(bmc->guid, id, 16) == 0; 2201 return memcmp(bmc->guid, id, 16) == 0;
2158} 2202}
2159 2203
@@ -2164,7 +2208,7 @@ static struct bmc_device *ipmi_find_bmc_guid(struct device_driver *drv,
2164 2208
2165 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid); 2209 dev = driver_find_device(drv, NULL, guid, __find_bmc_guid);
2166 if (dev) 2210 if (dev)
2167 return dev_get_drvdata(dev); 2211 return to_bmc_device(dev);
2168 else 2212 else
2169 return NULL; 2213 return NULL;
2170} 2214}
@@ -2177,7 +2221,7 @@ struct prod_dev_id {
2177static int __find_bmc_prod_dev_id(struct device *dev, void *data) 2221static int __find_bmc_prod_dev_id(struct device *dev, void *data)
2178{ 2222{
2179 struct prod_dev_id *id = data; 2223 struct prod_dev_id *id = data;
2180 struct bmc_device *bmc = dev_get_drvdata(dev); 2224 struct bmc_device *bmc = to_bmc_device(dev);
2181 2225
2182 return (bmc->id.product_id == id->product_id 2226 return (bmc->id.product_id == id->product_id
2183 && bmc->id.device_id == id->device_id); 2227 && bmc->id.device_id == id->device_id);
@@ -2195,7 +2239,7 @@ static struct bmc_device *ipmi_find_bmc_prod_dev_id(
2195 2239
2196 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id); 2240 dev = driver_find_device(drv, NULL, &id, __find_bmc_prod_dev_id);
2197 if (dev) 2241 if (dev)
2198 return dev_get_drvdata(dev); 2242 return to_bmc_device(dev);
2199 else 2243 else
2200 return NULL; 2244 return NULL;
2201} 2245}
@@ -2204,84 +2248,92 @@ static ssize_t device_id_show(struct device *dev,
2204 struct device_attribute *attr, 2248 struct device_attribute *attr,
2205 char *buf) 2249 char *buf)
2206{ 2250{
2207 struct bmc_device *bmc = dev_get_drvdata(dev); 2251 struct bmc_device *bmc = to_bmc_device(dev);
2208 2252
2209 return snprintf(buf, 10, "%u\n", bmc->id.device_id); 2253 return snprintf(buf, 10, "%u\n", bmc->id.device_id);
2210} 2254}
2255DEVICE_ATTR(device_id, S_IRUGO, device_id_show, NULL);
2211 2256
2212static ssize_t provides_dev_sdrs_show(struct device *dev, 2257static ssize_t provides_device_sdrs_show(struct device *dev,
2213 struct device_attribute *attr, 2258 struct device_attribute *attr,
2214 char *buf) 2259 char *buf)
2215{ 2260{
2216 struct bmc_device *bmc = dev_get_drvdata(dev); 2261 struct bmc_device *bmc = to_bmc_device(dev);
2217 2262
2218 return snprintf(buf, 10, "%u\n", 2263 return snprintf(buf, 10, "%u\n",
2219 (bmc->id.device_revision & 0x80) >> 7); 2264 (bmc->id.device_revision & 0x80) >> 7);
2220} 2265}
2266DEVICE_ATTR(provides_device_sdrs, S_IRUGO, provides_device_sdrs_show, NULL);
2221 2267
2222static ssize_t revision_show(struct device *dev, struct device_attribute *attr, 2268static ssize_t revision_show(struct device *dev, struct device_attribute *attr,
2223 char *buf) 2269 char *buf)
2224{ 2270{
2225 struct bmc_device *bmc = dev_get_drvdata(dev); 2271 struct bmc_device *bmc = to_bmc_device(dev);
2226 2272
2227 return snprintf(buf, 20, "%u\n", 2273 return snprintf(buf, 20, "%u\n",
2228 bmc->id.device_revision & 0x0F); 2274 bmc->id.device_revision & 0x0F);
2229} 2275}
2276DEVICE_ATTR(revision, S_IRUGO, revision_show, NULL);
2230 2277
2231static ssize_t firmware_rev_show(struct device *dev, 2278static ssize_t firmware_revision_show(struct device *dev,
2232 struct device_attribute *attr, 2279 struct device_attribute *attr,
2233 char *buf) 2280 char *buf)
2234{ 2281{
2235 struct bmc_device *bmc = dev_get_drvdata(dev); 2282 struct bmc_device *bmc = to_bmc_device(dev);
2236 2283
2237 return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1, 2284 return snprintf(buf, 20, "%u.%x\n", bmc->id.firmware_revision_1,
2238 bmc->id.firmware_revision_2); 2285 bmc->id.firmware_revision_2);
2239} 2286}
2287DEVICE_ATTR(firmware_revision, S_IRUGO, firmware_revision_show, NULL);
2240 2288
2241static ssize_t ipmi_version_show(struct device *dev, 2289static ssize_t ipmi_version_show(struct device *dev,
2242 struct device_attribute *attr, 2290 struct device_attribute *attr,
2243 char *buf) 2291 char *buf)
2244{ 2292{
2245 struct bmc_device *bmc = dev_get_drvdata(dev); 2293 struct bmc_device *bmc = to_bmc_device(dev);
2246 2294
2247 return snprintf(buf, 20, "%u.%u\n", 2295 return snprintf(buf, 20, "%u.%u\n",
2248 ipmi_version_major(&bmc->id), 2296 ipmi_version_major(&bmc->id),
2249 ipmi_version_minor(&bmc->id)); 2297 ipmi_version_minor(&bmc->id));
2250} 2298}
2299DEVICE_ATTR(ipmi_version, S_IRUGO, ipmi_version_show, NULL);
2251 2300
2252static ssize_t add_dev_support_show(struct device *dev, 2301static ssize_t add_dev_support_show(struct device *dev,
2253 struct device_attribute *attr, 2302 struct device_attribute *attr,
2254 char *buf) 2303 char *buf)
2255{ 2304{
2256 struct bmc_device *bmc = dev_get_drvdata(dev); 2305 struct bmc_device *bmc = to_bmc_device(dev);
2257 2306
2258 return snprintf(buf, 10, "0x%02x\n", 2307 return snprintf(buf, 10, "0x%02x\n",
2259 bmc->id.additional_device_support); 2308 bmc->id.additional_device_support);
2260} 2309}
2310DEVICE_ATTR(additional_device_support, S_IRUGO, add_dev_support_show, NULL);
2261 2311
2262static ssize_t manufacturer_id_show(struct device *dev, 2312static ssize_t manufacturer_id_show(struct device *dev,
2263 struct device_attribute *attr, 2313 struct device_attribute *attr,
2264 char *buf) 2314 char *buf)
2265{ 2315{
2266 struct bmc_device *bmc = dev_get_drvdata(dev); 2316 struct bmc_device *bmc = to_bmc_device(dev);
2267 2317
2268 return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id); 2318 return snprintf(buf, 20, "0x%6.6x\n", bmc->id.manufacturer_id);
2269} 2319}
2320DEVICE_ATTR(manufacturer_id, S_IRUGO, manufacturer_id_show, NULL);
2270 2321
2271static ssize_t product_id_show(struct device *dev, 2322static ssize_t product_id_show(struct device *dev,
2272 struct device_attribute *attr, 2323 struct device_attribute *attr,
2273 char *buf) 2324 char *buf)
2274{ 2325{
2275 struct bmc_device *bmc = dev_get_drvdata(dev); 2326 struct bmc_device *bmc = to_bmc_device(dev);
2276 2327
2277 return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id); 2328 return snprintf(buf, 10, "0x%4.4x\n", bmc->id.product_id);
2278} 2329}
2330DEVICE_ATTR(product_id, S_IRUGO, product_id_show, NULL);
2279 2331
2280static ssize_t aux_firmware_rev_show(struct device *dev, 2332static ssize_t aux_firmware_rev_show(struct device *dev,
2281 struct device_attribute *attr, 2333 struct device_attribute *attr,
2282 char *buf) 2334 char *buf)
2283{ 2335{
2284 struct bmc_device *bmc = dev_get_drvdata(dev); 2336 struct bmc_device *bmc = to_bmc_device(dev);
2285 2337
2286 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n", 2338 return snprintf(buf, 21, "0x%02x 0x%02x 0x%02x 0x%02x\n",
2287 bmc->id.aux_firmware_revision[3], 2339 bmc->id.aux_firmware_revision[3],
@@ -2289,174 +2341,96 @@ static ssize_t aux_firmware_rev_show(struct device *dev,
2289 bmc->id.aux_firmware_revision[1], 2341 bmc->id.aux_firmware_revision[1],
2290 bmc->id.aux_firmware_revision[0]); 2342 bmc->id.aux_firmware_revision[0]);
2291} 2343}
2344DEVICE_ATTR(aux_firmware_revision, S_IRUGO, aux_firmware_rev_show, NULL);
2292 2345
2293static ssize_t guid_show(struct device *dev, struct device_attribute *attr, 2346static ssize_t guid_show(struct device *dev, struct device_attribute *attr,
2294 char *buf) 2347 char *buf)
2295{ 2348{
2296 struct bmc_device *bmc = dev_get_drvdata(dev); 2349 struct bmc_device *bmc = to_bmc_device(dev);
2297 2350
2298 return snprintf(buf, 100, "%Lx%Lx\n", 2351 return snprintf(buf, 100, "%Lx%Lx\n",
2299 (long long) bmc->guid[0], 2352 (long long) bmc->guid[0],
2300 (long long) bmc->guid[8]); 2353 (long long) bmc->guid[8]);
2301} 2354}
2355DEVICE_ATTR(guid, S_IRUGO, guid_show, NULL);
2356
2357static struct attribute *bmc_dev_attrs[] = {
2358 &dev_attr_device_id.attr,
2359 &dev_attr_provides_device_sdrs.attr,
2360 &dev_attr_revision.attr,
2361 &dev_attr_firmware_revision.attr,
2362 &dev_attr_ipmi_version.attr,
2363 &dev_attr_additional_device_support.attr,
2364 &dev_attr_manufacturer_id.attr,
2365 &dev_attr_product_id.attr,
2366 NULL
2367};
2302 2368
2303static void remove_files(struct bmc_device *bmc) 2369static struct attribute_group bmc_dev_attr_group = {
2304{ 2370 .attrs = bmc_dev_attrs,
2305 if (!bmc->dev) 2371};
2306 return;
2307 2372
2308 device_remove_file(&bmc->dev->dev, 2373static const struct attribute_group *bmc_dev_attr_groups[] = {
2309 &bmc->device_id_attr); 2374 &bmc_dev_attr_group,
2310 device_remove_file(&bmc->dev->dev, 2375 NULL
2311 &bmc->provides_dev_sdrs_attr); 2376};
2312 device_remove_file(&bmc->dev->dev,
2313 &bmc->revision_attr);
2314 device_remove_file(&bmc->dev->dev,
2315 &bmc->firmware_rev_attr);
2316 device_remove_file(&bmc->dev->dev,
2317 &bmc->version_attr);
2318 device_remove_file(&bmc->dev->dev,
2319 &bmc->add_dev_support_attr);
2320 device_remove_file(&bmc->dev->dev,
2321 &bmc->manufacturer_id_attr);
2322 device_remove_file(&bmc->dev->dev,
2323 &bmc->product_id_attr);
2324 2377
2325 if (bmc->id.aux_firmware_revision_set) 2378static struct device_type bmc_device_type = {
2326 device_remove_file(&bmc->dev->dev, 2379 .groups = bmc_dev_attr_groups,
2327 &bmc->aux_firmware_rev_attr); 2380};
2328 if (bmc->guid_set) 2381
2329 device_remove_file(&bmc->dev->dev, 2382static void
2330 &bmc->guid_attr); 2383release_bmc_device(struct device *dev)
2384{
2385 kfree(to_bmc_device(dev));
2331} 2386}
2332 2387
2333static void 2388static void
2334cleanup_bmc_device(struct kref *ref) 2389cleanup_bmc_device(struct kref *ref)
2335{ 2390{
2336 struct bmc_device *bmc; 2391 struct bmc_device *bmc = container_of(ref, struct bmc_device, usecount);
2337 2392
2338 bmc = container_of(ref, struct bmc_device, refcount); 2393 if (bmc->id.aux_firmware_revision_set)
2394 device_remove_file(&bmc->pdev.dev,
2395 &bmc->aux_firmware_rev_attr);
2396 if (bmc->guid_set)
2397 device_remove_file(&bmc->pdev.dev,
2398 &bmc->guid_attr);
2339 2399
2340 remove_files(bmc); 2400 platform_device_unregister(&bmc->pdev);
2341 platform_device_unregister(bmc->dev);
2342 kfree(bmc);
2343} 2401}
2344 2402
2345static void ipmi_bmc_unregister(ipmi_smi_t intf) 2403static void ipmi_bmc_unregister(ipmi_smi_t intf)
2346{ 2404{
2347 struct bmc_device *bmc = intf->bmc; 2405 struct bmc_device *bmc = intf->bmc;
2348 2406
2349 if (intf->sysfs_name) { 2407 sysfs_remove_link(&intf->si_dev->kobj, "bmc");
2350 sysfs_remove_link(&intf->si_dev->kobj, intf->sysfs_name);
2351 kfree(intf->sysfs_name);
2352 intf->sysfs_name = NULL;
2353 }
2354 if (intf->my_dev_name) { 2408 if (intf->my_dev_name) {
2355 sysfs_remove_link(&bmc->dev->dev.kobj, intf->my_dev_name); 2409 sysfs_remove_link(&bmc->pdev.dev.kobj, intf->my_dev_name);
2356 kfree(intf->my_dev_name); 2410 kfree(intf->my_dev_name);
2357 intf->my_dev_name = NULL; 2411 intf->my_dev_name = NULL;
2358 } 2412 }
2359 2413
2360 mutex_lock(&ipmidriver_mutex); 2414 mutex_lock(&ipmidriver_mutex);
2361 kref_put(&bmc->refcount, cleanup_bmc_device); 2415 kref_put(&bmc->usecount, cleanup_bmc_device);
2362 intf->bmc = NULL; 2416 intf->bmc = NULL;
2363 mutex_unlock(&ipmidriver_mutex); 2417 mutex_unlock(&ipmidriver_mutex);
2364} 2418}
2365 2419
2366static int create_files(struct bmc_device *bmc) 2420static int create_bmc_files(struct bmc_device *bmc)
2367{ 2421{
2368 int err; 2422 int err;
2369 2423
2370 bmc->device_id_attr.attr.name = "device_id";
2371 bmc->device_id_attr.attr.mode = S_IRUGO;
2372 bmc->device_id_attr.show = device_id_show;
2373 sysfs_attr_init(&bmc->device_id_attr.attr);
2374
2375 bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
2376 bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
2377 bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
2378 sysfs_attr_init(&bmc->provides_dev_sdrs_attr.attr);
2379
2380 bmc->revision_attr.attr.name = "revision";
2381 bmc->revision_attr.attr.mode = S_IRUGO;
2382 bmc->revision_attr.show = revision_show;
2383 sysfs_attr_init(&bmc->revision_attr.attr);
2384
2385 bmc->firmware_rev_attr.attr.name = "firmware_revision";
2386 bmc->firmware_rev_attr.attr.mode = S_IRUGO;
2387 bmc->firmware_rev_attr.show = firmware_rev_show;
2388 sysfs_attr_init(&bmc->firmware_rev_attr.attr);
2389
2390 bmc->version_attr.attr.name = "ipmi_version";
2391 bmc->version_attr.attr.mode = S_IRUGO;
2392 bmc->version_attr.show = ipmi_version_show;
2393 sysfs_attr_init(&bmc->version_attr.attr);
2394
2395 bmc->add_dev_support_attr.attr.name = "additional_device_support";
2396 bmc->add_dev_support_attr.attr.mode = S_IRUGO;
2397 bmc->add_dev_support_attr.show = add_dev_support_show;
2398 sysfs_attr_init(&bmc->add_dev_support_attr.attr);
2399
2400 bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
2401 bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
2402 bmc->manufacturer_id_attr.show = manufacturer_id_show;
2403 sysfs_attr_init(&bmc->manufacturer_id_attr.attr);
2404
2405 bmc->product_id_attr.attr.name = "product_id";
2406 bmc->product_id_attr.attr.mode = S_IRUGO;
2407 bmc->product_id_attr.show = product_id_show;
2408 sysfs_attr_init(&bmc->product_id_attr.attr);
2409
2410 bmc->guid_attr.attr.name = "guid";
2411 bmc->guid_attr.attr.mode = S_IRUGO;
2412 bmc->guid_attr.show = guid_show;
2413 sysfs_attr_init(&bmc->guid_attr.attr);
2414
2415 bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2416 bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
2417 bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
2418 sysfs_attr_init(&bmc->aux_firmware_rev_attr.attr);
2419
2420 err = device_create_file(&bmc->dev->dev,
2421 &bmc->device_id_attr);
2422 if (err)
2423 goto out;
2424 err = device_create_file(&bmc->dev->dev,
2425 &bmc->provides_dev_sdrs_attr);
2426 if (err)
2427 goto out_devid;
2428 err = device_create_file(&bmc->dev->dev,
2429 &bmc->revision_attr);
2430 if (err)
2431 goto out_sdrs;
2432 err = device_create_file(&bmc->dev->dev,
2433 &bmc->firmware_rev_attr);
2434 if (err)
2435 goto out_rev;
2436 err = device_create_file(&bmc->dev->dev,
2437 &bmc->version_attr);
2438 if (err)
2439 goto out_firm;
2440 err = device_create_file(&bmc->dev->dev,
2441 &bmc->add_dev_support_attr);
2442 if (err)
2443 goto out_version;
2444 err = device_create_file(&bmc->dev->dev,
2445 &bmc->manufacturer_id_attr);
2446 if (err)
2447 goto out_add_dev;
2448 err = device_create_file(&bmc->dev->dev,
2449 &bmc->product_id_attr);
2450 if (err)
2451 goto out_manu;
2452 if (bmc->id.aux_firmware_revision_set) { 2424 if (bmc->id.aux_firmware_revision_set) {
2453 err = device_create_file(&bmc->dev->dev, 2425 bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
2426 err = device_create_file(&bmc->pdev.dev,
2454 &bmc->aux_firmware_rev_attr); 2427 &bmc->aux_firmware_rev_attr);
2455 if (err) 2428 if (err)
2456 goto out_prod_id; 2429 goto out;
2457 } 2430 }
2458 if (bmc->guid_set) { 2431 if (bmc->guid_set) {
2459 err = device_create_file(&bmc->dev->dev, 2432 bmc->guid_attr.attr.name = "guid";
2433 err = device_create_file(&bmc->pdev.dev,
2460 &bmc->guid_attr); 2434 &bmc->guid_attr);
2461 if (err) 2435 if (err)
2462 goto out_aux_firm; 2436 goto out_aux_firm;
@@ -2466,44 +2440,17 @@ static int create_files(struct bmc_device *bmc)
2466 2440
2467out_aux_firm: 2441out_aux_firm:
2468 if (bmc->id.aux_firmware_revision_set) 2442 if (bmc->id.aux_firmware_revision_set)
2469 device_remove_file(&bmc->dev->dev, 2443 device_remove_file(&bmc->pdev.dev,
2470 &bmc->aux_firmware_rev_attr); 2444 &bmc->aux_firmware_rev_attr);
2471out_prod_id:
2472 device_remove_file(&bmc->dev->dev,
2473 &bmc->product_id_attr);
2474out_manu:
2475 device_remove_file(&bmc->dev->dev,
2476 &bmc->manufacturer_id_attr);
2477out_add_dev:
2478 device_remove_file(&bmc->dev->dev,
2479 &bmc->add_dev_support_attr);
2480out_version:
2481 device_remove_file(&bmc->dev->dev,
2482 &bmc->version_attr);
2483out_firm:
2484 device_remove_file(&bmc->dev->dev,
2485 &bmc->firmware_rev_attr);
2486out_rev:
2487 device_remove_file(&bmc->dev->dev,
2488 &bmc->revision_attr);
2489out_sdrs:
2490 device_remove_file(&bmc->dev->dev,
2491 &bmc->provides_dev_sdrs_attr);
2492out_devid:
2493 device_remove_file(&bmc->dev->dev,
2494 &bmc->device_id_attr);
2495out: 2445out:
2496 return err; 2446 return err;
2497} 2447}
2498 2448
2499static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum, 2449static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum)
2500 const char *sysfs_name)
2501{ 2450{
2502 int rv; 2451 int rv;
2503 struct bmc_device *bmc = intf->bmc; 2452 struct bmc_device *bmc = intf->bmc;
2504 struct bmc_device *old_bmc; 2453 struct bmc_device *old_bmc;
2505 int size;
2506 char dummy[1];
2507 2454
2508 mutex_lock(&ipmidriver_mutex); 2455 mutex_lock(&ipmidriver_mutex);
2509 2456
@@ -2527,7 +2474,7 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
2527 intf->bmc = old_bmc; 2474 intf->bmc = old_bmc;
2528 bmc = old_bmc; 2475 bmc = old_bmc;
2529 2476
2530 kref_get(&bmc->refcount); 2477 kref_get(&bmc->usecount);
2531 mutex_unlock(&ipmidriver_mutex); 2478 mutex_unlock(&ipmidriver_mutex);
2532 2479
2533 printk(KERN_INFO 2480 printk(KERN_INFO
@@ -2537,12 +2484,12 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
2537 bmc->id.product_id, 2484 bmc->id.product_id,
2538 bmc->id.device_id); 2485 bmc->id.device_id);
2539 } else { 2486 } else {
2540 char name[14];
2541 unsigned char orig_dev_id = bmc->id.device_id; 2487 unsigned char orig_dev_id = bmc->id.device_id;
2542 int warn_printed = 0; 2488 int warn_printed = 0;
2543 2489
2544 snprintf(name, sizeof(name), 2490 snprintf(bmc->name, sizeof(bmc->name),
2545 "ipmi_bmc.%4.4x", bmc->id.product_id); 2491 "ipmi_bmc.%4.4x", bmc->id.product_id);
2492 bmc->pdev.name = bmc->name;
2546 2493
2547 while (ipmi_find_bmc_prod_dev_id(&ipmidriver.driver, 2494 while (ipmi_find_bmc_prod_dev_id(&ipmidriver.driver,
2548 bmc->id.product_id, 2495 bmc->id.product_id,
@@ -2566,23 +2513,16 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
 			}
 		}
 
-		bmc->dev = platform_device_alloc(name, bmc->id.device_id);
-		if (!bmc->dev) {
-			mutex_unlock(&ipmidriver_mutex);
-			printk(KERN_ERR
-			       "ipmi_msghandler:"
-			       " Unable to allocate platform device\n");
-			return -ENOMEM;
-		}
-		bmc->dev->dev.driver = &ipmidriver.driver;
-		dev_set_drvdata(&bmc->dev->dev, bmc);
-		kref_init(&bmc->refcount);
+		bmc->pdev.dev.driver = &ipmidriver.driver;
+		bmc->pdev.id = bmc->id.device_id;
+		bmc->pdev.dev.release = release_bmc_device;
+		bmc->pdev.dev.type = &bmc_device_type;
+		kref_init(&bmc->usecount);
 
-		rv = platform_device_add(bmc->dev);
+		rv = platform_device_register(&bmc->pdev);
 		mutex_unlock(&ipmidriver_mutex);
 		if (rv) {
-			platform_device_put(bmc->dev);
-			bmc->dev = NULL;
+			put_device(&bmc->pdev.dev);
 			printk(KERN_ERR
 			       "ipmi_msghandler:"
 			       " Unable to register bmc device: %d\n",
@@ -2594,10 +2534,10 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
 			return rv;
 		}
 
-		rv = create_files(bmc);
+		rv = create_bmc_files(bmc);
 		if (rv) {
 			mutex_lock(&ipmidriver_mutex);
-			platform_device_unregister(bmc->dev);
+			platform_device_unregister(&bmc->pdev);
 			mutex_unlock(&ipmidriver_mutex);
 
 			return rv;
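The BMC's platform_device is now embedded in struct bmc_device rather than separately allocated, which is why a release callback becomes mandatory and the failure path uses put_device() instead of platform_device_put(). A generic sketch of the idiom, with a hypothetical container type:

	/* Sketch: an embedded platform_device (hypothetical container). */
	struct my_ctx {
		struct platform_device pdev;	/* embedded, not a pointer */
		char name[16];
	};

	static void my_ctx_release(struct device *dev)
	{
		struct my_ctx *ctx = container_of(dev, struct my_ctx, pdev.dev);

		kfree(ctx);	/* device lifetime ends with the container */
	}

	static int register_ctx(struct my_ctx *ctx, int id)
	{
		int rv;

		snprintf(ctx->name, sizeof(ctx->name), "my_ctx.%4.4x", id);
		ctx->pdev.name = ctx->name;
		ctx->pdev.id = id;
		ctx->pdev.dev.release = my_ctx_release;

		rv = platform_device_register(&ctx->pdev);
		if (rv)
			put_device(&ctx->pdev.dev);	/* drop the embedded ref */
		return rv;
	}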
@@ -2614,44 +2554,26 @@ static int ipmi_bmc_register(ipmi_smi_t intf, int ifnum,
 	 * create symlink from system interface device to bmc device
 	 * and back.
 	 */
-	intf->sysfs_name = kstrdup(sysfs_name, GFP_KERNEL);
-	if (!intf->sysfs_name) {
-		rv = -ENOMEM;
-		printk(KERN_ERR
-		       "ipmi_msghandler: allocate link to BMC: %d\n",
-		       rv);
-		goto out_err;
-	}
-
-	rv = sysfs_create_link(&intf->si_dev->kobj,
-			       &bmc->dev->dev.kobj, intf->sysfs_name);
+	rv = sysfs_create_link(&intf->si_dev->kobj, &bmc->pdev.dev.kobj, "bmc");
 	if (rv) {
-		kfree(intf->sysfs_name);
-		intf->sysfs_name = NULL;
 		printk(KERN_ERR
 		       "ipmi_msghandler: Unable to create bmc symlink: %d\n",
 		       rv);
 		goto out_err;
 	}
 
-	size = snprintf(dummy, 0, "ipmi%d", ifnum);
-	intf->my_dev_name = kmalloc(size+1, GFP_KERNEL);
+	intf->my_dev_name = kasprintf(GFP_KERNEL, "ipmi%d", ifnum);
 	if (!intf->my_dev_name) {
-		kfree(intf->sysfs_name);
-		intf->sysfs_name = NULL;
 		rv = -ENOMEM;
 		printk(KERN_ERR
 		       "ipmi_msghandler: allocate link from BMC: %d\n",
 		       rv);
 		goto out_err;
 	}
-	snprintf(intf->my_dev_name, size+1, "ipmi%d", ifnum);
 
-	rv = sysfs_create_link(&bmc->dev->dev.kobj, &intf->si_dev->kobj,
+	rv = sysfs_create_link(&bmc->pdev.dev.kobj, &intf->si_dev->kobj,
 			       intf->my_dev_name);
 	if (rv) {
-		kfree(intf->sysfs_name);
-		intf->sysfs_name = NULL;
 		kfree(intf->my_dev_name);
 		intf->my_dev_name = NULL;
 		printk(KERN_ERR
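The hunk above also collapses the old size-probe/allocate/format dance into a single kasprintf() call. For comparison, a short sketch of the new form (ipmi_dev_name() is a hypothetical wrapper, shown only to contrast the two idioms):

	static char *ipmi_dev_name(int ifnum)
	{
		/*
		 * Replaces the removed snprintf(dummy, 0, ...) + kmalloc()
		 * + snprintf() sequence: kasprintf() sizes, allocates and
		 * formats in one step and returns NULL on allocation failure.
		 */
		return kasprintf(GFP_KERNEL, "ipmi%d", ifnum);
	}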
@@ -2850,7 +2772,6 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
 		      void *send_info,
 		      struct ipmi_device_id *device_id,
 		      struct device *si_dev,
-		      const char *sysfs_name,
 		      unsigned char slave_addr)
 {
 	int i, j;
@@ -2909,12 +2830,15 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
 #ifdef CONFIG_PROC_FS
 	mutex_init(&intf->proc_entry_lock);
 #endif
-	spin_lock_init(&intf->waiting_msgs_lock);
-	INIT_LIST_HEAD(&intf->waiting_msgs);
+	spin_lock_init(&intf->waiting_rcv_msgs_lock);
+	INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
 	tasklet_init(&intf->recv_tasklet,
 		     smi_recv_tasklet,
 		     (unsigned long) intf);
 	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
+	spin_lock_init(&intf->xmit_msgs_lock);
+	INIT_LIST_HEAD(&intf->xmit_msgs);
+	INIT_LIST_HEAD(&intf->hp_xmit_msgs);
 	spin_lock_init(&intf->events_lock);
 	atomic_set(&intf->event_waiters, 0);
 	intf->ticks_to_req_ev = IPMI_REQUEST_EV_TIME;
@@ -2984,7 +2908,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
 	if (rv == 0)
 		rv = add_proc_entries(intf, i);
 
-	rv = ipmi_bmc_register(intf, i, sysfs_name);
+	rv = ipmi_bmc_register(intf, i);
 
  out:
 	if (rv) {
@@ -3014,12 +2938,50 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
 }
 EXPORT_SYMBOL(ipmi_register_smi);
 
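The next addition, deliver_smi_err_response(), fabricates a local error completion for a queued message. The rsp[0] = data[0] | 4 line is the IPMI framing rule: byte 0 carries (netfn << 2) | LUN, and a response netfn is the request netfn plus one, i.e. bit 2 of that byte (request netfns are even, so OR-ing works). A self-contained sketch of the arithmetic:

	#include <stdint.h>
	#include <stddef.h>

	/* Build a minimal IPMI error response from a request.
	 * req[0] = (netfn << 2) | lun, as in the handler below. */
	static size_t ipmi_err_rsp(const uint8_t *req, uint8_t *rsp, uint8_t cc)
	{
		rsp[0] = req[0] | 0x4;	/* netfn + 1 => response netfn */
		rsp[1] = req[1];	/* echo the command byte */
		rsp[2] = cc;		/* completion code */
		return 3;
	}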
2941static void deliver_smi_err_response(ipmi_smi_t intf,
2942 struct ipmi_smi_msg *msg,
2943 unsigned char err)
2944{
2945 msg->rsp[0] = msg->data[0] | 4;
2946 msg->rsp[1] = msg->data[1];
2947 msg->rsp[2] = err;
2948 msg->rsp_size = 3;
2949 /* It's an error, so it will never requeue, no need to check return. */
2950 handle_one_recv_msg(intf, msg);
2951}
2952
 static void cleanup_smi_msgs(ipmi_smi_t intf)
 {
 	int i;
 	struct seq_table *ent;
2957 struct ipmi_smi_msg *msg;
2958 struct list_head *entry;
2959 struct list_head tmplist;
2960
2961 /* Clear out our transmit queues and hold the messages. */
2962 INIT_LIST_HEAD(&tmplist);
2963 list_splice_tail(&intf->hp_xmit_msgs, &tmplist);
2964 list_splice_tail(&intf->xmit_msgs, &tmplist);
2965
2966 /* Current message first, to preserve order */
2967 while (intf->curr_msg && !list_empty(&intf->waiting_rcv_msgs)) {
2968 /* Wait for the message to clear out. */
2969 schedule_timeout(1);
2970 }
 
 	/* No need for locks, the interface is down. */
2973
2974 /*
2975 * Return errors for all pending messages in queue and in the
2976 * tables waiting for remote responses.
2977 */
2978 while (!list_empty(&tmplist)) {
2979 entry = tmplist.next;
2980 list_del(entry);
2981 msg = list_entry(entry, struct ipmi_smi_msg, link);
2982 deliver_smi_err_response(intf, msg, IPMI_ERR_UNSPECIFIED);
2983 }
2984
 	for (i = 0; i < IPMI_IPMB_NUM_SEQ; i++) {
 		ent = &(intf->seq_table[i]);
 		if (!ent->inuse)
@@ -3031,20 +2993,33 @@ static void cleanup_smi_msgs(ipmi_smi_t intf)
 int ipmi_unregister_smi(ipmi_smi_t intf)
 {
 	struct ipmi_smi_watcher *w;
 	int intf_num = intf->intf_num;
+	ipmi_user_t user;
 
 	ipmi_bmc_unregister(intf);
 
 	mutex_lock(&smi_watchers_mutex);
 	mutex_lock(&ipmi_interfaces_mutex);
 	intf->intf_num = -1;
-	intf->handlers = NULL;
+	intf->in_shutdown = true;
 	list_del_rcu(&intf->link);
 	mutex_unlock(&ipmi_interfaces_mutex);
 	synchronize_rcu();
 
 	cleanup_smi_msgs(intf);
 
3011 /* Clean up the effects of users on the lower-level software. */
3012 mutex_lock(&ipmi_interfaces_mutex);
3013 rcu_read_lock();
3014 list_for_each_entry_rcu(user, &intf->users, link) {
3015 module_put(intf->handlers->owner);
3016 if (intf->handlers->dec_usecount)
3017 intf->handlers->dec_usecount(intf->send_info);
3018 }
3019 rcu_read_unlock();
3020 intf->handlers = NULL;
3021 mutex_unlock(&ipmi_interfaces_mutex);
3022
 	remove_proc_entries(intf);
 
 	/*
@@ -3134,7 +3109,6 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
 	ipmi_user_t              user = NULL;
 	struct ipmi_ipmb_addr    *ipmb_addr;
 	struct ipmi_recv_msg     *recv_msg;
-	struct ipmi_smi_handlers *handlers;
 
 	if (msg->rsp_size < 10) {
 		/* Message not big enough, just ignore it. */
@@ -3188,9 +3162,8 @@ static int handle_ipmb_get_msg_cmd(ipmi_smi_t intf,
 		}
 #endif
 		rcu_read_lock();
-		handlers = intf->handlers;
-		if (handlers) {
-			handlers->sender(intf->send_info, msg, 0);
+		if (!intf->in_shutdown) {
+			smi_send(intf, intf->handlers, msg, 0);
 			/*
 			 * We used the message, so return the value
 			 * that causes it to not be freed or
@@ -3857,32 +3830,32 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
 
 	/* See if any waiting messages need to be processed. */
 	if (!run_to_completion)
-		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
-	while (!list_empty(&intf->waiting_msgs)) {
-		smi_msg = list_entry(intf->waiting_msgs.next,
+		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+	while (!list_empty(&intf->waiting_rcv_msgs)) {
+		smi_msg = list_entry(intf->waiting_rcv_msgs.next,
 				     struct ipmi_smi_msg, link);
-		list_del(&smi_msg->link);
 		if (!run_to_completion)
-			spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
+					       flags);
 		rv = handle_one_recv_msg(intf, smi_msg);
 		if (!run_to_completion)
-			spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
-		if (rv == 0) {
-			/* Message handled */
-			ipmi_free_smi_msg(smi_msg);
-		} else if (rv < 0) {
-			/* Fatal error on the message, del but don't free. */
-		} else {
+			spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+		if (rv > 0) {
 			/*
 			 * To preserve message order, quit if we
 			 * can't handle a message.
 			 */
-			list_add(&smi_msg->link, &intf->waiting_msgs);
 			break;
+		} else {
+			list_del(&smi_msg->link);
+			if (rv == 0)
+				/* Message handled */
+				ipmi_free_smi_msg(smi_msg);
+			/* If rv < 0, fatal error, del but don't free. */
 		}
 	}
 	if (!run_to_completion)
-		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+		spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock, flags);
 
 	/*
 	 * If the pretimeout count is non-zero, decrement one from it and
@@ -3903,7 +3876,41 @@ static void handle_new_recv_msgs(ipmi_smi_t intf)
 
 static void smi_recv_tasklet(unsigned long val)
 {
-	handle_new_recv_msgs((ipmi_smi_t) val);
+	unsigned long flags = 0; /* keep us warning-free. */
3880 ipmi_smi_t intf = (ipmi_smi_t) val;
3881 int run_to_completion = intf->run_to_completion;
3882 struct ipmi_smi_msg *newmsg = NULL;
3883
3884 /*
3885 * Start the next message if available.
3886 *
3887 * Do this here, not in the actual receiver, because we may deadlock
3888 * because the lower layer is allowed to hold locks while calling
3889 * message delivery.
3890 */
3891 if (!run_to_completion)
3892 spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
3893 if (intf->curr_msg == NULL && !intf->in_shutdown) {
3894 struct list_head *entry = NULL;
3895
3896 /* Pick the high priority queue first. */
3897 if (!list_empty(&intf->hp_xmit_msgs))
3898 entry = intf->hp_xmit_msgs.next;
3899 else if (!list_empty(&intf->xmit_msgs))
3900 entry = intf->xmit_msgs.next;
3901
3902 if (entry) {
3903 list_del(entry);
3904 newmsg = list_entry(entry, struct ipmi_smi_msg, link);
3905 intf->curr_msg = newmsg;
3906 }
3907 }
3908 if (!run_to_completion)
3909 spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
3910 if (newmsg)
3911 intf->handlers->sender(intf->send_info, newmsg);
3912
3913 handle_new_recv_msgs(intf);
 }
 
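The tasklet now also starts the next transmit, so the lower layer's sender() is never invoked while the lower layer holds delivery locks, which is the deadlock the new comment describes. A sketch of just the queue selection, using the fields initialized in ipmi_register_smi() above (next_to_send() is a hypothetical helper; locking is elided):

	/* Pick the next message to send, high-priority queue first. */
	static struct ipmi_smi_msg *next_to_send(ipmi_smi_t intf)
	{
		struct list_head *entry = NULL;

		if (!list_empty(&intf->hp_xmit_msgs))
			entry = intf->hp_xmit_msgs.next;
		else if (!list_empty(&intf->xmit_msgs))
			entry = intf->xmit_msgs.next;
		if (!entry)
			return NULL;	/* nothing queued */
		list_del(entry);
		return list_entry(entry, struct ipmi_smi_msg, link);
	}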
 /* Handle a new message from the lower layer. */
@@ -3911,13 +3918,16 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
 			   struct ipmi_smi_msg *msg)
 {
 	unsigned long flags = 0; /* keep us warning-free. */
-	int run_to_completion;
-
+	int run_to_completion = intf->run_to_completion;
 
 	if ((msg->data_size >= 2)
 	    && (msg->data[0] == (IPMI_NETFN_APP_REQUEST << 2))
 	    && (msg->data[1] == IPMI_SEND_MSG_CMD)
 	    && (msg->user_data == NULL)) {
3927
3928 if (intf->in_shutdown)
3929 goto free_msg;
3930
 		/*
 		 * This is the local response to a command send, start
 		 * the timer for these.  The user_data will not be
@@ -3953,29 +3963,40 @@ void ipmi_smi_msg_received(ipmi_smi_t intf,
 		/* The message was sent, start the timer. */
 		intf_start_seq_timer(intf, msg->msgid);
 
+free_msg:
 		ipmi_free_smi_msg(msg);
-		goto out;
+	} else {
+		/*
+		 * To preserve message order, we keep a queue and deliver from
+		 * a tasklet.
+		 */
+		if (!run_to_completion)
+			spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
+		list_add_tail(&msg->link, &intf->waiting_rcv_msgs);
+		if (!run_to_completion)
+			spin_unlock_irqrestore(&intf->waiting_rcv_msgs_lock,
+					       flags);
 	}
 
-	/*
-	 * To preserve message order, if the list is not empty, we
-	 * tack this message onto the end of the list.
-	 */
-	run_to_completion = intf->run_to_completion;
 	if (!run_to_completion)
-		spin_lock_irqsave(&intf->waiting_msgs_lock, flags);
-	list_add_tail(&msg->link, &intf->waiting_msgs);
+		spin_lock_irqsave(&intf->xmit_msgs_lock, flags);
+	if (msg == intf->curr_msg)
+		intf->curr_msg = NULL;
 	if (!run_to_completion)
-		spin_unlock_irqrestore(&intf->waiting_msgs_lock, flags);
+		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
 
-	tasklet_schedule(&intf->recv_tasklet);
- out:
-	return;
+	if (run_to_completion)
+		smi_recv_tasklet((unsigned long) intf);
+	else
+		tasklet_schedule(&intf->recv_tasklet);
 }
 EXPORT_SYMBOL(ipmi_smi_msg_received);
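ipmi_smi_msg_received() now only queues; actual delivery happens in the tasklet, and the loop in handle_new_recv_msgs() above leaves a message at the head of waiting_rcv_msgs when handle_one_recv_msg() reports "busy" (a positive return), so ordering survives a congested upper layer. A condensed restatement of that loop, with locking elided:

	/* Condensed sketch of the delivery loop (not new code). */
	while (!list_empty(&intf->waiting_rcv_msgs)) {
		smi_msg = list_entry(intf->waiting_rcv_msgs.next,
				     struct ipmi_smi_msg, link);
		rv = handle_one_recv_msg(intf, smi_msg);
		if (rv > 0)
			break;		/* busy: leave it queued, keep order */
		list_del(&smi_msg->link);
		if (rv == 0)
			ipmi_free_smi_msg(smi_msg);	/* delivered */
		/* rv < 0: fatal, unlink but do not free */
	}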
 
 void ipmi_smi_watchdog_pretimeout(ipmi_smi_t intf)
 {
+	if (intf->in_shutdown)
+		return;
+
 	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
 	tasklet_schedule(&intf->recv_tasklet);
 }
@@ -4017,7 +4038,7 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
 	struct ipmi_recv_msg *msg;
 	struct ipmi_smi_handlers *handlers;
 
-	if (intf->intf_num == -1)
+	if (intf->in_shutdown)
 		return;
 
 	if (!ent->inuse)
@@ -4082,8 +4103,7 @@ static void check_msg_timeout(ipmi_smi_t intf, struct seq_table *ent,
 			ipmi_inc_stat(intf,
 				      retransmitted_ipmb_commands);
 
-			intf->handlers->sender(intf->send_info,
-					       smi_msg, 0);
+			smi_send(intf, intf->handlers, smi_msg, 0);
 		} else
 			ipmi_free_smi_msg(smi_msg);
 
@@ -4145,15 +4165,12 @@ static unsigned int ipmi_timeout_handler(ipmi_smi_t intf, long timeout_period)
 
 static void ipmi_request_event(ipmi_smi_t intf)
 {
-	struct ipmi_smi_handlers *handlers;
-
 	/* No event requests when in maintenance mode. */
 	if (intf->maintenance_mode_enable)
 		return;
 
-	handlers = intf->handlers;
-	if (handlers)
-		handlers->request_events(intf->send_info);
+	if (!intf->in_shutdown)
+		intf->handlers->request_events(intf->send_info);
 }
 
 static struct timer_list ipmi_timer;
@@ -4548,6 +4565,7 @@ static int ipmi_init_msghandler(void)
 	proc_ipmi_root = proc_mkdir("ipmi", NULL);
 	if (!proc_ipmi_root) {
 		printk(KERN_ERR PFX "Unable to create IPMI proc dir");
+		driver_unregister(&ipmidriver.driver);
 		return -ENOMEM;
 	}
 
diff --git a/drivers/char/ipmi/ipmi_powernv.c b/drivers/char/ipmi/ipmi_powernv.c
new file mode 100644
index 000000000000..79524ed2a3cb
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_powernv.c
@@ -0,0 +1,310 @@
1/*
2 * PowerNV OPAL IPMI driver
3 *
4 * Copyright 2014 IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the Free
8 * Software Foundation; either version 2 of the License, or (at your option)
9 * any later version.
10 */
11
12#define pr_fmt(fmt) "ipmi-powernv: " fmt
13
14#include <linux/ipmi_smi.h>
15#include <linux/list.h>
16#include <linux/module.h>
17#include <linux/of.h>
18
19#include <asm/opal.h>
20
21
22struct ipmi_smi_powernv {
23 u64 interface_id;
24 struct ipmi_device_id ipmi_id;
25 ipmi_smi_t intf;
26 u64 event;
27 struct notifier_block event_nb;
28
29 /**
30 * We assume that there can only be one outstanding request, so
31 * keep the pending message in cur_msg. We protect this from concurrent
32 * updates through send & recv calls, (and consequently opal_msg, which
33 * is in-use when cur_msg is set) with msg_lock
34 */
35 spinlock_t msg_lock;
36 struct ipmi_smi_msg *cur_msg;
37 struct opal_ipmi_msg *opal_msg;
38};
39
40static int ipmi_powernv_start_processing(void *send_info, ipmi_smi_t intf)
41{
42 struct ipmi_smi_powernv *smi = send_info;
43
44 smi->intf = intf;
45 return 0;
46}
47
48static void send_error_reply(struct ipmi_smi_powernv *smi,
49 struct ipmi_smi_msg *msg, u8 completion_code)
50{
51 msg->rsp[0] = msg->data[0] | 0x4;
52 msg->rsp[1] = msg->data[1];
53 msg->rsp[2] = completion_code;
54 msg->rsp_size = 3;
55 ipmi_smi_msg_received(smi->intf, msg);
56}
57
58static void ipmi_powernv_send(void *send_info, struct ipmi_smi_msg *msg)
59{
60 struct ipmi_smi_powernv *smi = send_info;
61 struct opal_ipmi_msg *opal_msg;
62 unsigned long flags;
63 int comp, rc;
64 size_t size;
65
66 /* ensure data_len will fit in the opal_ipmi_msg buffer... */
67 if (msg->data_size > IPMI_MAX_MSG_LENGTH) {
68 comp = IPMI_REQ_LEN_EXCEEDED_ERR;
69 goto err;
70 }
71
72 /* ... and that we at least have netfn and cmd bytes */
73 if (msg->data_size < 2) {
74 comp = IPMI_REQ_LEN_INVALID_ERR;
75 goto err;
76 }
77
78 spin_lock_irqsave(&smi->msg_lock, flags);
79
80 if (smi->cur_msg) {
81 comp = IPMI_NODE_BUSY_ERR;
82 goto err_unlock;
83 }
84
85 /* format our data for the OPAL API */
86 opal_msg = smi->opal_msg;
87 opal_msg->version = OPAL_IPMI_MSG_FORMAT_VERSION_1;
88 opal_msg->netfn = msg->data[0];
89 opal_msg->cmd = msg->data[1];
90 if (msg->data_size > 2)
91 memcpy(opal_msg->data, msg->data + 2, msg->data_size - 2);
92
93 /* data_size already includes the netfn and cmd bytes */
94 size = sizeof(*opal_msg) + msg->data_size - 2;
95
96 pr_devel("%s: opal_ipmi_send(0x%llx, %p, %ld)\n", __func__,
97 smi->interface_id, opal_msg, size);
98 rc = opal_ipmi_send(smi->interface_id, opal_msg, size);
99 pr_devel("%s: -> %d\n", __func__, rc);
100
101 if (!rc) {
102 smi->cur_msg = msg;
103 spin_unlock_irqrestore(&smi->msg_lock, flags);
104 return;
105 }
106
107 comp = IPMI_ERR_UNSPECIFIED;
108err_unlock:
109 spin_unlock_irqrestore(&smi->msg_lock, flags);
110err:
111 send_error_reply(smi, msg, comp);
112}
113
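Since msg->data_size already counts the netfn and cmd bytes that move into the opal_ipmi_msg header, both the payload copy and the size computation above subtract 2. A self-contained check of that arithmetic (the header struct is a stand-in for the real opal_ipmi_msg layout):

	#include <assert.h>
	#include <stddef.h>

	struct opal_ipmi_hdr {	/* stand-in: version, netfn, cmd */
		unsigned char version, netfn, cmd;
	};

	int main(void)
	{
		size_t data_size = 2;	/* request with netfn + cmd only */
		size_t size = sizeof(struct opal_ipmi_hdr) + data_size - 2;

		/* No payload: the OPAL buffer is exactly the header. */
		assert(size == sizeof(struct opal_ipmi_hdr));
		return 0;
	}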
114static int ipmi_powernv_recv(struct ipmi_smi_powernv *smi)
115{
116 struct opal_ipmi_msg *opal_msg;
117 struct ipmi_smi_msg *msg;
118 unsigned long flags;
119 uint64_t size;
120 int rc;
121
122 pr_devel("%s: opal_ipmi_recv(%llx, msg, sz)\n", __func__,
123 smi->interface_id);
124
125 spin_lock_irqsave(&smi->msg_lock, flags);
126 	if (!smi->cur_msg) {
127 		spin_unlock_irqrestore(&smi->msg_lock, flags);
128 		pr_warn("no current message?\n");
129 		return 0;
130 	}
131
132 msg = smi->cur_msg;
133 opal_msg = smi->opal_msg;
134
135 size = cpu_to_be64(sizeof(*opal_msg) + IPMI_MAX_MSG_LENGTH);
136
137 rc = opal_ipmi_recv(smi->interface_id,
138 opal_msg,
139 &size);
140 size = be64_to_cpu(size);
141 pr_devel("%s: -> %d (size %lld)\n", __func__,
142 rc, rc == 0 ? size : 0);
143 if (rc) {
144 spin_unlock_irqrestore(&smi->msg_lock, flags);
145 ipmi_free_smi_msg(msg);
146 return 0;
147 }
148
149 if (size < sizeof(*opal_msg)) {
150 spin_unlock_irqrestore(&smi->msg_lock, flags);
151 pr_warn("unexpected IPMI message size %lld\n", size);
152 return 0;
153 }
154
155 if (opal_msg->version != OPAL_IPMI_MSG_FORMAT_VERSION_1) {
156 spin_unlock_irqrestore(&smi->msg_lock, flags);
157 pr_warn("unexpected IPMI message format (version %d)\n",
158 opal_msg->version);
159 return 0;
160 }
161
162 msg->rsp[0] = opal_msg->netfn;
163 msg->rsp[1] = opal_msg->cmd;
164 if (size > sizeof(*opal_msg))
165 memcpy(&msg->rsp[2], opal_msg->data, size - sizeof(*opal_msg));
166 msg->rsp_size = 2 + size - sizeof(*opal_msg);
167
168 smi->cur_msg = NULL;
169 spin_unlock_irqrestore(&smi->msg_lock, flags);
170 ipmi_smi_msg_received(smi->intf, msg);
171 return 0;
172}
173
174static void ipmi_powernv_request_events(void *send_info)
175{
176}
177
178static void ipmi_powernv_set_run_to_completion(void *send_info,
179 bool run_to_completion)
180{
181}
182
183static void ipmi_powernv_poll(void *send_info)
184{
185 struct ipmi_smi_powernv *smi = send_info;
186
187 ipmi_powernv_recv(smi);
188}
189
190static struct ipmi_smi_handlers ipmi_powernv_smi_handlers = {
191 .owner = THIS_MODULE,
192 .start_processing = ipmi_powernv_start_processing,
193 .sender = ipmi_powernv_send,
194 .request_events = ipmi_powernv_request_events,
195 .set_run_to_completion = ipmi_powernv_set_run_to_completion,
196 .poll = ipmi_powernv_poll,
197};
198
199static int ipmi_opal_event(struct notifier_block *nb,
200 unsigned long events, void *change)
201{
202 struct ipmi_smi_powernv *smi = container_of(nb,
203 struct ipmi_smi_powernv, event_nb);
204
205 if (events & smi->event)
206 ipmi_powernv_recv(smi);
207 return 0;
208}
209
210static int ipmi_powernv_probe(struct platform_device *pdev)
211{
212 struct ipmi_smi_powernv *ipmi;
213 struct device *dev;
214 u32 prop;
215 int rc;
216
217 if (!pdev || !pdev->dev.of_node)
218 return -ENODEV;
219
220 dev = &pdev->dev;
221
222 ipmi = devm_kzalloc(dev, sizeof(*ipmi), GFP_KERNEL);
223 if (!ipmi)
224 return -ENOMEM;
225
226 spin_lock_init(&ipmi->msg_lock);
227
228 rc = of_property_read_u32(dev->of_node, "ibm,ipmi-interface-id",
229 &prop);
230 if (rc) {
231 dev_warn(dev, "No interface ID property\n");
232 goto err_free;
233 }
234 ipmi->interface_id = prop;
235
236 rc = of_property_read_u32(dev->of_node, "interrupts", &prop);
237 if (rc) {
238 dev_warn(dev, "No interrupts property\n");
239 goto err_free;
240 }
241
242 ipmi->event = 1ull << prop;
243 ipmi->event_nb.notifier_call = ipmi_opal_event;
244
245 rc = opal_notifier_register(&ipmi->event_nb);
246 if (rc) {
247 dev_warn(dev, "OPAL notifier registration failed (%d)\n", rc);
248 goto err_free;
249 }
250
251 ipmi->opal_msg = devm_kmalloc(dev,
252 sizeof(*ipmi->opal_msg) + IPMI_MAX_MSG_LENGTH,
253 GFP_KERNEL);
254 if (!ipmi->opal_msg) {
255 rc = -ENOMEM;
256 goto err_unregister;
257 }
258
259 /* todo: query actual ipmi_device_id */
260 rc = ipmi_register_smi(&ipmi_powernv_smi_handlers, ipmi,
261 &ipmi->ipmi_id, dev, 0);
262 if (rc) {
263 dev_warn(dev, "IPMI SMI registration failed (%d)\n", rc);
264 goto err_free_msg;
265 }
266
267 dev_set_drvdata(dev, ipmi);
268 return 0;
269
270err_free_msg:
271 devm_kfree(dev, ipmi->opal_msg);
272err_unregister:
273 opal_notifier_unregister(&ipmi->event_nb);
274err_free:
275 devm_kfree(dev, ipmi);
276 return rc;
277}
278
279static int ipmi_powernv_remove(struct platform_device *pdev)
280{
281 struct ipmi_smi_powernv *smi = dev_get_drvdata(&pdev->dev);
282
283 ipmi_unregister_smi(smi->intf);
284 opal_notifier_unregister(&smi->event_nb);
285 return 0;
286}
287
288static const struct of_device_id ipmi_powernv_match[] = {
289 { .compatible = "ibm,opal-ipmi" },
290 { },
291};
292
293
294static struct platform_driver powernv_ipmi_driver = {
295 .driver = {
296 .name = "ipmi-powernv",
297 .owner = THIS_MODULE,
298 .of_match_table = ipmi_powernv_match,
299 },
300 .probe = ipmi_powernv_probe,
301 .remove = ipmi_powernv_remove,
302};
303
304
305module_platform_driver(powernv_ipmi_driver);
306
307MODULE_DEVICE_TABLE(of, ipmi_powernv_match);
308MODULE_DESCRIPTION("powernv IPMI driver");
309MODULE_AUTHOR("Jeremy Kerr <jk@ozlabs.org>");
310MODULE_LICENSE("GPL");
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 5c4e1f625bbb..90c7fdf95419 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -92,12 +92,9 @@ enum si_intf_state {
 	SI_GETTING_FLAGS,
 	SI_GETTING_EVENTS,
 	SI_CLEARING_FLAGS,
-	SI_CLEARING_FLAGS_THEN_SET_IRQ,
 	SI_GETTING_MESSAGES,
-	SI_ENABLE_INTERRUPTS1,
-	SI_ENABLE_INTERRUPTS2,
-	SI_DISABLE_INTERRUPTS1,
-	SI_DISABLE_INTERRUPTS2
+	SI_CHECKING_ENABLES,
+	SI_SETTING_ENABLES
 	/* FIXME - add watchdog stuff. */
 };
 
@@ -111,10 +108,6 @@ enum si_type {
 };
 static char *si_to_str[] = { "kcs", "smic", "bt" };
 
-static char *ipmi_addr_src_to_str[] = { NULL, "hotmod", "hardcoded", "SPMI",
-					"ACPI", "SMBIOS", "PCI",
-					"device-tree", "default" };
-
 #define DEVICE_NAME "ipmi_si"
 
 static struct platform_driver ipmi_driver;
@@ -174,8 +167,7 @@ struct smi_info {
 	struct si_sm_handlers *handlers;
 	enum si_type si_type;
 	spinlock_t si_lock;
-	struct list_head xmit_msgs;
-	struct list_head hp_xmit_msgs;
+	struct ipmi_smi_msg *waiting_msg;
 	struct ipmi_smi_msg *curr_msg;
 	enum si_intf_state si_state;
 
@@ -254,9 +246,6 @@ struct smi_info {
 	/* The time (in jiffies) the last timeout occurred at. */
 	unsigned long last_timeout_jiffies;
 
-	/* Used to gracefully stop the timer without race conditions. */
-	atomic_t stop_operation;
-
 	/* Are we waiting for the events, pretimeouts, received msgs? */
 	atomic_t need_watch;
 
@@ -268,6 +257,16 @@ struct smi_info {
 	 */
 	bool interrupt_disabled;
 
+	/*
+	 * Does the BMC support events?
+	 */
+	bool supports_event_msg_buff;
+
+	/*
+	 * Did we get an attention that we did not handle?
+	 */
+	bool got_attn;
+
 	/* From the get device id response... */
 	struct ipmi_device_id device_id;
 
@@ -332,7 +331,10 @@ static void deliver_recv_msg(struct smi_info *smi_info,
 			     struct ipmi_smi_msg *msg)
 {
 	/* Deliver the message to the upper layer. */
-	ipmi_smi_msg_received(smi_info->intf, msg);
+	if (smi_info->intf)
+		ipmi_smi_msg_received(smi_info->intf, msg);
+	else
+		ipmi_free_smi_msg(msg);
 }
 
 static void return_hosed_msg(struct smi_info *smi_info, int cCode)
@@ -356,28 +358,18 @@ static void return_hosed_msg(struct smi_info *smi_info, int cCode)
 static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 {
 	int rv;
-	struct list_head *entry = NULL;
 #ifdef DEBUG_TIMING
 	struct timeval t;
 #endif
 
-	/* Pick the high priority queue first. */
-	if (!list_empty(&(smi_info->hp_xmit_msgs))) {
-		entry = smi_info->hp_xmit_msgs.next;
-	} else if (!list_empty(&(smi_info->xmit_msgs))) {
-		entry = smi_info->xmit_msgs.next;
-	}
-
-	if (!entry) {
+	if (!smi_info->waiting_msg) {
 		smi_info->curr_msg = NULL;
 		rv = SI_SM_IDLE;
 	} else {
 		int err;
 
-		list_del(entry);
-		smi_info->curr_msg = list_entry(entry,
-						struct ipmi_smi_msg,
-						link);
+		smi_info->curr_msg = smi_info->waiting_msg;
+		smi_info->waiting_msg = NULL;
 #ifdef DEBUG_TIMING
 		do_gettimeofday(&t);
 		printk(KERN_DEBUG "**Start2: %d.%9.9d\n", t.tv_sec, t.tv_usec);
@@ -401,30 +393,15 @@ static enum si_sm_result start_next_msg(struct smi_info *smi_info)
 	return rv;
 }
 
-static void start_enable_irq(struct smi_info *smi_info)
+static void start_check_enables(struct smi_info *smi_info)
 {
 	unsigned char msg[2];
 
-	/*
-	 * If we are enabling interrupts, we have to tell the
-	 * BMC to use them.
-	 */
 	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
 
 	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
-	smi_info->si_state = SI_ENABLE_INTERRUPTS1;
-}
-
-static void start_disable_irq(struct smi_info *smi_info)
-{
-	unsigned char msg[2];
-
-	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
-	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
-
-	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
-	smi_info->si_state = SI_DISABLE_INTERRUPTS1;
+	smi_info->si_state = SI_CHECKING_ENABLES;
 }
 
 static void start_clear_flags(struct smi_info *smi_info)
@@ -440,6 +417,32 @@ static void start_clear_flags(struct smi_info *smi_info)
 	smi_info->si_state = SI_CLEARING_FLAGS;
 }
442 419
420static void start_getting_msg_queue(struct smi_info *smi_info)
421{
422 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
423 smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
424 smi_info->curr_msg->data_size = 2;
425
426 smi_info->handlers->start_transaction(
427 smi_info->si_sm,
428 smi_info->curr_msg->data,
429 smi_info->curr_msg->data_size);
430 smi_info->si_state = SI_GETTING_MESSAGES;
431}
432
433static void start_getting_events(struct smi_info *smi_info)
434{
435 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
436 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
437 smi_info->curr_msg->data_size = 2;
438
439 smi_info->handlers->start_transaction(
440 smi_info->si_sm,
441 smi_info->curr_msg->data,
442 smi_info->curr_msg->data_size);
443 smi_info->si_state = SI_GETTING_EVENTS;
444}
445
 static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
 {
 	smi_info->last_timeout_jiffies = jiffies;
@@ -453,22 +456,45 @@ static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
  * polled until we can allocate some memory. Once we have some
  * memory, we will re-enable the interrupt.
  */
-static inline void disable_si_irq(struct smi_info *smi_info)
+static inline bool disable_si_irq(struct smi_info *smi_info)
 {
 	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
-		start_disable_irq(smi_info);
 		smi_info->interrupt_disabled = true;
-		if (!atomic_read(&smi_info->stop_operation))
-			smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+		start_check_enables(smi_info);
+		return true;
 	}
+	return false;
 }
 
-static inline void enable_si_irq(struct smi_info *smi_info)
+static inline bool enable_si_irq(struct smi_info *smi_info)
 {
 	if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
-		start_enable_irq(smi_info);
 		smi_info->interrupt_disabled = false;
+		start_check_enables(smi_info);
+		return true;
 	}
+	return false;
+}
+
479/*
480 * Allocate a message. If unable to allocate, start the interrupt
481 * disable process and return NULL. If able to allocate but
482 * interrupts are disabled, free the message and return NULL after
483 * starting the interrupt enable process.
484 */
485static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
486{
487 struct ipmi_smi_msg *msg;
488
489 msg = ipmi_alloc_smi_msg();
490 if (!msg) {
491 if (!disable_si_irq(smi_info))
492 smi_info->si_state = SI_NORMAL;
493 } else if (enable_si_irq(smi_info)) {
494 ipmi_free_smi_msg(msg);
495 msg = NULL;
496 }
497 return msg;
 }
 
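alloc_msg_handle_irq() above couples allocation failure to interrupt throttling: on failure it starts the disable sequence so the driver falls back to polling, and on success while interrupts are still disabled it re-enables them and drops the message so the state machine retries later. A compressed restatement of the policy (not new code):

	msg = ipmi_alloc_smi_msg();
	if (!msg) {
		/* Out of memory: poll instead of taking interrupts. */
		if (!disable_si_irq(smi_info))
			smi_info->si_state = SI_NORMAL;
	} else if (enable_si_irq(smi_info)) {
		/* Memory is back: restart interrupts, retry this work. */
		ipmi_free_smi_msg(msg);
		msg = NULL;
	}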
 static void handle_flags(struct smi_info *smi_info)
@@ -480,45 +506,22 @@ static void handle_flags(struct smi_info *smi_info)
 
 		start_clear_flags(smi_info);
 		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
-		ipmi_smi_watchdog_pretimeout(smi_info->intf);
+		if (smi_info->intf)
+			ipmi_smi_watchdog_pretimeout(smi_info->intf);
 	} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
 		/* Messages available. */
-		smi_info->curr_msg = ipmi_alloc_smi_msg();
-		if (!smi_info->curr_msg) {
-			disable_si_irq(smi_info);
-			smi_info->si_state = SI_NORMAL;
+		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+		if (!smi_info->curr_msg)
 			return;
-		}
-		enable_si_irq(smi_info);
-
-		smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
-		smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
-		smi_info->curr_msg->data_size = 2;
 
-		smi_info->handlers->start_transaction(
-			smi_info->si_sm,
-			smi_info->curr_msg->data,
-			smi_info->curr_msg->data_size);
-		smi_info->si_state = SI_GETTING_MESSAGES;
+		start_getting_msg_queue(smi_info);
 	} else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
 		/* Events available. */
-		smi_info->curr_msg = ipmi_alloc_smi_msg();
-		if (!smi_info->curr_msg) {
-			disable_si_irq(smi_info);
-			smi_info->si_state = SI_NORMAL;
+		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
+		if (!smi_info->curr_msg)
 			return;
-		}
-		enable_si_irq(smi_info);
-
-		smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
-		smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
-		smi_info->curr_msg->data_size = 2;
 
-		smi_info->handlers->start_transaction(
-			smi_info->si_sm,
-			smi_info->curr_msg->data,
-			smi_info->curr_msg->data_size);
-		smi_info->si_state = SI_GETTING_EVENTS;
+		start_getting_events(smi_info);
 	} else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
 		   smi_info->oem_data_avail_handler) {
 		if (smi_info->oem_data_avail_handler(smi_info))
@@ -527,6 +530,55 @@ static void handle_flags(struct smi_info *smi_info)
 		smi_info->si_state = SI_NORMAL;
 }
529 532
533/*
534 * Global enables we care about.
535 */
536#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
537 IPMI_BMC_EVT_MSG_INTR)
538
539static u8 current_global_enables(struct smi_info *smi_info, u8 base,
540 bool *irq_on)
541{
542 u8 enables = 0;
543
544 if (smi_info->supports_event_msg_buff)
545 enables |= IPMI_BMC_EVT_MSG_BUFF;
546 else
547 enables &= ~IPMI_BMC_EVT_MSG_BUFF;
548
549 if (smi_info->irq && !smi_info->interrupt_disabled)
550 enables |= IPMI_BMC_RCV_MSG_INTR;
551 else
552 enables &= ~IPMI_BMC_RCV_MSG_INTR;
553
554 if (smi_info->supports_event_msg_buff &&
555 smi_info->irq && !smi_info->interrupt_disabled)
556
557 enables |= IPMI_BMC_EVT_MSG_INTR;
558 else
559 enables &= ~IPMI_BMC_EVT_MSG_INTR;
560
561 *irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR);
562
563 return enables;
564}
565
566static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
567{
568 u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG);
569
570 irqstate &= IPMI_BT_INTMASK_ENABLE_IRQ_BIT;
571
572 if ((bool)irqstate == irq_on)
573 return;
574
575 if (irq_on)
576 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
577 IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
578 else
579 smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG, 0);
580}
581
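current_global_enables() above computes the enables the driver wants; the SI_CHECKING_ENABLES state below compares that to the BMC's reply and rewrites only the bits covered by GLOBAL_ENABLES_MASK. A self-contained worked example, using illustrative bit values (the real definitions live in include/linux/ipmi_msgdefs.h):

	#include <assert.h>
	#include <stdint.h>

	#define EVT_MSG_BUFF 0x08	/* stand-in bit values */
	#define RCV_MSG_INTR 0x01
	#define EVT_MSG_INTR 0x02
	#define ENABLES_MASK (EVT_MSG_BUFF | RCV_MSG_INTR | EVT_MSG_INTR)

	int main(void)
	{
		uint8_t bmc_reply = 0x80 | EVT_MSG_BUFF; /* OEM bit + buffer */
		uint8_t want = EVT_MSG_BUFF | RCV_MSG_INTR;

		if (want != (bmc_reply & ENABLES_MASK)) {
			/* Rewrite our bits, preserve everything else (0x80). */
			uint8_t set = want | (bmc_reply & ~ENABLES_MASK);

			assert(set == (0x80 | EVT_MSG_BUFF | RCV_MSG_INTR));
		}
		return 0;
	}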
 static void handle_transaction_done(struct smi_info *smi_info)
 {
 	struct ipmi_smi_msg *msg;
@@ -581,7 +633,6 @@ static void handle_transaction_done(struct smi_info *smi_info)
 	}
 
 	case SI_CLEARING_FLAGS:
-	case SI_CLEARING_FLAGS_THEN_SET_IRQ:
 	{
 		unsigned char msg[3];
 
@@ -592,10 +643,7 @@ static void handle_transaction_done(struct smi_info *smi_info)
 			dev_warn(smi_info->dev,
 				 "Error clearing flags: %2.2x\n", msg[2]);
 		}
-		if (smi_info->si_state == SI_CLEARING_FLAGS_THEN_SET_IRQ)
-			start_enable_irq(smi_info);
-		else
-			smi_info->si_state = SI_NORMAL;
+		smi_info->si_state = SI_NORMAL;
 		break;
 	}
 
@@ -675,9 +723,11 @@ static void handle_transaction_done(struct smi_info *smi_info)
 		break;
 	}
 
-	case SI_ENABLE_INTERRUPTS1:
+	case SI_CHECKING_ENABLES:
 	{
 		unsigned char msg[4];
+		u8 enables;
+		bool irq_on;
 
 		/* We got the flags from the SMI, now handle them. */
 		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
@@ -687,70 +737,53 @@ static void handle_transaction_done(struct smi_info *smi_info)
 			dev_warn(smi_info->dev,
 				 "Maybe ok, but ipmi might run very slowly.\n");
 			smi_info->si_state = SI_NORMAL;
-		} else {
+			break;
+		}
+		enables = current_global_enables(smi_info, 0, &irq_on);
+		if (smi_info->si_type == SI_BT)
+			/* BT has its own interrupt enable bit. */
+			check_bt_irq(smi_info, irq_on);
+		if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
+			/* Enables are not correct, fix them. */
 			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 			msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
-			msg[2] = (msg[3] |
-				  IPMI_BMC_RCV_MSG_INTR |
-				  IPMI_BMC_EVT_MSG_INTR);
+			msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK);
 			smi_info->handlers->start_transaction(
 				smi_info->si_sm, msg, 3);
-			smi_info->si_state = SI_ENABLE_INTERRUPTS2;
+			smi_info->si_state = SI_SETTING_ENABLES;
+		} else if (smi_info->supports_event_msg_buff) {
+			smi_info->curr_msg = ipmi_alloc_smi_msg();
+			if (!smi_info->curr_msg) {
+				smi_info->si_state = SI_NORMAL;
+				break;
+			}
+			start_getting_msg_queue(smi_info);
+		} else {
+			smi_info->si_state = SI_NORMAL;
 		}
 		break;
 	}
 
-	case SI_ENABLE_INTERRUPTS2:
+	case SI_SETTING_ENABLES:
 	{
 		unsigned char msg[4];
 
-		/* We got the flags from the SMI, now handle them. */
 		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
-		if (msg[2] != 0) {
+		if (msg[2] != 0)
 			dev_warn(smi_info->dev,
-				 "Couldn't set irq info: %x.\n", msg[2]);
-			dev_warn(smi_info->dev,
-				 "Maybe ok, but ipmi might run very slowly.\n");
-		} else
-			smi_info->interrupt_disabled = false;
-		smi_info->si_state = SI_NORMAL;
-		break;
-	}
-
-	case SI_DISABLE_INTERRUPTS1:
-	{
-		unsigned char msg[4];
-
-		/* We got the flags from the SMI, now handle them. */
-		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
-		if (msg[2] != 0) {
-			dev_warn(smi_info->dev, "Could not disable interrupts"
-				 ", failed get.\n");
-			smi_info->si_state = SI_NORMAL;
+				 "Could not set the global enables: 0x%x.\n",
+				 msg[2]);
+
+		if (smi_info->supports_event_msg_buff) {
+			smi_info->curr_msg = ipmi_alloc_smi_msg();
+			if (!smi_info->curr_msg) {
+				smi_info->si_state = SI_NORMAL;
+				break;
+			}
+			start_getting_msg_queue(smi_info);
 		} else {
-			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
-			msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
-			msg[2] = (msg[3] &
-				  ~(IPMI_BMC_RCV_MSG_INTR |
-				    IPMI_BMC_EVT_MSG_INTR));
-			smi_info->handlers->start_transaction(
-				smi_info->si_sm, msg, 3);
-			smi_info->si_state = SI_DISABLE_INTERRUPTS2;
-		}
-		break;
-	}
-
-	case SI_DISABLE_INTERRUPTS2:
-	{
-		unsigned char msg[4];
-
-		/* We got the flags from the SMI, now handle them. */
-		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
-		if (msg[2] != 0) {
-			dev_warn(smi_info->dev, "Could not disable interrupts"
-				 ", failed set.\n");
+			smi_info->si_state = SI_NORMAL;
 		}
-		smi_info->si_state = SI_NORMAL;
 		break;
 	}
 	}
@@ -808,25 +841,35 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
 	 * We prefer handling attn over new messages.  But don't do
 	 * this if there is not yet an upper layer to handle anything.
 	 */
-	if (likely(smi_info->intf) && si_sm_result == SI_SM_ATTN) {
+	if (likely(smi_info->intf) &&
+	    (si_sm_result == SI_SM_ATTN || smi_info->got_attn)) {
 		unsigned char msg[2];
 
-		smi_inc_stat(smi_info, attentions);
+		if (smi_info->si_state != SI_NORMAL) {
+			/*
+			 * We got an ATTN, but we are doing something else.
+			 * Handle the ATTN later.
+			 */
+			smi_info->got_attn = true;
+		} else {
+			smi_info->got_attn = false;
+			smi_inc_stat(smi_info, attentions);
 
 			/*
 			 * Got an attn, send down a get message flags to see
 			 * what's causing it. It would be better to handle
 			 * this in the upper layer, but due to the way
 			 * interrupts work with the SMI, that's not really
 			 * possible.
 			 */
 			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 			msg[1] = IPMI_GET_MSG_FLAGS_CMD;
 
 			smi_info->handlers->start_transaction(
 				smi_info->si_sm, msg, 2);
 			smi_info->si_state = SI_GETTING_FLAGS;
 			goto restart;
+		}
 	}
 
832 /* If we are currently idle, try to start the next message. */ 875 /* If we are currently idle, try to start the next message. */
@@ -846,19 +889,21 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
846 */ 889 */
847 atomic_set(&smi_info->req_events, 0); 890 atomic_set(&smi_info->req_events, 0);
848 891
849 smi_info->curr_msg = ipmi_alloc_smi_msg(); 892 /*
850 if (!smi_info->curr_msg) 893 * Take this opportunity to check the interrupt and
851 goto out; 894 * message enable state for the BMC. The BMC can be
852 895 * asynchronously reset, and may thus get interrupts
853 smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2); 896 * disable and messages disabled.
854 smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD; 897 */
855 smi_info->curr_msg->data_size = 2; 898 if (smi_info->supports_event_msg_buff || smi_info->irq) {
899 start_check_enables(smi_info);
900 } else {
901 smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
902 if (!smi_info->curr_msg)
903 goto out;
856 904
857 smi_info->handlers->start_transaction( 905 start_getting_events(smi_info);
858 smi_info->si_sm, 906 }
859 smi_info->curr_msg->data,
860 smi_info->curr_msg->data_size);
861 smi_info->si_state = SI_GETTING_EVENTS;
862 goto restart; 907 goto restart;
863 } 908 }
864 out: 909 out:
@@ -879,8 +924,7 @@ static void check_start_timer_thread(struct smi_info *smi_info)
 }
 
 static void sender(void *send_info,
-		   struct ipmi_smi_msg *msg,
-		   int priority)
+		   struct ipmi_smi_msg *msg)
 {
 	struct smi_info *smi_info = send_info;
 	enum si_sm_result result;
@@ -889,14 +933,8 @@ static void sender(void *send_info,
 	struct timeval t;
 #endif
 
-	if (atomic_read(&smi_info->stop_operation)) {
-		msg->rsp[0] = msg->data[0] | 4;
-		msg->rsp[1] = msg->data[1];
-		msg->rsp[2] = IPMI_ERR_UNSPECIFIED;
-		msg->rsp_size = 3;
-		deliver_recv_msg(smi_info, msg);
-		return;
-	}
+	BUG_ON(smi_info->waiting_msg);
+	smi_info->waiting_msg = msg;
 
 #ifdef DEBUG_TIMING
 	do_gettimeofday(&t);
@@ -905,16 +943,16 @@ static void sender(void *send_info,
 
 	if (smi_info->run_to_completion) {
 		/*
-		 * If we are running to completion, then throw it in
-		 * the list and run transactions until everything is
-		 * clear.  Priority doesn't matter here.
+		 * If we are running to completion, start it and run
+		 * transactions until everything is clear.
 		 */
+		smi_info->curr_msg = smi_info->waiting_msg;
+		smi_info->waiting_msg = NULL;
 
 		/*
 		 * Run to completion means we are single-threaded, no
 		 * need for locks.
 		 */
-		list_add_tail(&(msg->link), &(smi_info->xmit_msgs));
 
 		result = smi_event_handler(smi_info, 0);
 		while (result != SI_SM_IDLE) {
@@ -926,11 +964,6 @@ static void sender(void *send_info,
 	}
 
 	spin_lock_irqsave(&smi_info->si_lock, flags);
-	if (priority > 0)
-		list_add_tail(&msg->link, &smi_info->hp_xmit_msgs);
-	else
-		list_add_tail(&msg->link, &smi_info->xmit_msgs);
-
 	check_start_timer_thread(smi_info);
 	spin_unlock_irqrestore(&smi_info->si_lock, flags);
 }
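With queuing moved up into the message handler, the SI layer now holds at most one message from above at a time; the BUG_ON documents that contract. A sketch of the invariant from the caller's side (queue_for_si() is a hypothetical name for the hand-off shown above):

	/* Sketch of the new contract: the upper layer serializes sends,
	 * so the single waiting_msg slot must always be free here. */
	static void queue_for_si(struct smi_info *smi_info,
				 struct ipmi_smi_msg *msg)
	{
		BUG_ON(smi_info->waiting_msg);	/* one message at a time */
		smi_info->waiting_msg = msg;
	}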
@@ -1068,8 +1101,7 @@ static void request_events(void *send_info)
 {
 	struct smi_info *smi_info = send_info;
 
-	if (atomic_read(&smi_info->stop_operation) ||
-	    !smi_info->has_event_buffer)
+	if (!smi_info->has_event_buffer)
 		return;
 
 	atomic_set(&smi_info->req_events, 1);
@@ -1697,7 +1729,7 @@ static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr)
 	}
 	*s = '\0';
 	s++;
-	for (i = 0; hotmod_ops[i].name; i++) {
+	for (i = 0; v[i].name; i++) {
 		if (strcmp(*curr, v[i].name) == 0) {
 			*val = v[i].val;
 			*curr = s;
@@ -2133,6 +2165,9 @@ static int try_init_spmi(struct SPMITable *spmi)
 	case 3:	/* BT */
 		info->si_type = SI_BT;
 		break;
+	case 4: /* SSIF, just ignore */
+		kfree(info);
+		return -EIO;
 	default:
 		printk(KERN_INFO PFX "Unknown ACPI/SPMI SI type %d\n",
 		       spmi->InterfaceType);
@@ -2250,6 +2285,8 @@ static int ipmi_pnp_probe(struct pnp_dev *dev,
 	case 3:
 		info->si_type = SI_BT;
 		break;
+	case 4: /* SSIF, just ignore */
+		goto err_free;
 	default:
 		dev_info(&dev->dev, "unknown IPMI type %lld\n", tmp);
 		goto err_free;
@@ -2913,9 +2950,11 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
 		goto out;
 	}
 
-	if (resp[3] & IPMI_BMC_EVT_MSG_BUFF)
+	if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
 		/* buffer is already enabled, nothing to do. */
+		smi_info->supports_event_msg_buff = true;
 		goto out;
+	}
 
 	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
 	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
@@ -2948,6 +2987,9 @@ static int try_enable_event_buffer(struct smi_info *smi_info)
 		 * that the event buffer is not supported.
 		 */
 		rv = -ENOENT;
+	else
+		smi_info->supports_event_msg_buff = true;
+
  out:
 	kfree(resp);
 	return rv;
@@ -3188,15 +3230,10 @@ static void setup_xaction_handlers(struct smi_info *smi_info)
 
 static inline void wait_for_timer_and_thread(struct smi_info *smi_info)
 {
-	if (smi_info->intf) {
-		/*
-		 * The timer and thread are only running if the
-		 * interface has been started up and registered.
-		 */
-		if (smi_info->thread != NULL)
-			kthread_stop(smi_info->thread);
+	if (smi_info->thread != NULL)
+		kthread_stop(smi_info->thread);
+	if (smi_info->timer_running)
 		del_timer_sync(&smi_info->si_timer);
-	}
 }
 
 static struct ipmi_default_vals
@@ -3274,8 +3311,8 @@ static int add_smi(struct smi_info *new_smi)
 	int rv = 0;
 
 	printk(KERN_INFO PFX "Adding %s-specified %s state machine",
-	       ipmi_addr_src_to_str[new_smi->addr_source],
+	       ipmi_addr_src_to_str(new_smi->addr_source),
 	       si_to_str[new_smi->si_type]);
 	mutex_lock(&smi_infos_lock);
 	if (!is_new_interface(new_smi)) {
 		printk(KERN_CONT " duplicate interface\n");
@@ -3305,7 +3342,7 @@ static int try_smi_init(struct smi_info *new_smi)
 	printk(KERN_INFO PFX "Trying %s-specified %s state"
 	       " machine at %s address 0x%lx, slave address 0x%x,"
 	       " irq %d\n",
-	       ipmi_addr_src_to_str[new_smi->addr_source],
+	       ipmi_addr_src_to_str(new_smi->addr_source),
 	       si_to_str[new_smi->si_type],
 	       addr_space_to_str[new_smi->io.addr_type],
 	       new_smi->io.addr_data,
@@ -3371,8 +3408,7 @@ static int try_smi_init(struct smi_info *new_smi)
 	setup_oem_data_handler(new_smi);
 	setup_xaction_handlers(new_smi);
 
-	INIT_LIST_HEAD(&(new_smi->xmit_msgs));
-	INIT_LIST_HEAD(&(new_smi->hp_xmit_msgs));
+	new_smi->waiting_msg = NULL;
 	new_smi->curr_msg = NULL;
 	atomic_set(&new_smi->req_events, 0);
 	new_smi->run_to_completion = false;
@@ -3380,7 +3416,6 @@ static int try_smi_init(struct smi_info *new_smi)
 		atomic_set(&new_smi->stats[i], 0);
 
 	new_smi->interrupt_disabled = true;
-	atomic_set(&new_smi->stop_operation, 0);
 	atomic_set(&new_smi->need_watch, 0);
 	new_smi->intf_num = smi_num;
 	smi_num++;
@@ -3394,9 +3429,15 @@ static int try_smi_init(struct smi_info *new_smi)
 	 * timer to avoid racing with the timer.
 	 */
 	start_clear_flags(new_smi);
-	/* IRQ is defined to be set when non-zero. */
-	if (new_smi->irq)
-		new_smi->si_state = SI_CLEARING_FLAGS_THEN_SET_IRQ;
+
+	/*
+	 * IRQ is defined to be set when non-zero.  req_events will
+	 * cause a global flags check that will enable interrupts.
+	 */
+	if (new_smi->irq) {
+		new_smi->interrupt_disabled = false;
+		atomic_set(&new_smi->req_events, 1);
+	}
 
 	if (!new_smi->dev) {
 		/*
@@ -3428,7 +3469,6 @@ static int try_smi_init(struct smi_info *new_smi)
3428 new_smi, 3469 new_smi,
3429 &new_smi->device_id, 3470 &new_smi->device_id,
3430 new_smi->dev, 3471 new_smi->dev,
3431 "bmc",
3432 new_smi->slave_addr); 3472 new_smi->slave_addr);
3433 if (rv) { 3473 if (rv) {
3434 dev_err(new_smi->dev, "Unable to register device: error %d\n", 3474 dev_err(new_smi->dev, "Unable to register device: error %d\n",
@@ -3466,15 +3506,15 @@ static int try_smi_init(struct smi_info *new_smi)
3466 return 0; 3506 return 0;
3467 3507
3468 out_err_stop_timer: 3508 out_err_stop_timer:
3469 atomic_inc(&new_smi->stop_operation);
3470 wait_for_timer_and_thread(new_smi); 3509 wait_for_timer_and_thread(new_smi);
3471 3510
3472 out_err: 3511 out_err:
3473 new_smi->interrupt_disabled = true; 3512 new_smi->interrupt_disabled = true;
3474 3513
3475 if (new_smi->intf) { 3514 if (new_smi->intf) {
3476 ipmi_unregister_smi(new_smi->intf); 3515 ipmi_smi_t intf = new_smi->intf;
3477 new_smi->intf = NULL; 3516 new_smi->intf = NULL;
3517 ipmi_unregister_smi(intf);
3478 } 3518 }
3479 3519
3480 if (new_smi->irq_cleanup) { 3520 if (new_smi->irq_cleanup) {
@@ -3653,60 +3693,49 @@ module_init(init_ipmi_si);
3653static void cleanup_one_si(struct smi_info *to_clean) 3693static void cleanup_one_si(struct smi_info *to_clean)
3654{ 3694{
3655 int rv = 0; 3695 int rv = 0;
3656 unsigned long flags;
3657 3696
3658 if (!to_clean) 3697 if (!to_clean)
3659 return; 3698 return;
3660 3699
3700 if (to_clean->intf) {
3701 ipmi_smi_t intf = to_clean->intf;
3702
3703 to_clean->intf = NULL;
3704 rv = ipmi_unregister_smi(intf);
3705 if (rv) {
3706 pr_err(PFX "Unable to unregister device: errno=%d\n",
3707 rv);
3708 }
3709 }
3710
3661 if (to_clean->dev) 3711 if (to_clean->dev)
3662 dev_set_drvdata(to_clean->dev, NULL); 3712 dev_set_drvdata(to_clean->dev, NULL);
3663 3713
3664 list_del(&to_clean->link); 3714 list_del(&to_clean->link);
3665 3715
3666 /* Tell the driver that we are shutting down. */
3667 atomic_inc(&to_clean->stop_operation);
3668
3669 /* 3716 /*
3670 * Make sure the timer and thread are stopped and will not run 3717 * Make sure that interrupts, the timer and the thread are
3671 * again. 3718 * stopped and will not run again.
3672 */ 3719 */
3720 if (to_clean->irq_cleanup)
3721 to_clean->irq_cleanup(to_clean);
3673 wait_for_timer_and_thread(to_clean); 3722 wait_for_timer_and_thread(to_clean);
3674 3723
3675 /* 3724 /*
3676 * Timeouts are stopped, now make sure the interrupts are off 3725 * Timeouts are stopped, now make sure the interrupts are off
3677 * for the device. A little tricky with locks to make sure 3726 * in the BMC. Note that timers and CPU interrupts are off,
3678 * there are no races. 3727 * so no need for locks.
3679 */ 3728 */
3680 spin_lock_irqsave(&to_clean->si_lock, flags);
3681 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { 3729 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3682 spin_unlock_irqrestore(&to_clean->si_lock, flags);
3683 poll(to_clean); 3730 poll(to_clean);
3684 schedule_timeout_uninterruptible(1); 3731 schedule_timeout_uninterruptible(1);
3685 spin_lock_irqsave(&to_clean->si_lock, flags);
3686 } 3732 }
3687 disable_si_irq(to_clean); 3733 disable_si_irq(to_clean);
3688 spin_unlock_irqrestore(&to_clean->si_lock, flags);
3689 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) { 3734 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3690 poll(to_clean); 3735 poll(to_clean);
3691 schedule_timeout_uninterruptible(1); 3736 schedule_timeout_uninterruptible(1);
3692 } 3737 }
3693 3738
3694 /* Clean up interrupts and make sure that everything is done. */
3695 if (to_clean->irq_cleanup)
3696 to_clean->irq_cleanup(to_clean);
3697 while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
3698 poll(to_clean);
3699 schedule_timeout_uninterruptible(1);
3700 }
3701
3702 if (to_clean->intf)
3703 rv = ipmi_unregister_smi(to_clean->intf);
3704
3705 if (rv) {
3706 printk(KERN_ERR PFX "Unable to unregister device: errno=%d\n",
3707 rv);
3708 }
3709
3710 if (to_clean->handlers) 3739 if (to_clean->handlers)
3711 to_clean->handlers->cleanup(to_clean->si_sm); 3740 to_clean->handlers->cleanup(to_clean->si_sm);
3712 3741
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
new file mode 100644
index 000000000000..e178ac27e73c
--- /dev/null
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -0,0 +1,1870 @@
1/*
2 * ipmi_ssif.c
3 *
4 * The interface to the IPMI driver for SMBus access to an SMBus
5 * compliant device. Called SSIF by the IPMI spec.
6 *
7 * Author: Intel Corporation
8 * Todd Davis <todd.c.davis@intel.com>
9 *
10 * Rewritten by Corey Minyard <minyard@acm.org> to support the
11 * non-blocking I2C interface, add support for multi-part
12 * transactions, add PEC support, and general cleanup.
13 *
14 * Copyright 2003 Intel Corporation
15 * Copyright 2005 MontaVista Software
16 *
17 * This program is free software; you can redistribute it and/or modify it
18 * under the terms of the GNU General Public License as published by the
19 * Free Software Foundation; either version 2 of the License, or (at your
20 * option) any later version.
21 */
22
23/*
24 * This file holds the "policy" for the interface to the SSIF state
25 * machine. It does the configuration, handles timers and interrupts,
26 * and drives the real SSIF state machine.
27 */
28
29/*
30 * TODO: Figure out how to use SMB alerts. This will require a new
31 * interface into the I2C driver, I believe.
32 */
33
34#include <linux/version.h>
35#if defined(MODVERSIONS)
36#include <linux/modversions.h>
37#endif
38
39#include <linux/module.h>
40#include <linux/moduleparam.h>
41#include <linux/sched.h>
42#include <linux/seq_file.h>
43#include <linux/timer.h>
44#include <linux/delay.h>
45#include <linux/errno.h>
46#include <linux/spinlock.h>
47#include <linux/slab.h>
48#include <linux/list.h>
49#include <linux/i2c.h>
50#include <linux/ipmi_smi.h>
51#include <linux/init.h>
52#include <linux/dmi.h>
53#include <linux/kthread.h>
54#include <linux/acpi.h>
55
56#define PFX "ipmi_ssif: "
57#define DEVICE_NAME "ipmi_ssif"
58
59#define IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD 0x57
60
61#define SSIF_IPMI_REQUEST 2
62#define SSIF_IPMI_MULTI_PART_REQUEST_START 6
63#define SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE 7
64#define SSIF_IPMI_RESPONSE 3
65#define SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE 9
66
67/* ssif_debug is a bit-field
68 * SSIF_DEBUG_MSG - commands and their responses
69 * SSIF_DEBUG_STATE - message states
70 * SSIF_DEBUG_TIMING - Measure times between events in the driver
71 */
72#define SSIF_DEBUG_TIMING 4
73#define SSIF_DEBUG_STATE 2
74#define SSIF_DEBUG_MSG 1
75#define SSIF_NODEBUG 0
76#define SSIF_DEFAULT_DEBUG (SSIF_NODEBUG)
77
78/*
79 * Timer values
80 */
81#define SSIF_MSG_USEC 20000 /* 20ms between message tries. */
82#define SSIF_MSG_PART_USEC 5000 /* 5ms for a message part */
83
84/* How many times do we retry sending/receiving the message. */
85#define SSIF_SEND_RETRIES 5
86#define SSIF_RECV_RETRIES 250
87
88#define SSIF_MSG_MSEC (SSIF_MSG_USEC / 1000)
89#define SSIF_MSG_JIFFIES ((SSIF_MSG_USEC * 1000) / TICK_NSEC)
90#define SSIF_MSG_PART_JIFFIES ((SSIF_MSG_PART_USEC * 1000) / TICK_NSEC)
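/*
 * A quick worked example of the conversion above, assuming HZ=1000
 * (so TICK_NSEC is 1000000):
 *
 *   SSIF_MSG_JIFFIES      = (20000 * 1000) / 1000000 = 20 jiffies
 *   SSIF_MSG_PART_JIFFIES = (5000 * 1000) / 1000000  = 5 jiffies
 *
 * With HZ=250 (TICK_NSEC = 4000000) these come out to 5 and 1.
 */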
91
92enum ssif_intf_state {
93 SSIF_NORMAL,
94 SSIF_GETTING_FLAGS,
95 SSIF_GETTING_EVENTS,
96 SSIF_CLEARING_FLAGS,
97 SSIF_GETTING_MESSAGES,
98 /* FIXME - add watchdog stuff. */
99};
100
101#define SSIF_IDLE(ssif) ((ssif)->ssif_state == SSIF_NORMAL \
102 && (ssif)->curr_msg == NULL)
103
104/*
105 * Indexes into stats[] in ssif_info below.
106 */
107enum ssif_stat_indexes {
108 /* Number of total messages sent. */
109 SSIF_STAT_sent_messages = 0,
110
111 /*
112 * Number of message parts sent. Messages may be broken into
113 * parts if they are long.
114 */
115 SSIF_STAT_sent_messages_parts,
116
117 /*
118 * Number of times a message was retried.
119 */
120 SSIF_STAT_send_retries,
121
122 /*
123 * Number of times the send of a message failed.
124 */
125 SSIF_STAT_send_errors,
126
127 /*
128 * Number of message responses received.
129 */
130 SSIF_STAT_received_messages,
131
132 /*
133 * Number of message fragments received.
134 */
135 SSIF_STAT_received_message_parts,
136
137 /*
138 * Number of times the receive of a message was retried.
139 */
140 SSIF_STAT_receive_retries,
141
142 /*
143 * Number of errors receiving messages.
144 */
145 SSIF_STAT_receive_errors,
146
147 /*
148 * Number of times a flag fetch was requested.
149 */
150 SSIF_STAT_flag_fetches,
151
152 /*
153 * Number of times the hardware didn't follow the state machine.
154 */
155 SSIF_STAT_hosed,
156
157 /*
158 * Number of received events.
159 */
160 SSIF_STAT_events,
161
162 /* Number of asynchronous messages received. */
163 SSIF_STAT_incoming_messages,
164
165 /* Number of watchdog pretimeouts. */
166 SSIF_STAT_watchdog_pretimeouts,
167
168 /* Always add statistics before this value, it must be last. */
169 SSIF_NUM_STATS
170};
171
172struct ssif_addr_info {
173 unsigned short addr;
174 struct i2c_board_info binfo;
175 char *adapter_name;
176 int debug;
177 int slave_addr;
178 enum ipmi_addr_src addr_src;
179 union ipmi_smi_info_union addr_info;
180
181 struct mutex clients_mutex;
182 struct list_head clients;
183
184 struct list_head link;
185};
186
187struct ssif_info;
188
189typedef void (*ssif_i2c_done)(struct ssif_info *ssif_info, int result,
190 unsigned char *data, unsigned int len);
191
192struct ssif_info {
193 ipmi_smi_t intf;
194 int intf_num;
195 spinlock_t lock;
196 struct ipmi_smi_msg *waiting_msg;
197 struct ipmi_smi_msg *curr_msg;
198 enum ssif_intf_state ssif_state;
199 unsigned long ssif_debug;
200
201 struct ipmi_smi_handlers handlers;
202
203 enum ipmi_addr_src addr_source; /* ACPI, PCI, SMBIOS, hardcode, etc. */
204 union ipmi_smi_info_union addr_info;
205
206 /*
207 * Flags from the last GET_MSG_FLAGS command, used when an ATTN
208 * is set to hold the flags until we are done handling everything
209 * from the flags.
210 */
211#define RECEIVE_MSG_AVAIL 0x01
212#define EVENT_MSG_BUFFER_FULL 0x02
213#define WDT_PRE_TIMEOUT_INT 0x08
214 unsigned char msg_flags;
215
216 bool has_event_buffer;
217
218 /*
219 * If set to true, this will request events the next time the
220 * state machine is idle.
221 */
222 bool req_events;
223
224 /*
225 * If set to true, this will request flags the next time the
226 * state machine is idle.
227 */
228 bool req_flags;
229
230 /*
231 * Used to perform timer operations when run-to-completion
232 * mode is on. This is a countdown timer.
233 */
234 int rtc_us_timer;
235
236 /* Used for sending/receiving data. +1 for the length. */
237 unsigned char data[IPMI_MAX_MSG_LENGTH + 1];
238 unsigned int data_len;
239
240 /* Temp receive buffer, gets copied into data. */
241 unsigned char recv[I2C_SMBUS_BLOCK_MAX];
242
243 struct i2c_client *client;
244 ssif_i2c_done done_handler;
245
246 /* Thread interface handling */
247 struct task_struct *thread;
248 struct completion wake_thread;
249 bool stopping;
250 int i2c_read_write;
251 int i2c_command;
252 unsigned char *i2c_data;
253 unsigned int i2c_size;
254
255 /* From the device id response. */
256 struct ipmi_device_id device_id;
257
258 struct timer_list retry_timer;
259 int retries_left;
260
261 /* Info from SSIF cmd */
262 unsigned char max_xmit_msg_size;
263 unsigned char max_recv_msg_size;
264 unsigned int multi_support;
265 int supports_pec;
266
267#define SSIF_NO_MULTI 0
268#define SSIF_MULTI_2_PART 1
269#define SSIF_MULTI_n_PART 2
270 unsigned char *multi_data;
271 unsigned int multi_len;
272 unsigned int multi_pos;
273
274 atomic_t stats[SSIF_NUM_STATS];
275};
276
277#define ssif_inc_stat(ssif, stat) \
278 atomic_inc(&(ssif)->stats[SSIF_STAT_ ## stat])
279#define ssif_get_stat(ssif, stat) \
280 ((unsigned int) atomic_read(&(ssif)->stats[SSIF_STAT_ ## stat]))
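/*
 * These use token pasting to map a short name onto the enum above;
 * for instance, ssif_inc_stat(info, hosed) expands to
 * atomic_inc(&(info)->stats[SSIF_STAT_hosed]).
 */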
281
282static bool initialized;
283
284static atomic_t next_intf = ATOMIC_INIT(0);
285
286static void return_hosed_msg(struct ssif_info *ssif_info,
287 struct ipmi_smi_msg *msg);
288static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags);
289static int start_send(struct ssif_info *ssif_info,
290 unsigned char *data,
291 unsigned int len);
292
293static unsigned long *ipmi_ssif_lock_cond(struct ssif_info *ssif_info,
294 unsigned long *flags)
295{
296 spin_lock_irqsave(&ssif_info->lock, *flags);
297 return flags;
298}
299
300static void ipmi_ssif_unlock_cond(struct ssif_info *ssif_info,
301 unsigned long *flags)
302{
303 spin_unlock_irqrestore(&ssif_info->lock, *flags);
304}
305
306static void deliver_recv_msg(struct ssif_info *ssif_info,
307 struct ipmi_smi_msg *msg)
308{
309 ipmi_smi_t intf = ssif_info->intf;
310
311 if (!intf) {
312 ipmi_free_smi_msg(msg);
313 } else if (msg->rsp_size < 0) {
314 return_hosed_msg(ssif_info, msg);
315 pr_err(PFX
316 "Malformed message in deliver_recv_msg: rsp_size = %d\n",
317 msg->rsp_size);
318 } else {
319 ipmi_smi_msg_received(intf, msg);
320 }
321}
322
323static void return_hosed_msg(struct ssif_info *ssif_info,
324 struct ipmi_smi_msg *msg)
325{
326 ssif_inc_stat(ssif_info, hosed);
327
328 /* Make it a response */
329 msg->rsp[0] = msg->data[0] | 4;
330 msg->rsp[1] = msg->data[1];
331 msg->rsp[2] = 0xFF; /* Unknown error. */
332 msg->rsp_size = 3;
333
334 deliver_recv_msg(ssif_info, msg);
335}
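/*
 * On the wire the netfn/LUN byte is (netfn << 2) | lun, and a response
 * netfn is the request netfn with bit 0 set, so OR-ing the request
 * byte with 4 above turns it into the matching response byte. For
 * example, an APP request (netfn 0x06, byte 0x18) becomes an APP
 * response (netfn 0x07, byte 0x1c).
 */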
336
337/*
338 * Must be called with the message lock held. This will release the
339 * message lock. Note that the caller will check SSIF_IDLE and start a
340 * new operation, so there is no need to check for new messages to
341 * start in here.
342 */
343static void start_clear_flags(struct ssif_info *ssif_info, unsigned long *flags)
344{
345 unsigned char msg[3];
346
347 ssif_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
348 ssif_info->ssif_state = SSIF_CLEARING_FLAGS;
349 ipmi_ssif_unlock_cond(ssif_info, flags);
350
351 /* Make sure the watchdog pre-timeout flag is not set at startup. */
352 msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
353 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
354 msg[2] = WDT_PRE_TIMEOUT_INT;
355
356 if (start_send(ssif_info, msg, 3) != 0) {
357 /* Error, just go to normal state. */
358 ssif_info->ssif_state = SSIF_NORMAL;
359 }
360}
361
362static void start_flag_fetch(struct ssif_info *ssif_info, unsigned long *flags)
363{
364 unsigned char mb[2];
365
366 ssif_info->req_flags = false;
367 ssif_info->ssif_state = SSIF_GETTING_FLAGS;
368 ipmi_ssif_unlock_cond(ssif_info, flags);
369
370 mb[0] = (IPMI_NETFN_APP_REQUEST << 2);
371 mb[1] = IPMI_GET_MSG_FLAGS_CMD;
372 if (start_send(ssif_info, mb, 2) != 0)
373 ssif_info->ssif_state = SSIF_NORMAL;
374}
375
376static void check_start_send(struct ssif_info *ssif_info, unsigned long *flags,
377 struct ipmi_smi_msg *msg)
378{
379 if (start_send(ssif_info, msg->data, msg->data_size) != 0) {
380 unsigned long oflags;
381
382 flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
383 ssif_info->curr_msg = NULL;
384 ssif_info->ssif_state = SSIF_NORMAL;
385 ipmi_ssif_unlock_cond(ssif_info, flags);
386 ipmi_free_smi_msg(msg);
387 }
388}
389
390static void start_event_fetch(struct ssif_info *ssif_info, unsigned long *flags)
391{
392 struct ipmi_smi_msg *msg;
393
394 ssif_info->req_events = false;
395
396 msg = ipmi_alloc_smi_msg();
397 if (!msg) {
398 ssif_info->ssif_state = SSIF_NORMAL;
399 return;
400 }
401
402 ssif_info->curr_msg = msg;
403 ssif_info->ssif_state = SSIF_GETTING_EVENTS;
404 ipmi_ssif_unlock_cond(ssif_info, flags);
405
406 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
407 msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
408 msg->data_size = 2;
409
410 check_start_send(ssif_info, flags, msg);
411}
412
413static void start_recv_msg_fetch(struct ssif_info *ssif_info,
414 unsigned long *flags)
415{
416 struct ipmi_smi_msg *msg;
417
418 msg = ipmi_alloc_smi_msg();
419 if (!msg) {
420 ssif_info->ssif_state = SSIF_NORMAL;
421 return;
422 }
423
424 ssif_info->curr_msg = msg;
425 ssif_info->ssif_state = SSIF_GETTING_MESSAGES;
426 ipmi_ssif_unlock_cond(ssif_info, flags);
427
428 msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
429 msg->data[1] = IPMI_GET_MSG_CMD;
430 msg->data_size = 2;
431
432 check_start_send(ssif_info, flags, msg);
433}
434
435/*
436 * Must be called with the message lock held. This will release the
437 * message lock. Note that the caller will check SSIF_IDLE and start a
438 * new operation, so there is no need to check for new messages to
439 * start in here.
440 */
441static void handle_flags(struct ssif_info *ssif_info, unsigned long *flags)
442{
443 if (ssif_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
444 ipmi_smi_t intf = ssif_info->intf;
445 /* Watchdog pre-timeout */
446 ssif_inc_stat(ssif_info, watchdog_pretimeouts);
447 start_clear_flags(ssif_info, flags);
448 if (intf)
449 ipmi_smi_watchdog_pretimeout(intf);
450 } else if (ssif_info->msg_flags & RECEIVE_MSG_AVAIL)
451 /* Messages available. */
452 start_recv_msg_fetch(ssif_info, flags);
453 else if (ssif_info->msg_flags & EVENT_MSG_BUFFER_FULL)
454 /* Events available. */
455 start_event_fetch(ssif_info, flags);
456 else {
457 ssif_info->ssif_state = SSIF_NORMAL;
458 ipmi_ssif_unlock_cond(ssif_info, flags);
459 }
460}
461
462static int ipmi_ssif_thread(void *data)
463{
464 struct ssif_info *ssif_info = data;
465
466 while (!kthread_should_stop()) {
467 int result;
468
469 /* Wait for something to do */
470 wait_for_completion(&ssif_info->wake_thread);
471 init_completion(&ssif_info->wake_thread);
472
473 if (ssif_info->stopping)
474 break;
475
476 if (ssif_info->i2c_read_write == I2C_SMBUS_WRITE) {
477 result = i2c_smbus_write_block_data(
478 ssif_info->client, SSIF_IPMI_REQUEST,
479 ssif_info->i2c_data[0],
480 ssif_info->i2c_data + 1);
481 ssif_info->done_handler(ssif_info, result, NULL, 0);
482 } else {
483 result = i2c_smbus_read_block_data(
484 ssif_info->client, SSIF_IPMI_RESPONSE,
485 ssif_info->i2c_data);
486 if (result < 0)
487 ssif_info->done_handler(ssif_info, result,
488 NULL, 0);
489 else
490 ssif_info->done_handler(ssif_info, 0,
491 ssif_info->i2c_data,
492 result);
493 }
494 }
495
496 return 0;
497}
498
499static int ssif_i2c_send(struct ssif_info *ssif_info,
500 ssif_i2c_done handler,
501 int read_write, int command,
502 unsigned char *data, unsigned int size)
503{
504 ssif_info->done_handler = handler;
505
506 ssif_info->i2c_read_write = read_write;
507 ssif_info->i2c_command = command;
508 ssif_info->i2c_data = data;
509 ssif_info->i2c_size = size;
510 complete(&ssif_info->wake_thread);
511 return 0;
512}
513
514
515static void msg_done_handler(struct ssif_info *ssif_info, int result,
516 unsigned char *data, unsigned int len);
517
518static void retry_timeout(unsigned long data)
519{
520 struct ssif_info *ssif_info = (void *) data;
521 int rv;
522
523 if (ssif_info->stopping)
524 return;
525
526 ssif_info->rtc_us_timer = 0;
527
528 rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
529 SSIF_IPMI_RESPONSE,
530 ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
531 if (rv < 0) {
532 /* request failed, just return the error. */
533 if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
534 pr_info("Error from i2c_non_blocking_op(5)\n");
535
536 msg_done_handler(ssif_info, -EIO, NULL, 0);
537 }
538}
539
540static int start_resend(struct ssif_info *ssif_info);
541
542static void msg_done_handler(struct ssif_info *ssif_info, int result,
543 unsigned char *data, unsigned int len)
544{
545 struct ipmi_smi_msg *msg;
546 unsigned long oflags, *flags;
547 int rv;
548
549 /*
550 * We are single-threaded here, so no need for a lock until we
551 * start messing with driver states or the queues.
552 */
553
554 if (result < 0) {
555 ssif_info->retries_left--;
556 if (ssif_info->retries_left > 0) {
557 ssif_inc_stat(ssif_info, receive_retries);
558
559 mod_timer(&ssif_info->retry_timer,
560 jiffies + SSIF_MSG_JIFFIES);
561 ssif_info->rtc_us_timer = SSIF_MSG_USEC;
562 return;
563 }
564
565 ssif_inc_stat(ssif_info, receive_errors);
566
567 if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
568 pr_info("Error in msg_done_handler: %d\n", result);
569 len = 0;
570 goto continue_op;
571 }
572
573 if ((len > 1) && (ssif_info->multi_pos == 0)
574 && (data[0] == 0x00) && (data[1] == 0x01)) {
575 /* Start of multi-part read. Start the next transaction. */
576 int i;
577
578 ssif_inc_stat(ssif_info, received_message_parts);
579
580 /* Remove the multi-part read marker. */
581 for (i = 0; i < (len-2); i++)
582 ssif_info->data[i] = data[i+2];
583 len -= 2;
584 ssif_info->multi_len = len;
585 ssif_info->multi_pos = 1;
586
587 rv = ssif_i2c_send(ssif_info, msg_done_handler, I2C_SMBUS_READ,
588 SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
589 ssif_info->recv, I2C_SMBUS_BLOCK_DATA);
590 if (rv < 0) {
591 if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
592 pr_info("Error from i2c_non_blocking_op(1)\n");
593
594 result = -EIO;
595 } else
596 return;
597 } else if (ssif_info->multi_pos) {
598 /* Middle of multi-part read. Start the next transaction. */
599 int i;
600 unsigned char blocknum;
601
602 if (len == 0) {
603 result = -EIO;
604 if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
605 pr_info(PFX "Middle message with no data\n");
606
607 goto continue_op;
608 }
609
610 blocknum = data[0];
611
612 if (ssif_info->multi_len+len-1 > IPMI_MAX_MSG_LENGTH) {
613 /* Received message too big, abort the operation. */
614 result = -E2BIG;
615 if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
616 pr_info("Received message too big\n");
617
618 goto continue_op;
619 }
620
621 /* Remove the blocknum from the data. */
622 for (i = 0; i < (len-1); i++)
623 ssif_info->data[i+ssif_info->multi_len] = data[i+1];
624 len--;
625 ssif_info->multi_len += len;
626 if (blocknum == 0xff) {
627 /* End of read */
628 len = ssif_info->multi_len;
629 data = ssif_info->data;
630 } else if ((blocknum+1) != ssif_info->multi_pos) {
631 /*
632 * Out of sequence block, just abort. Block
633 * numbers start at zero for the second block,
634 * but multi_pos starts at one, so the +1.
635 */
636 result = -EIO;
637 } else {
638 ssif_inc_stat(ssif_info, received_message_parts);
639
640 ssif_info->multi_pos++;
641
642 rv = ssif_i2c_send(ssif_info, msg_done_handler,
643 I2C_SMBUS_READ,
644 SSIF_IPMI_MULTI_PART_RESPONSE_MIDDLE,
645 ssif_info->recv,
646 I2C_SMBUS_BLOCK_DATA);
647 if (rv < 0) {
648 if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
649 pr_info(PFX
650 "Error from i2c_non_blocking_op(2)\n");
651
652 result = -EIO;
653 } else
654 return;
655 }
656 }
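/*
 * To sketch the multi-part read framing handled above: the first
 * READ returns up to 32 bytes, where a leading 0x00 0x01 marks the
 * start of a multi-part response. Each MIDDLE read then begins with
 * a block number (0 for the second block, counting up), and a block
 * number of 0xff marks the final block. The markers and block
 * numbers are stripped as the parts are accumulated in
 * ssif_info->data.
 */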
657
658 if (result < 0) {
659 ssif_inc_stat(ssif_info, receive_errors);
660 } else {
661 ssif_inc_stat(ssif_info, received_messages);
662 ssif_inc_stat(ssif_info, received_message_parts);
663 }
664
665
666 continue_op:
667 if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
668 pr_info(PFX "DONE 1: state = %d, result=%d.\n",
669 ssif_info->ssif_state, result);
670
671 flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
672 msg = ssif_info->curr_msg;
673 if (msg) {
674 msg->rsp_size = len;
675 if (msg->rsp_size > IPMI_MAX_MSG_LENGTH)
676 msg->rsp_size = IPMI_MAX_MSG_LENGTH;
677 memcpy(msg->rsp, data, msg->rsp_size);
678 ssif_info->curr_msg = NULL;
679 }
680
681 switch (ssif_info->ssif_state) {
682 case SSIF_NORMAL:
683 ipmi_ssif_unlock_cond(ssif_info, flags);
684 if (!msg)
685 break;
686
687 if (result < 0)
688 return_hosed_msg(ssif_info, msg);
689 else
690 deliver_recv_msg(ssif_info, msg);
691 break;
692
693 case SSIF_GETTING_FLAGS:
694 /* We got the flags from the SSIF, now handle them. */
695 if ((result < 0) || (len < 4) || (data[2] != 0)) {
696 /*
697 * Error fetching flags, or invalid length,
698 * just give up for now.
699 */
700 ssif_info->ssif_state = SSIF_NORMAL;
701 ipmi_ssif_unlock_cond(ssif_info, flags);
702 pr_warn(PFX "Error getting flags: %d %d, %x\n",
703 result, len, data[2]);
704 } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
705 || data[1] != IPMI_GET_MSG_FLAGS_CMD) {
706 pr_warn(PFX "Invalid response getting flags: %x %x\n",
707 data[0], data[1]);
708 } else {
709 ssif_inc_stat(ssif_info, flag_fetches);
710 ssif_info->msg_flags = data[3];
711 handle_flags(ssif_info, flags);
712 }
713 break;
714
715 case SSIF_CLEARING_FLAGS:
716 /* We cleared the flags. */
717 if ((result < 0) || (len < 3) || (data[2] != 0)) {
718 /* Error clearing flags */
719 pr_warn(PFX "Error clearing flags: %d %d, %x\n",
720 result, len, data[2]);
721 } else if (data[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
722 || data[1] != IPMI_CLEAR_MSG_FLAGS_CMD) {
723 pr_warn(PFX "Invalid response clearing flags: %x %x\n",
724 data[0], data[1]);
725 }
726 ssif_info->ssif_state = SSIF_NORMAL;
727 ipmi_ssif_unlock_cond(ssif_info, flags);
728 break;
729
730 case SSIF_GETTING_EVENTS:
731 if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
732 /* Error getting event, probably done. */
733 msg->done(msg);
734
735 /* Take off the event flag. */
736 ssif_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
737 handle_flags(ssif_info, flags);
738 } else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
739 || msg->rsp[1] != IPMI_READ_EVENT_MSG_BUFFER_CMD) {
740 pr_warn(PFX "Invalid response getting events: %x %x\n",
741 msg->rsp[0], msg->rsp[1]);
742 msg->done(msg);
743 /* Take off the event flag. */
744 ssif_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
745 handle_flags(ssif_info, flags);
746 } else {
747 handle_flags(ssif_info, flags);
748 ssif_inc_stat(ssif_info, events);
749 deliver_recv_msg(ssif_info, msg);
750 }
751 break;
752
753 case SSIF_GETTING_MESSAGES:
754 if ((result < 0) || (len < 3) || (msg->rsp[2] != 0)) {
755 /* Error getting event, probably done. */
756 msg->done(msg);
757
758 /* Take off the msg flag. */
759 ssif_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
760 handle_flags(ssif_info, flags);
761 } else if (msg->rsp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2
762 || msg->rsp[1] != IPMI_GET_MSG_CMD) {
763 pr_warn(PFX "Invalid response getting messages: %x %x\n",
764 msg->rsp[0], msg->rsp[1]);
765 msg->done(msg);
766
767 /* Take off the msg flag. */
768 ssif_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
769 handle_flags(ssif_info, flags);
770 } else {
771 ssif_inc_stat(ssif_info, incoming_messages);
772 handle_flags(ssif_info, flags);
773 deliver_recv_msg(ssif_info, msg);
774 }
775 break;
776 }
777
778 flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
779 if (SSIF_IDLE(ssif_info) && !ssif_info->stopping) {
780 if (ssif_info->req_events)
781 start_event_fetch(ssif_info, flags);
782 else if (ssif_info->req_flags)
783 start_flag_fetch(ssif_info, flags);
784 else
785 start_next_msg(ssif_info, flags);
786 } else
787 ipmi_ssif_unlock_cond(ssif_info, flags);
788
789 if (ssif_info->ssif_debug & SSIF_DEBUG_STATE)
790 pr_info(PFX "DONE 2: state = %d.\n", ssif_info->ssif_state);
791}
792
793static void msg_written_handler(struct ssif_info *ssif_info, int result,
794 unsigned char *data, unsigned int len)
795{
796 int rv;
797
798 /* We are single-threaded here, so no need for a lock. */
799 if (result < 0) {
800 ssif_info->retries_left--;
801 if (ssif_info->retries_left > 0) {
802 if (!start_resend(ssif_info)) {
803 ssif_inc_stat(ssif_info, send_retries);
804 return;
805 }
806 /* request failed, just return the error. */
807 ssif_inc_stat(ssif_info, send_errors);
808
809 if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
810 pr_info(PFX
811 "Out of retries in msg_written_handler\n");
812 msg_done_handler(ssif_info, -EIO, NULL, 0);
813 return;
814 }
815
816 ssif_inc_stat(ssif_info, send_errors);
817
818 /*
819 * Got an error on transmit, let the done routine
820 * handle it.
821 */
822 if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
823 pr_info("Error in msg_written_handler: %d\n", result);
824
825 msg_done_handler(ssif_info, result, NULL, 0);
826 return;
827 }
828
829 if (ssif_info->multi_data) {
830 /* In the middle of a multi-data write. */
831 int left;
832 unsigned char *data_to_send;
833
834 ssif_inc_stat(ssif_info, sent_messages_parts);
835 left = ssif_info->multi_len - ssif_info->multi_pos;
836 if (left > 32)
837 left = 32;
838 /* Length byte, at the start of the part being sent. */
839 data_to_send = ssif_info->multi_data + ssif_info->multi_pos;
840 data_to_send[0] = left;
841 ssif_info->multi_pos += left;
842 if (left < 32)
843 /*
844 * Write is finished. Note that the transaction
845 * must end with a write of less than 32 bytes,
846 * even if it is zero bytes.
847 */
848 ssif_info->multi_data = NULL;
849
850 rv = ssif_i2c_send(ssif_info, msg_written_handler,
851 I2C_SMBUS_WRITE,
852 SSIF_IPMI_MULTI_PART_REQUEST_MIDDLE,
853 data_to_send,
854 I2C_SMBUS_BLOCK_DATA);
855 if (rv < 0) {
856 /* request failed, just return the error. */
857 ssif_inc_stat(ssif_info, send_errors);
858
859 if (ssif_info->ssif_debug & SSIF_DEBUG_MSG)
860 pr_info("Error from i2c_non_blocking_op(3)\n");
861 msg_done_handler(ssif_info, -EIO, NULL, 0);
862 }
863 } else {
864 ssif_inc_stat(ssif_info, sent_messages);
865 ssif_inc_stat(ssif_info, sent_messages_parts);
866
867 /* Wait a jiffie then request the next message */
868 ssif_info->retries_left = SSIF_RECV_RETRIES;
869 ssif_info->rtc_us_timer = SSIF_MSG_PART_USEC;
870 mod_timer(&ssif_info->retry_timer,
871 jiffies + SSIF_MSG_PART_JIFFIES);
872 return;
873 }
874}
875
876static int start_resend(struct ssif_info *ssif_info)
877{
878 int rv;
879 int command;
880
881 if (ssif_info->data_len > 32) {
882 command = SSIF_IPMI_MULTI_PART_REQUEST_START;
883 ssif_info->multi_data = ssif_info->data;
884 ssif_info->multi_len = ssif_info->data_len;
885 /*
886 * Subtle thing, this is 32, not 33, because we will
887 * overwrite the thing at position 32 (which was just
888 * transmitted) with the new length.
889 */
890 ssif_info->multi_pos = 32;
891 ssif_info->data[0] = 32;
892 } else {
893 ssif_info->multi_data = NULL;
894 command = SSIF_IPMI_REQUEST;
895 ssif_info->data[0] = ssif_info->data_len;
896 }
897
898 rv = ssif_i2c_send(ssif_info, msg_written_handler, I2C_SMBUS_WRITE,
899 command, ssif_info->data, I2C_SMBUS_BLOCK_DATA);
900 if (rv && (ssif_info->ssif_debug & SSIF_DEBUG_MSG))
901 pr_info("Error from i2c_non_blocking_op(4)\n");
902 return rv;
903}
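/*
 * As a rough example of the splitting above: a 70-byte request goes
 * out as a MULTI_PART_REQUEST_START carrying 32 bytes, a
 * MULTI_PART_REQUEST_MIDDLE carrying 32 bytes, and a final
 * MULTI_PART_REQUEST_MIDDLE carrying the remaining 6 bytes; the
 * short (less than 32 byte) last write is what terminates the
 * transaction.
 */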
904
905static int start_send(struct ssif_info *ssif_info,
906 unsigned char *data,
907 unsigned int len)
908{
909 if (len > IPMI_MAX_MSG_LENGTH)
910 return -E2BIG;
911 if (len > ssif_info->max_xmit_msg_size)
912 return -E2BIG;
913
914 ssif_info->retries_left = SSIF_SEND_RETRIES;
915 memcpy(ssif_info->data+1, data, len);
916 ssif_info->data_len = len;
917 return start_resend(ssif_info);
918}
919
920/* Must be called with the message lock held. */
921static void start_next_msg(struct ssif_info *ssif_info, unsigned long *flags)
922{
923 struct ipmi_smi_msg *msg;
924 unsigned long oflags;
925
926 restart:
927 if (!SSIF_IDLE(ssif_info)) {
928 ipmi_ssif_unlock_cond(ssif_info, flags);
929 return;
930 }
931
932 if (!ssif_info->waiting_msg) {
933 ssif_info->curr_msg = NULL;
934 ipmi_ssif_unlock_cond(ssif_info, flags);
935 } else {
936 int rv;
937
938 ssif_info->curr_msg = ssif_info->waiting_msg;
939 ssif_info->waiting_msg = NULL;
940 ipmi_ssif_unlock_cond(ssif_info, flags);
941 rv = start_send(ssif_info,
942 ssif_info->curr_msg->data,
943 ssif_info->curr_msg->data_size);
944 if (rv) {
945 msg = ssif_info->curr_msg;
946 ssif_info->curr_msg = NULL;
947 return_hosed_msg(ssif_info, msg);
948 flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
949 goto restart;
950 }
951 }
952}
953
954static void sender(void *send_info,
955 struct ipmi_smi_msg *msg)
956{
957 struct ssif_info *ssif_info = (struct ssif_info *) send_info;
958 unsigned long oflags, *flags;
959
960 BUG_ON(ssif_info->waiting_msg);
961 ssif_info->waiting_msg = msg;
962
963 flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
964 start_next_msg(ssif_info, flags);
965
966 if (ssif_info->ssif_debug & SSIF_DEBUG_TIMING) {
967 struct timeval t;
968
969 do_gettimeofday(&t);
970 pr_info("**Enqueue %02x %02x: %ld.%6.6ld\n",
971 msg->data[0], msg->data[1], t.tv_sec, t.tv_usec);
972 }
973}
974
975static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
976{
977 struct ssif_info *ssif_info = send_info;
978
979 data->addr_src = ssif_info->addr_source;
980 data->dev = &ssif_info->client->dev;
981 data->addr_info = ssif_info->addr_info;
982 get_device(data->dev);
983
984 return 0;
985}
986
987/*
988 * Instead of having our own timer to periodically check the message
989 * flags, we let the message handler drive us.
990 */
991static void request_events(void *send_info)
992{
993 struct ssif_info *ssif_info = (struct ssif_info *) send_info;
994 unsigned long oflags, *flags;
995
996 if (!ssif_info->has_event_buffer)
997 return;
998
999 flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
1000 /*
1001 * Request flags first, not events, because the lower layer
1002 * doesn't have a way to send an attention. But make sure
1003 * event checking still happens.
1004 */
1005 ssif_info->req_events = true;
1006 if (SSIF_IDLE(ssif_info))
1007 start_flag_fetch(ssif_info, flags);
1008 else {
1009 ssif_info->req_flags = true;
1010 ipmi_ssif_unlock_cond(ssif_info, flags);
1011 }
1012}
1013
1014static int inc_usecount(void *send_info)
1015{
1016 struct ssif_info *ssif_info = send_info;
1017
1018 if (!i2c_get_adapter(ssif_info->client->adapter->nr))
1019 return -ENODEV;
1020
1021 i2c_use_client(ssif_info->client);
1022 return 0;
1023}
1024
1025static void dec_usecount(void *send_info)
1026{
1027 struct ssif_info *ssif_info = send_info;
1028
1029 i2c_release_client(ssif_info->client);
1030 i2c_put_adapter(ssif_info->client->adapter);
1031}
1032
1033static int ssif_start_processing(void *send_info,
1034 ipmi_smi_t intf)
1035{
1036 struct ssif_info *ssif_info = send_info;
1037
1038 ssif_info->intf = intf;
1039
1040 return 0;
1041}
1042
1043#define MAX_SSIF_BMCS 4
1044
1045static unsigned short addr[MAX_SSIF_BMCS];
1046static int num_addrs;
1047module_param_array(addr, ushort, &num_addrs, 0);
1048MODULE_PARM_DESC(addr, "The addresses to scan for IPMI BMCs on the SSIFs.");
1049
1050static char *adapter_name[MAX_SSIF_BMCS];
1051static int num_adapter_names;
1052module_param_array(adapter_name, charp, &num_adapter_names, 0);
1053MODULE_PARM_DESC(adapter_name, "The string name of the I2C device that has the BMC. By default all devices are scanned.");
1054
1055static int slave_addrs[MAX_SSIF_BMCS];
1056static int num_slave_addrs;
1057module_param_array(slave_addrs, int, &num_slave_addrs, 0);
1058MODULE_PARM_DESC(slave_addrs,
1059 "The default IPMB slave address for the controller.");
1060
1061/*
1062 * Bit 0 enables message debugging, bit 1 enables state debugging, and
1063 * bit 2 enables timing debugging. This is an array indexed by
1064 * interface number.
1065 */
1066static int dbg[MAX_SSIF_BMCS];
1067static int num_dbg;
1068module_param_array(dbg, int, &num_dbg, 0);
1069MODULE_PARM_DESC(dbg, "Turn on debugging.");
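/*
 * For example, loading the module with dbg=5 would set SSIF_DEBUG_MSG
 * and SSIF_DEBUG_TIMING (bits 0 and 2) for the first interface.
 */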
1070
1071static bool ssif_dbg_probe;
1072module_param_named(dbg_probe, ssif_dbg_probe, bool, 0);
1073MODULE_PARM_DESC(dbg_probe, "Enable debugging of probing of adapters.");
1074
1075static int use_thread;
1076module_param(use_thread, int, 0);
1077MODULE_PARM_DESC(use_thread, "Use the thread interface.");
1078
1079static bool ssif_tryacpi = 1;
1080module_param_named(tryacpi, ssif_tryacpi, bool, 0);
1081MODULE_PARM_DESC(tryacpi, "Setting this to zero will disable the default scan of the interfaces identified via ACPI");
1082
1083static bool ssif_trydmi = 1;
1084module_param_named(trydmi, ssif_trydmi, bool, 0);
1085MODULE_PARM_DESC(trydmi, "Setting this to zero will disable the default scan of the interfaces identified via DMI (SMBIOS)");
1086
1087static DEFINE_MUTEX(ssif_infos_mutex);
1088static LIST_HEAD(ssif_infos);
1089
1090static int ssif_remove(struct i2c_client *client)
1091{
1092 struct ssif_info *ssif_info = i2c_get_clientdata(client);
1093 int rv;
1094
1095 if (!ssif_info)
1096 return 0;
1097
1098 i2c_set_clientdata(client, NULL);
1099
1100 /*
1101 * After this point, we won't deliver anything asynchronously
1102 * to the message handler. We can unregister ourselves.
1103 */
1104 rv = ipmi_unregister_smi(ssif_info->intf);
1105 if (rv) {
1106 pr_err(PFX "Unable to unregister device: errno=%d\n", rv);
1107 return rv;
1108 }
1109 ssif_info->intf = NULL;
1110
1111 /* make sure the driver is not looking for flags any more. */
1112 while (ssif_info->ssif_state != SSIF_NORMAL)
1113 schedule_timeout(1);
1114
1115 ssif_info->stopping = true;
1116 del_timer_sync(&ssif_info->retry_timer);
1117 if (ssif_info->thread) {
1118 complete(&ssif_info->wake_thread);
1119 kthread_stop(ssif_info->thread);
1120 }
1121
1122 /*
1123 * No message can be outstanding now, we have removed the
1124 * upper layer and it permitted us to do so.
1125 */
1126 kfree(ssif_info);
1127 return 0;
1128}
1129
1130static int do_cmd(struct i2c_client *client, int len, unsigned char *msg,
1131 int *resp_len, unsigned char *resp)
1132{
1133 int retry_cnt;
1134 int ret;
1135
1136 retry_cnt = SSIF_SEND_RETRIES;
1137 retry1:
1138 ret = i2c_smbus_write_block_data(client, SSIF_IPMI_REQUEST, len, msg);
1139 if (ret) {
1140 retry_cnt--;
1141 if (retry_cnt > 0)
1142 goto retry1;
1143 return -ENODEV;
1144 }
1145
1146 ret = -ENODEV;
1147 retry_cnt = SSIF_RECV_RETRIES;
1148 while (retry_cnt > 0) {
1149 ret = i2c_smbus_read_block_data(client, SSIF_IPMI_RESPONSE,
1150 resp);
1151 if (ret > 0)
1152 break;
1153 msleep(SSIF_MSG_MSEC);
1154 retry_cnt--;
1155 if (retry_cnt <= 0)
1156 break;
1157 }
1158
1159 if (ret > 0) {
1160 /* Validate that the response is correct. */
1161 if (ret < 3 ||
1162 (resp[0] != (msg[0] | (1 << 2))) ||
1163 (resp[1] != msg[1]))
1164 ret = -EINVAL;
1165 else {
1166 *resp_len = ret;
1167 ret = 0;
1168 }
1169 }
1170
1171 return ret;
1172}
1173
1174static int ssif_detect(struct i2c_client *client, struct i2c_board_info *info)
1175{
1176 unsigned char *resp;
1177 unsigned char msg[3];
1178 int rv;
1179 int len;
1180
1181 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1182 if (!resp)
1183 return -ENOMEM;
1184
1185 /* Do a Get Device ID command, since it is required. */
1186 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1187 msg[1] = IPMI_GET_DEVICE_ID_CMD;
1188 rv = do_cmd(client, 2, msg, &len, resp);
1189 if (rv)
1190 rv = -ENODEV;
1191 else
1192 strlcpy(info->type, DEVICE_NAME, I2C_NAME_SIZE);
1193 kfree(resp);
1194 return rv;
1195}
1196
1197static int smi_type_proc_show(struct seq_file *m, void *v)
1198{
1199 return seq_puts(m, "ssif\n");
1200}
1201
1202static int smi_type_proc_open(struct inode *inode, struct file *file)
1203{
1204 return single_open(file, smi_type_proc_show, inode->i_private);
1205}
1206
1207static const struct file_operations smi_type_proc_ops = {
1208 .open = smi_type_proc_open,
1209 .read = seq_read,
1210 .llseek = seq_lseek,
1211 .release = single_release,
1212};
1213
1214static int smi_stats_proc_show(struct seq_file *m, void *v)
1215{
1216 struct ssif_info *ssif_info = m->private;
1217
1218 seq_printf(m, "sent_messages: %u\n",
1219 ssif_get_stat(ssif_info, sent_messages));
1220 seq_printf(m, "sent_messages_parts: %u\n",
1221 ssif_get_stat(ssif_info, sent_messages_parts));
1222 seq_printf(m, "send_retries: %u\n",
1223 ssif_get_stat(ssif_info, send_retries));
1224 seq_printf(m, "send_errors: %u\n",
1225 ssif_get_stat(ssif_info, send_errors));
1226 seq_printf(m, "received_messages: %u\n",
1227 ssif_get_stat(ssif_info, received_messages));
1228 seq_printf(m, "received_message_parts: %u\n",
1229 ssif_get_stat(ssif_info, received_message_parts));
1230 seq_printf(m, "receive_retries: %u\n",
1231 ssif_get_stat(ssif_info, receive_retries));
1232 seq_printf(m, "receive_errors: %u\n",
1233 ssif_get_stat(ssif_info, receive_errors));
1234 seq_printf(m, "flag_fetches: %u\n",
1235 ssif_get_stat(ssif_info, flag_fetches));
1236 seq_printf(m, "hosed: %u\n",
1237 ssif_get_stat(ssif_info, hosed));
1238 seq_printf(m, "events: %u\n",
1239 ssif_get_stat(ssif_info, events));
1240 seq_printf(m, "watchdog_pretimeouts: %u\n",
1241 ssif_get_stat(ssif_info, watchdog_pretimeouts));
1242 return 0;
1243}
1244
1245static int smi_stats_proc_open(struct inode *inode, struct file *file)
1246{
1247 return single_open(file, smi_stats_proc_show, PDE_DATA(inode));
1248}
1249
1250static const struct file_operations smi_stats_proc_ops = {
1251 .open = smi_stats_proc_open,
1252 .read = seq_read,
1253 .llseek = seq_lseek,
1254 .release = single_release,
1255};
1256
1257static struct ssif_addr_info *ssif_info_find(unsigned short addr,
1258 char *adapter_name,
1259 bool match_null_name)
1260{
1261 struct ssif_addr_info *info, *found = NULL;
1262
1263restart:
1264 list_for_each_entry(info, &ssif_infos, link) {
1265 if (info->binfo.addr == addr) {
1266 if (info->adapter_name || adapter_name) {
1267 if (!info->adapter_name != !adapter_name) {
1268 /* One is NULL and one is not */
1269 continue;
1270 }
1271 if (strcmp(info->adapter_name, adapter_name))
1272 /* Names do not match */
1273 continue;
1274 }
1275 found = info;
1276 break;
1277 }
1278 }
1279
1280 if (!found && match_null_name) {
1281 /* Try to get an exact match first, then try with a NULL name */
1282 adapter_name = NULL;
1283 match_null_name = false;
1284 goto restart;
1285 }
1286
1287 return found;
1288}
1289
1290static bool check_acpi(struct ssif_info *ssif_info, struct device *dev)
1291{
1292#ifdef CONFIG_ACPI
1293 acpi_handle acpi_handle;
1294
1295 acpi_handle = ACPI_HANDLE(dev);
1296 if (acpi_handle) {
1297 ssif_info->addr_source = SI_ACPI;
1298 ssif_info->addr_info.acpi_info.acpi_handle = acpi_handle;
1299 return true;
1300 }
1301#endif
1302 return false;
1303}
1304
1305static int ssif_probe(struct i2c_client *client, const struct i2c_device_id *id)
1306{
1307 unsigned char msg[3];
1308 unsigned char *resp;
1309 struct ssif_info *ssif_info;
1310 int rv = 0;
1311 int len;
1312 int i;
1313 u8 slave_addr = 0;
1314 struct ssif_addr_info *addr_info = NULL;
1315
1316
1317 resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
1318 if (!resp)
1319 return -ENOMEM;
1320
1321 ssif_info = kzalloc(sizeof(*ssif_info), GFP_KERNEL);
1322 if (!ssif_info) {
1323 kfree(resp);
1324 return -ENOMEM;
1325 }
1326
1327 if (!check_acpi(ssif_info, &client->dev)) {
1328 addr_info = ssif_info_find(client->addr, client->adapter->name,
1329 true);
1330 if (!addr_info) {
1331 /* Must have come in through sysfs. */
1332 ssif_info->addr_source = SI_HOTMOD;
1333 } else {
1334 ssif_info->addr_source = addr_info->addr_src;
1335 ssif_info->ssif_debug = addr_info->debug;
1336 ssif_info->addr_info = addr_info->addr_info;
1337 slave_addr = addr_info->slave_addr;
1338 }
1339 }
1340
1341 pr_info(PFX "Trying %s-specified SSIF interface at i2c address 0x%x, adapter %s, slave address 0x%x\n",
1342 ipmi_addr_src_to_str(ssif_info->addr_source),
1343 client->addr, client->adapter->name, slave_addr);
1344
1345 /*
1346 * Do a Get Device ID command, since it comes back with some
1347 * useful info.
1348 */
1349 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1350 msg[1] = IPMI_GET_DEVICE_ID_CMD;
1351 rv = do_cmd(client, 2, msg, &len, resp);
1352 if (rv)
1353 goto out;
1354
1355 rv = ipmi_demangle_device_id(resp, len, &ssif_info->device_id);
1356 if (rv)
1357 goto out;
1358
1359 ssif_info->client = client;
1360 i2c_set_clientdata(client, ssif_info);
1361
1362 /* Now check for system interface capabilities */
1363 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1364 msg[1] = IPMI_GET_SYSTEM_INTERFACE_CAPABILITIES_CMD;
1365 msg[2] = 0; /* SSIF */
1366 rv = do_cmd(client, 3, msg, &len, resp);
1367 if (!rv && (len >= 3) && (resp[2] == 0)) {
1368 if (len < 7) {
1369 if (ssif_dbg_probe)
1370 pr_info(PFX "SSIF info too short: %d\n", len);
1371 goto no_support;
1372 }
1373
1374 /* Got a good SSIF response, handle it. */
1375 ssif_info->max_xmit_msg_size = resp[5];
1376 ssif_info->max_recv_msg_size = resp[6];
1377 ssif_info->multi_support = (resp[4] >> 6) & 0x3;
1378 ssif_info->supports_pec = (resp[4] >> 3) & 0x1;
1379
1380 /* Sanitize the data */
1381 switch (ssif_info->multi_support) {
1382 case SSIF_NO_MULTI:
1383 if (ssif_info->max_xmit_msg_size > 32)
1384 ssif_info->max_xmit_msg_size = 32;
1385 if (ssif_info->max_recv_msg_size > 32)
1386 ssif_info->max_recv_msg_size = 32;
1387 break;
1388
1389 case SSIF_MULTI_2_PART:
1390 if (ssif_info->max_xmit_msg_size > 64)
1391 ssif_info->max_xmit_msg_size = 64;
1392 if (ssif_info->max_recv_msg_size > 62)
1393 ssif_info->max_recv_msg_size = 62;
1394 break;
1395
1396 case SSIF_MULTI_n_PART:
1397 break;
1398
1399 default:
1400 /* Data is not sane, just give up. */
1401 goto no_support;
1402 }
1403 } else {
1404 no_support:
1405 /* Assume no multi-part or PEC support */
1406 pr_info(PFX "Error fetching SSIF: %d %d %2.2x, your system probably doesn't support this command so using defaults\n",
1407 rv, len, resp[2]);
1408
1409 ssif_info->max_xmit_msg_size = 32;
1410 ssif_info->max_recv_msg_size = 32;
1411 ssif_info->multi_support = SSIF_NO_MULTI;
1412 ssif_info->supports_pec = 0;
1413 }
1414
1415 /* Make sure the NMI timeout is cleared. */
1416 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1417 msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
1418 msg[2] = WDT_PRE_TIMEOUT_INT;
1419 rv = do_cmd(client, 3, msg, &len, resp);
1420 if (rv || (len < 3) || (resp[2] != 0))
1421 pr_warn(PFX "Unable to clear message flags: %d %d %2.2x\n",
1422 rv, len, resp[2]);
1423
1424 /* Attempt to enable the event buffer. */
1425 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1426 msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
1427 rv = do_cmd(client, 2, msg, &len, resp);
1428 if (rv || (len < 4) || (resp[2] != 0)) {
1429 pr_warn(PFX "Error getting global enables: %d %d %2.2x\n",
1430 rv, len, resp[2]);
1431 rv = 0; /* Not fatal */
1432 goto found;
1433 }
1434
1435 if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
1436 ssif_info->has_event_buffer = true;
1437 /* buffer is already enabled, nothing to do. */
1438 goto found;
1439 }
1440
1441 msg[0] = IPMI_NETFN_APP_REQUEST << 2;
1442 msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
1443 msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
1444 rv = do_cmd(client, 3, msg, &len, resp);
1445 if (rv || (len < 2)) {
1446 pr_warn(PFX "Error setting global enables: %d %d %2.2x\n",
1447 rv, len, resp[2]);
1448 rv = 0; /* Not fatal */
1449 goto found;
1450 }
1451
1452 if (resp[2] == 0)
1453 /* A successful return means the event buffer is supported. */
1454 ssif_info->has_event_buffer = true;
1455
1456 found:
1457 ssif_info->intf_num = atomic_inc_return(&next_intf);
1458
1459 if (ssif_dbg_probe) {
1460 pr_info("ssif_probe: i2c_probe found device at i2c address %x\n",
1461 client->addr);
1462 }
1463
1464 spin_lock_init(&ssif_info->lock);
1465 ssif_info->ssif_state = SSIF_NORMAL;
1466 init_timer(&ssif_info->retry_timer);
1467 ssif_info->retry_timer.data = (unsigned long) ssif_info;
1468 ssif_info->retry_timer.function = retry_timeout;
1469
1470 for (i = 0; i < SSIF_NUM_STATS; i++)
1471 atomic_set(&ssif_info->stats[i], 0);
1472
1473 if (ssif_info->supports_pec)
1474 ssif_info->client->flags |= I2C_CLIENT_PEC;
1475
1476 ssif_info->handlers.owner = THIS_MODULE;
1477 ssif_info->handlers.start_processing = ssif_start_processing;
1478 ssif_info->handlers.get_smi_info = get_smi_info;
1479 ssif_info->handlers.sender = sender;
1480 ssif_info->handlers.request_events = request_events;
1481 ssif_info->handlers.inc_usecount = inc_usecount;
1482 ssif_info->handlers.dec_usecount = dec_usecount;
1483
1484 {
1485 unsigned int thread_num;
1486
1487 thread_num = ((ssif_info->client->adapter->nr << 8) |
1488 ssif_info->client->addr);
1489 init_completion(&ssif_info->wake_thread);
1490 ssif_info->thread = kthread_run(ipmi_ssif_thread, ssif_info,
1491 "kssif%4.4x", thread_num);
1492 if (IS_ERR(ssif_info->thread)) {
1493 rv = PTR_ERR(ssif_info->thread);
1494 dev_notice(&ssif_info->client->dev,
1495 "Could not start kernel thread: error %d\n",
1496 rv);
1497 goto out;
1498 }
1499 }
1500
1501 rv = ipmi_register_smi(&ssif_info->handlers,
1502 ssif_info,
1503 &ssif_info->device_id,
1504 &ssif_info->client->dev,
1505 slave_addr);
1506 if (rv) {
1507 pr_err(PFX "Unable to register device: error %d\n", rv);
1508 goto out;
1509 }
1510
1511 rv = ipmi_smi_add_proc_entry(ssif_info->intf, "type",
1512 &smi_type_proc_ops,
1513 ssif_info);
1514 if (rv) {
1515 pr_err(PFX "Unable to create proc entry: %d\n", rv);
1516 goto out_err_unreg;
1517 }
1518
1519 rv = ipmi_smi_add_proc_entry(ssif_info->intf, "ssif_stats",
1520 &smi_stats_proc_ops,
1521 ssif_info);
1522 if (rv) {
1523 pr_err(PFX "Unable to create proc entry: %d\n", rv);
1524 goto out_err_unreg;
1525 }
1526
1527 out:
1528 if (rv)
1529 kfree(ssif_info);
1530 kfree(resp);
1531 return rv;
1532
1533 out_err_unreg:
1534 ipmi_unregister_smi(ssif_info->intf);
1535 goto out;
1536}
1537
1538static int ssif_adapter_handler(struct device *adev, void *opaque)
1539{
1540 struct ssif_addr_info *addr_info = opaque;
1541
1542 if (adev->type != &i2c_adapter_type)
1543 return 0;
1544
1545 i2c_new_device(to_i2c_adapter(adev), &addr_info->binfo);
1546
1547 if (!addr_info->adapter_name)
1548 return 1; /* Only try the first I2C adapter by default. */
1549 return 0;
1550}
1551
1552static int new_ssif_client(int addr, char *adapter_name,
1553 int debug, int slave_addr,
1554 enum ipmi_addr_src addr_src)
1555{
1556 struct ssif_addr_info *addr_info;
1557 int rv = 0;
1558
1559 mutex_lock(&ssif_infos_mutex);
1560 if (ssif_info_find(addr, adapter_name, false)) {
1561 rv = -EEXIST;
1562 goto out_unlock;
1563 }
1564
1565 addr_info = kzalloc(sizeof(*addr_info), GFP_KERNEL);
1566 if (!addr_info) {
1567 rv = -ENOMEM;
1568 goto out_unlock;
1569 }
1570
1571 if (adapter_name) {
1572 addr_info->adapter_name = kstrdup(adapter_name, GFP_KERNEL);
1573 if (!addr_info->adapter_name) {
1574 kfree(addr_info);
1575 rv = -ENOMEM;
1576 goto out_unlock;
1577 }
1578 }
1579
1580 strncpy(addr_info->binfo.type, DEVICE_NAME,
1581 sizeof(addr_info->binfo.type));
1582 addr_info->binfo.addr = addr;
1583 addr_info->binfo.platform_data = addr_info;
1584 addr_info->debug = debug;
1585 addr_info->slave_addr = slave_addr;
1586 addr_info->addr_src = addr_src;
1587
1588 list_add_tail(&addr_info->link, &ssif_infos);
1589
1590 if (initialized)
1591 i2c_for_each_dev(addr_info, ssif_adapter_handler);
1592 /* Otherwise address list will get it */
1593
1594out_unlock:
1595 mutex_unlock(&ssif_infos_mutex);
1596 return rv;
1597}
1598
1599static void free_ssif_clients(void)
1600{
1601 struct ssif_addr_info *info, *tmp;
1602
1603 mutex_lock(&ssif_infos_mutex);
1604 list_for_each_entry_safe(info, tmp, &ssif_infos, link) {
1605 list_del(&info->link);
1606 kfree(info->adapter_name);
1607 kfree(info);
1608 }
1609 mutex_unlock(&ssif_infos_mutex);
1610}
1611
1612static unsigned short *ssif_address_list(void)
1613{
1614 struct ssif_addr_info *info;
1615 unsigned int count = 0, i;
1616 unsigned short *address_list;
1617
1618 list_for_each_entry(info, &ssif_infos, link)
1619 count++;
1620
1621 address_list = kzalloc(sizeof(*address_list) * (count + 1), GFP_KERNEL);
1622 if (!address_list)
1623 return NULL;
1624
1625 i = 0;
1626 list_for_each_entry(info, &ssif_infos, link) {
1627 unsigned short addr = info->binfo.addr;
1628 int j;
1629
1630 for (j = 0; j < i; j++) {
1631 if (address_list[j] == addr)
1632 goto skip_addr;
1633 }
1634 address_list[i] = addr;
1635skip_addr:
1636 i++;
1637 }
1638 address_list[i] = I2C_CLIENT_END;
1639
1640 return address_list;
1641}
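/*
 * For example, two registered infos at 0x20 and 0x42 (hypothetical
 * values) yield the probe list { 0x20, 0x42, I2C_CLIENT_END }, the
 * terminated-array format the I2C core expects for address_list.
 */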
1642
1643#ifdef CONFIG_ACPI
1644static struct acpi_device_id ssif_acpi_match[] = {
1645 { "IPI0001", 0 },
1646 { },
1647};
1648MODULE_DEVICE_TABLE(acpi, ssif_acpi_match);
1649
1650/*
1651 * Once we get an ACPI failure, we don't try any more, because we go
1652 * through the tables sequentially. Once we don't find a table, there
1653 * are no more.
1654 */
1655static int acpi_failure;
1656
1657/*
1658 * Defined in the IPMI 2.0 spec.
1659 */
1660struct SPMITable {
1661 s8 Signature[4];
1662 u32 Length;
1663 u8 Revision;
1664 u8 Checksum;
1665 s8 OEMID[6];
1666 s8 OEMTableID[8];
1667 s8 OEMRevision[4];
1668 s8 CreatorID[4];
1669 s8 CreatorRevision[4];
1670 u8 InterfaceType;
1671 u8 IPMIlegacy;
1672 s16 SpecificationRevision;
1673
1674 /*
1675 * Bit 0 - SCI interrupt supported
1676 * Bit 1 - I/O APIC/SAPIC
1677 */
1678 u8 InterruptType;
1679
1680 /*
1681 * If bit 0 of InterruptType is set, then this is the SCI
1682 * interrupt in the GPEx_STS register.
1683 */
1684 u8 GPE;
1685
1686 s16 Reserved;
1687
1688 /*
1689 * If bit 1 of InterruptType is set, then this is the I/O
1690 * APIC/SAPIC interrupt.
1691 */
1692 u32 GlobalSystemInterrupt;
1693
1694 /* The actual register address. */
1695 struct acpi_generic_address addr;
1696
1697 u8 UID[4];
1698
1699 s8 spmi_id[1]; /* A '\0' terminated array starts here. */
1700};
1701
1702static int try_init_spmi(struct SPMITable *spmi)
1703{
1704 unsigned short myaddr;
1705
1706 if (num_addrs >= MAX_SSIF_BMCS)
1707 return -1;
1708
1709 if (spmi->IPMIlegacy != 1) {
1710 pr_warn("IPMI: Bad SPMI legacy: %d\n", spmi->IPMIlegacy);
1711 return -ENODEV;
1712 }
1713
1714 if (spmi->InterfaceType != 4)
1715 return -ENODEV;
1716
1717 if (spmi->addr.space_id != ACPI_ADR_SPACE_SMBUS) {
1718 pr_warn(PFX "Invalid ACPI SSIF I/O Address type: %d\n",
1719 spmi->addr.space_id);
1720 return -EIO;
1721 }
1722
1723 myaddr = spmi->addr.address >> 1;
1724
1725 return new_ssif_client(myaddr, NULL, 0, 0, SI_SPMI);
1726}
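/*
 * The >> 1 above converts the 8-bit SMBus (write) address stored in
 * the SPMI table into the 7-bit address the I2C core uses; e.g. a
 * table value of 0x40 becomes client address 0x20.
 */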
1727
1728static void spmi_find_bmc(void)
1729{
1730 acpi_status status;
1731 struct SPMITable *spmi;
1732 int i;
1733
1734 if (acpi_disabled)
1735 return;
1736
1737 if (acpi_failure)
1738 return;
1739
1740 for (i = 0; ; i++) {
1741 status = acpi_get_table(ACPI_SIG_SPMI, i+1,
1742 (struct acpi_table_header **)&spmi);
1743 if (status != AE_OK)
1744 return;
1745
1746 try_init_spmi(spmi);
1747 }
1748}
1749#else
1750static void spmi_find_bmc(void) { }
1751#endif
1752
1753#ifdef CONFIG_DMI
1754static int decode_dmi(const struct dmi_device *dmi_dev)
1755{
1756 struct dmi_header *dm = dmi_dev->device_data;
1757 u8 *data = (u8 *) dm;
1758 u8 len = dm->length;
1759 unsigned short myaddr;
1760 int slave_addr;
1761
1762 if (num_addrs >= MAX_SSIF_BMCS)
1763 return -1;
1764
1765 if (len < 9)
1766 return -1;
1767
1768 if (data[0x04] != 4) /* Not SSIF */
1769 return -1;
1770
1771 if ((data[8] >> 1) == 0) {
1772 /*
1773 * Some broken systems put the I2C address in
1774 * the slave address field. We try to
1775 * accommodate them here.
1776 */
1777 myaddr = data[6] >> 1;
1778 slave_addr = 0;
1779 } else {
1780 myaddr = data[8] >> 1;
1781 slave_addr = data[6];
1782 }
1783
1784 return new_ssif_client(myaddr, NULL, 0, slave_addr, SI_SMBIOS);
1785}
1786
1787static void dmi_iterator(void)
1788{
1789 const struct dmi_device *dev = NULL;
1790
1791 while ((dev = dmi_find_device(DMI_DEV_TYPE_IPMI, NULL, dev)))
1792 decode_dmi(dev);
1793}
1794#else
1795static void dmi_iterator(void) { }
1796#endif
1797
1798static const struct i2c_device_id ssif_id[] = {
1799 { DEVICE_NAME, 0 },
1800 { }
1801};
1802MODULE_DEVICE_TABLE(i2c, ssif_id);
1803
1804static struct i2c_driver ssif_i2c_driver = {
1805 .class = I2C_CLASS_HWMON,
1806 .driver = {
1807 .owner = THIS_MODULE,
1808 .name = DEVICE_NAME
1809 },
1810 .probe = ssif_probe,
1811 .remove = ssif_remove,
1812 .id_table = ssif_id,
1813 .detect = ssif_detect
1814};
1815
1816static int init_ipmi_ssif(void)
1817{
1818 int i;
1819 int rv;
1820
1821 if (initialized)
1822 return 0;
1823
1824 pr_info("IPMI SSIF Interface driver\n");
1825
1826 /* build list for i2c from addr list */
1827 for (i = 0; i < num_addrs; i++) {
1828 rv = new_ssif_client(addr[i], adapter_name[i],
1829 dbg[i], slave_addrs[i],
1830 SI_HARDCODED);
1831 if (rv)
1832 pr_err(PFX
1833 "Couldn't add hardcoded device at addr 0x%x\n",
1834 addr[i]);
1835 }
1836
1837 if (ssif_tryacpi)
1838 ssif_i2c_driver.driver.acpi_match_table =
1839 ACPI_PTR(ssif_acpi_match);
1840 if (ssif_trydmi)
1841 dmi_iterator();
1842 if (ssif_tryacpi)
1843 spmi_find_bmc();
1844
1845 ssif_i2c_driver.address_list = ssif_address_list();
1846
1847 rv = i2c_add_driver(&ssif_i2c_driver);
1848 if (!rv)
1849 initialized = true;
1850
1851 return rv;
1852}
1853module_init(init_ipmi_ssif);
1854
1855static void cleanup_ipmi_ssif(void)
1856{
1857 if (!initialized)
1858 return;
1859
1860 initialized = false;
1861
1862 i2c_del_driver(&ssif_i2c_driver);
1863
1864 free_ssif_clients();
1865}
1866module_exit(cleanup_ipmi_ssif);
1867
1868MODULE_AUTHOR("Todd C Davis <todd.c.davis@intel.com>, Corey Minyard <minyard@acm.org>");
1869MODULE_DESCRIPTION("IPMI driver for management controllers on an SMBus");
1870MODULE_LICENSE("GPL");
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 455fd17d938e..3f44f292d066 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -28,7 +28,7 @@ config COMMON_CLK_WM831X
28 depends on MFD_WM831X 28 depends on MFD_WM831X
29 ---help--- 29 ---help---
30 Supports the clocking subsystem of the WM831x/2x series of 30 Supports the clocking subsystem of the WM831x/2x series of
31 PMICs from Wolfson Microlectronics. 31 PMICs from Wolfson Microelectronics.
32 32
33source "drivers/clk/versatile/Kconfig" 33source "drivers/clk/versatile/Kconfig"
34 34
diff --git a/drivers/clk/clk-ls1x.c b/drivers/clk/clk-ls1x.c
index f20b750235f6..ca80103ac188 100644
--- a/drivers/clk/clk-ls1x.c
+++ b/drivers/clk/clk-ls1x.c
@@ -15,7 +15,8 @@
15 15
16#include <loongson1.h> 16#include <loongson1.h>
17 17
18#define OSC 33 18#define OSC (33 * 1000000)
19#define DIV_APB 2
19 20
20static DEFINE_SPINLOCK(_lock); 21static DEFINE_SPINLOCK(_lock);
21 22
@@ -29,13 +30,12 @@ static void ls1x_pll_clk_disable(struct clk_hw *hw)
29} 30}
30 31
31static unsigned long ls1x_pll_recalc_rate(struct clk_hw *hw, 32static unsigned long ls1x_pll_recalc_rate(struct clk_hw *hw,
32 unsigned long parent_rate) 33 unsigned long parent_rate)
33{ 34{
34 u32 pll, rate; 35 u32 pll, rate;
35 36
36 pll = __raw_readl(LS1X_CLK_PLL_FREQ); 37 pll = __raw_readl(LS1X_CLK_PLL_FREQ);
37 rate = ((12 + (pll & 0x3f)) * 1000000) + 38 rate = 12 + (pll & 0x3f) + (((pll >> 8) & 0x3ff) >> 10);
38 ((((pll >> 8) & 0x3ff) * 1000000) >> 10);
39 rate *= OSC; 39 rate *= OSC;
40 rate >>= 1; 40 rate >>= 1;
41 41
@@ -48,8 +48,10 @@ static const struct clk_ops ls1x_pll_clk_ops = {
48 .recalc_rate = ls1x_pll_recalc_rate, 48 .recalc_rate = ls1x_pll_recalc_rate,
49}; 49};
50 50
51static struct clk * __init clk_register_pll(struct device *dev, 51static struct clk *__init clk_register_pll(struct device *dev,
52 const char *name, const char *parent_name, unsigned long flags) 52 const char *name,
53 const char *parent_name,
54 unsigned long flags)
53{ 55{
54 struct clk_hw *hw; 56 struct clk_hw *hw;
55 struct clk *clk; 57 struct clk *clk;
@@ -78,34 +80,83 @@ static struct clk * __init clk_register_pll(struct device *dev,
78 return clk; 80 return clk;
79} 81}
80 82
 83static const char *const cpu_parents[] = { "cpu_clk_div", "osc_33m_clk", };
 84static const char *const ahb_parents[] = { "ahb_clk_div", "osc_33m_clk", };
 85static const char *const dc_parents[] = { "dc_clk_div", "osc_33m_clk", };
86
81void __init ls1x_clk_init(void) 87void __init ls1x_clk_init(void)
82{ 88{
83 struct clk *clk; 89 struct clk *clk;
84 90
85 clk = clk_register_pll(NULL, "pll_clk", NULL, CLK_IS_ROOT); 91 clk = clk_register_fixed_rate(NULL, "osc_33m_clk", NULL, CLK_IS_ROOT,
86 clk_prepare_enable(clk); 92 OSC);
87 93 clk_register_clkdev(clk, "osc_33m_clk", NULL);
88 clk = clk_register_divider(NULL, "cpu_clk", "pll_clk", 94
89 CLK_SET_RATE_PARENT, LS1X_CLK_PLL_DIV, DIV_CPU_SHIFT, 95 /* clock derived from 33 MHz OSC clk */
90 DIV_CPU_WIDTH, CLK_DIVIDER_ONE_BASED, &_lock); 96 clk = clk_register_pll(NULL, "pll_clk", "osc_33m_clk", 0);
91 clk_prepare_enable(clk); 97 clk_register_clkdev(clk, "pll_clk", NULL);
92 clk_register_clkdev(clk, "cpu", NULL); 98
93 99 /* clock derived from PLL clk */
94 clk = clk_register_divider(NULL, "dc_clk", "pll_clk", 100 /* _____
95 CLK_SET_RATE_PARENT, LS1X_CLK_PLL_DIV, DIV_DC_SHIFT, 101 * _______________________| |
96 DIV_DC_WIDTH, CLK_DIVIDER_ONE_BASED, &_lock); 102 * OSC ___/ | MUX |___ CPU CLK
97 clk_prepare_enable(clk); 103 * \___ PLL ___ CPU DIV ___| |
98 clk_register_clkdev(clk, "dc", NULL); 104 * |_____|
99 105 */
100 clk = clk_register_divider(NULL, "ahb_clk", "pll_clk", 106 clk = clk_register_divider(NULL, "cpu_clk_div", "pll_clk",
101 CLK_SET_RATE_PARENT, LS1X_CLK_PLL_DIV, DIV_DDR_SHIFT, 107 CLK_GET_RATE_NOCACHE, LS1X_CLK_PLL_DIV,
102 DIV_DDR_WIDTH, CLK_DIVIDER_ONE_BASED, &_lock); 108 DIV_CPU_SHIFT, DIV_CPU_WIDTH,
103 clk_prepare_enable(clk); 109 CLK_DIVIDER_ONE_BASED |
104 clk_register_clkdev(clk, "ahb", NULL); 110 CLK_DIVIDER_ROUND_CLOSEST, &_lock);
111 clk_register_clkdev(clk, "cpu_clk_div", NULL);
112 clk = clk_register_mux(NULL, "cpu_clk", cpu_parents,
113 ARRAY_SIZE(cpu_parents),
114 CLK_SET_RATE_NO_REPARENT, LS1X_CLK_PLL_DIV,
115 BYPASS_CPU_SHIFT, BYPASS_CPU_WIDTH, 0, &_lock);
116 clk_register_clkdev(clk, "cpu_clk", NULL);
117
118 /* _____
119 * _______________________| |
120 * OSC ___/ | MUX |___ DC CLK
121 * \___ PLL ___ DC DIV ___| |
122 * |_____|
123 */
124 clk = clk_register_divider(NULL, "dc_clk_div", "pll_clk",
125 0, LS1X_CLK_PLL_DIV, DIV_DC_SHIFT,
126 DIV_DC_WIDTH, CLK_DIVIDER_ONE_BASED, &_lock);
127 clk_register_clkdev(clk, "dc_clk_div", NULL);
128 clk = clk_register_mux(NULL, "dc_clk", dc_parents,
129 ARRAY_SIZE(dc_parents),
130 CLK_SET_RATE_NO_REPARENT, LS1X_CLK_PLL_DIV,
131 BYPASS_DC_SHIFT, BYPASS_DC_WIDTH, 0, &_lock);
132 clk_register_clkdev(clk, "dc_clk", NULL);
133
134 /* _____
135 * _______________________| |
136 * OSC ___/ | MUX |___ DDR CLK
137 * \___ PLL ___ DDR DIV ___| |
138 * |_____|
139 */
140 clk = clk_register_divider(NULL, "ahb_clk_div", "pll_clk",
141 0, LS1X_CLK_PLL_DIV, DIV_DDR_SHIFT,
142 DIV_DDR_WIDTH, CLK_DIVIDER_ONE_BASED,
143 &_lock);
144 clk_register_clkdev(clk, "ahb_clk_div", NULL);
145 clk = clk_register_mux(NULL, "ahb_clk", ahb_parents,
146 ARRAY_SIZE(ahb_parents),
147 CLK_SET_RATE_NO_REPARENT, LS1X_CLK_PLL_DIV,
148 BYPASS_DDR_SHIFT, BYPASS_DDR_WIDTH, 0, &_lock);
149 clk_register_clkdev(clk, "ahb_clk", NULL);
105 clk_register_clkdev(clk, "stmmaceth", NULL); 150 clk_register_clkdev(clk, "stmmaceth", NULL);
106 151
107 clk = clk_register_fixed_factor(NULL, "apb_clk", "ahb_clk", 0, 1, 2); 152 /* clock derived from AHB clk */
108 clk_prepare_enable(clk); 153 /* APB clk is always half of the AHB clk */
109 clk_register_clkdev(clk, "apb", NULL); 154 clk = clk_register_fixed_factor(NULL, "apb_clk", "ahb_clk", 0, 1,
155 DIV_APB);
156 clk_register_clkdev(clk, "apb_clk", NULL);
157 clk_register_clkdev(clk, "ls1x_i2c", NULL);
158 clk_register_clkdev(clk, "ls1x_pwmtimer", NULL);
159 clk_register_clkdev(clk, "ls1x_spi", NULL);
160 clk_register_clkdev(clk, "ls1x_wdt", NULL);
110 clk_register_clkdev(clk, "serial8250", NULL); 161 clk_register_clkdev(clk, "serial8250", NULL);
111} 162}
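The clkdev lookups registered above are keyed by connection id with a
NULL device, so peripheral drivers can fetch their clock by name alone.
A minimal consumer sketch (the calling driver context is assumed, not
part of this patch):

	struct clk *clk = clk_get(NULL, "ls1x_pwmtimer");
	if (!IS_ERR(clk))
		pr_info("PWM timer clock: %lu Hz\n", clk_get_rate(clk));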
diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig
index f657a48d20eb..fc01ec27d3c8 100644
--- a/drivers/clocksource/Kconfig
+++ b/drivers/clocksource/Kconfig
@@ -224,4 +224,9 @@ config CLKSRC_VERSATILE
224 ARM Versatile, RealView and Versatile Express reference 224 ARM Versatile, RealView and Versatile Express reference
225 platforms. 225 platforms.
226 226
227config CLKSRC_MIPS_GIC
228 bool
229 depends on MIPS_GIC
230 select CLKSRC_OF
231
227endmenu 232endmenu
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index fae0435cc23d..94d90b24b56b 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -47,3 +47,4 @@ obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST) += dummy_timer.o
47obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o 47obj-$(CONFIG_ARCH_KEYSTONE) += timer-keystone.o
48obj-$(CONFIG_ARCH_INTEGRATOR_AP) += timer-integrator-ap.o 48obj-$(CONFIG_ARCH_INTEGRATOR_AP) += timer-integrator-ap.o
49obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o 49obj-$(CONFIG_CLKSRC_VERSATILE) += versatile.o
50obj-$(CONFIG_CLKSRC_MIPS_GIC) += mips-gic-timer.o
diff --git a/drivers/clocksource/mips-gic-timer.c b/drivers/clocksource/mips-gic-timer.c
new file mode 100644
index 000000000000..3bd31b1321f6
--- /dev/null
+++ b/drivers/clocksource/mips-gic-timer.c
@@ -0,0 +1,166 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7 */
8#include <linux/clockchips.h>
9#include <linux/cpu.h>
10#include <linux/init.h>
11#include <linux/interrupt.h>
12#include <linux/irqchip/mips-gic.h>
13#include <linux/notifier.h>
14#include <linux/of_irq.h>
15#include <linux/percpu.h>
16#include <linux/smp.h>
17#include <linux/time.h>
18
19static DEFINE_PER_CPU(struct clock_event_device, gic_clockevent_device);
20static int gic_timer_irq;
21static unsigned int gic_frequency;
22
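/*
 * Arm the per-CPU compare register 'delta' ticks ahead of the current
 * count.  If the count has already reached the new compare value by the
 * time it is re-read, return -ETIME so the clockevents core can retry
 * with a larger delta.
 */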
23static int gic_next_event(unsigned long delta, struct clock_event_device *evt)
24{
25 u64 cnt;
26 int res;
27
28 cnt = gic_read_count();
29 cnt += (u64)delta;
30 gic_write_cpu_compare(cnt, cpumask_first(evt->cpumask));
31 res = ((int)(gic_read_count() - cnt) >= 0) ? -ETIME : 0;
32 return res;
33}
34
35static void gic_set_clock_mode(enum clock_event_mode mode,
36 struct clock_event_device *evt)
37{
38 /* Nothing to do ... */
39}
40
41static irqreturn_t gic_compare_interrupt(int irq, void *dev_id)
42{
43 struct clock_event_device *cd = dev_id;
44
45 gic_write_compare(gic_read_compare());
46 cd->event_handler(cd);
47 return IRQ_HANDLED;
48}
49
50struct irqaction gic_compare_irqaction = {
51 .handler = gic_compare_interrupt,
52 .percpu_dev_id = &gic_clockevent_device,
53 .flags = IRQF_PERCPU | IRQF_TIMER,
54 .name = "timer",
55};
56
57static void gic_clockevent_cpu_init(struct clock_event_device *cd)
58{
59 unsigned int cpu = smp_processor_id();
60
61 cd->name = "MIPS GIC";
62 cd->features = CLOCK_EVT_FEAT_ONESHOT |
63 CLOCK_EVT_FEAT_C3STOP;
64
65 cd->rating = 350;
66 cd->irq = gic_timer_irq;
67 cd->cpumask = cpumask_of(cpu);
68 cd->set_next_event = gic_next_event;
69 cd->set_mode = gic_set_clock_mode;
70
71 clockevents_config_and_register(cd, gic_frequency, 0x300, 0x7fffffff);
72
73 enable_percpu_irq(gic_timer_irq, IRQ_TYPE_NONE);
74}
75
76static void gic_clockevent_cpu_exit(struct clock_event_device *cd)
77{
78 disable_percpu_irq(gic_timer_irq);
79}
80
81static int gic_cpu_notifier(struct notifier_block *nb, unsigned long action,
82 void *data)
83{
84 switch (action & ~CPU_TASKS_FROZEN) {
85 case CPU_STARTING:
86 gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device));
87 break;
88 case CPU_DYING:
89 gic_clockevent_cpu_exit(this_cpu_ptr(&gic_clockevent_device));
90 break;
91 }
92
93 return NOTIFY_OK;
94}
95
96static struct notifier_block gic_cpu_nb = {
97 .notifier_call = gic_cpu_notifier,
98};
99
100static int gic_clockevent_init(void)
101{
102 if (!cpu_has_counter || !gic_frequency)
103 return -ENXIO;
104
105 setup_percpu_irq(gic_timer_irq, &gic_compare_irqaction);
106
107 register_cpu_notifier(&gic_cpu_nb);
108
109 gic_clockevent_cpu_init(this_cpu_ptr(&gic_clockevent_device));
110
111 return 0;
112}
113
114static cycle_t gic_hpt_read(struct clocksource *cs)
115{
116 return gic_read_count();
117}
118
119static struct clocksource gic_clocksource = {
120 .name = "GIC",
121 .read = gic_hpt_read,
122 .flags = CLOCK_SOURCE_IS_CONTINUOUS,
123};
124
125static void __init __gic_clocksource_init(void)
126{
127 /* Set clocksource mask. */
128 gic_clocksource.mask = CLOCKSOURCE_MASK(gic_get_count_width());
129
130 /* Calculate a somewhat reasonable rating value. */
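	/* e.g. a 50 MHz GIC counter gets a rating of 200 + 5 = 205. */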
131 gic_clocksource.rating = 200 + gic_frequency / 10000000;
132
133 clocksource_register_hz(&gic_clocksource, gic_frequency);
134
135 gic_clockevent_init();
136}
137
138void __init gic_clocksource_init(unsigned int frequency)
139{
140 gic_frequency = frequency;
141 gic_timer_irq = MIPS_GIC_IRQ_BASE +
142 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_COMPARE);
143
144 __gic_clocksource_init();
145}
146
147static void __init gic_clocksource_of_init(struct device_node *node)
148{
149 if (WARN_ON(!gic_present || !node->parent ||
150 !of_device_is_compatible(node->parent, "mti,gic")))
151 return;
152
153 if (of_property_read_u32(node, "clock-frequency", &gic_frequency)) {
154 pr_err("GIC frequency not specified.\n");
155 return;
156 }
157 gic_timer_irq = irq_of_parse_and_map(node, 0);
158 if (!gic_timer_irq) {
159 pr_err("GIC timer IRQ not specified.\n");
160 return;
161 }
162
163 __gic_clocksource_init();
164}
165CLOCKSOURCE_OF_DECLARE(mips_gic_timer, "mti,gic-timer",
166 gic_clocksource_of_init);
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 9bc2720628a4..91ebe282b106 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -197,7 +197,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
197 197
198 ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk); 198 ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk);
199 if (ret) { 199 if (ret) {
200 pr_err("%s: Failed to allocate resources\n: %d", __func__, ret); 200 pr_err("%s: Failed to allocate resources: %d\n", __func__, ret);
201 return ret; 201 return ret;
202 } 202 }
203 203
diff --git a/drivers/dma-buf/fence.c b/drivers/dma-buf/fence.c
index 7bb9d65d9a2c..e5541117b3e9 100644
--- a/drivers/dma-buf/fence.c
+++ b/drivers/dma-buf/fence.c
@@ -283,7 +283,7 @@ EXPORT_SYMBOL(fence_add_callback);
283 * @cb: [in] the callback to remove 283 * @cb: [in] the callback to remove
284 * 284 *
285 * Remove a previously queued callback from the fence. This function returns 285 * Remove a previously queued callback from the fence. This function returns
286 * true if the callback is succesfully removed, or false if the fence has 286 * true if the callback is successfully removed, or false if the fence has
287 * already been signaled. 287 * already been signaled.
288 * 288 *
289 * *WARNING*: 289 * *WARNING*:
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index de469821bc1b..f2b2c4e87aef 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -107,6 +107,13 @@ config AT_HDMAC
107 help 107 help
108 Support the Atmel AHB DMA controller. 108 Support the Atmel AHB DMA controller.
109 109
110config AT_XDMAC
111 tristate "Atmel XDMA support"
112 depends on ARCH_AT91
113 select DMA_ENGINE
114 help
115 Support the Atmel XDMA controller.
116
110config FSL_DMA 117config FSL_DMA
111 tristate "Freescale Elo series DMA support" 118 tristate "Freescale Elo series DMA support"
112 depends on FSL_SOC 119 depends on FSL_SOC
@@ -395,12 +402,12 @@ config XILINX_VDMA
395 402
396config DMA_SUN6I 403config DMA_SUN6I
397 tristate "Allwinner A31 SoCs DMA support" 404 tristate "Allwinner A31 SoCs DMA support"
398 depends on MACH_SUN6I || COMPILE_TEST 405 depends on MACH_SUN6I || MACH_SUN8I || COMPILE_TEST
399 depends on RESET_CONTROLLER 406 depends on RESET_CONTROLLER
400 select DMA_ENGINE 407 select DMA_ENGINE
401 select DMA_VIRTUAL_CHANNELS 408 select DMA_VIRTUAL_CHANNELS
402 help 409 help
403 Support for the DMA engine for Allwinner A31 SoCs. 410 Support for the DMA engine first found in Allwinner A31 SoCs.
404 411
405config NBPFAXI_DMA 412config NBPFAXI_DMA
406 tristate "Renesas Type-AXI NBPF DMA support" 413 tristate "Renesas Type-AXI NBPF DMA support"
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index cb626c179911..2022b5451377 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -16,6 +16,7 @@ obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/
16obj-$(CONFIG_MV_XOR) += mv_xor.o 16obj-$(CONFIG_MV_XOR) += mv_xor.o
17obj-$(CONFIG_DW_DMAC_CORE) += dw/ 17obj-$(CONFIG_DW_DMAC_CORE) += dw/
18obj-$(CONFIG_AT_HDMAC) += at_hdmac.o 18obj-$(CONFIG_AT_HDMAC) += at_hdmac.o
19obj-$(CONFIG_AT_XDMAC) += at_xdmac.o
19obj-$(CONFIG_MX3_IPU) += ipu/ 20obj-$(CONFIG_MX3_IPU) += ipu/
20obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o 21obj-$(CONFIG_TXX9_DMAC) += txx9dmac.o
21obj-$(CONFIG_SH_DMAE_BASE) += sh/ 22obj-$(CONFIG_SH_DMAE_BASE) += sh/
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index e34024b000a4..1364d00881dd 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -2164,7 +2164,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
2164 __func__, ret); 2164 __func__, ret);
2165 goto out_no_memcpy; 2165 goto out_no_memcpy;
2166 } 2166 }
2167 pl08x->memcpy.chancnt = ret;
2168 2167
2169 /* Register slave channels */ 2168 /* Register slave channels */
2170 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave, 2169 ret = pl08x_dma_init_virtual_channels(pl08x, &pl08x->slave,
@@ -2175,7 +2174,6 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
2175 __func__, ret); 2174 __func__, ret);
2176 goto out_no_slave; 2175 goto out_no_slave;
2177 } 2176 }
2178 pl08x->slave.chancnt = ret;
2179 2177
2180 ret = dma_async_device_register(&pl08x->memcpy); 2178 ret = dma_async_device_register(&pl08x->memcpy);
2181 if (ret) { 2179 if (ret) {
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
new file mode 100644
index 000000000000..b60d77a22df6
--- /dev/null
+++ b/drivers/dma/at_xdmac.c
@@ -0,0 +1,1524 @@
1/*
2 * Driver for the Atmel Extensible DMA Controller (aka XDMAC on AT91 systems)
3 *
4 * Copyright (C) 2014 Atmel Corporation
5 *
6 * Author: Ludovic Desroches <ludovic.desroches@atmel.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License version 2 as published by
10 * the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program. If not, see <http://www.gnu.org/licenses/>.
19 */
20
21#include <asm/barrier.h>
22#include <dt-bindings/dma/at91.h>
23#include <linux/clk.h>
24#include <linux/dmaengine.h>
25#include <linux/dmapool.h>
26#include <linux/interrupt.h>
27#include <linux/irq.h>
28#include <linux/list.h>
29#include <linux/module.h>
30#include <linux/of_dma.h>
31#include <linux/of_platform.h>
32#include <linux/platform_device.h>
33#include <linux/pm.h>
34
35#include "dmaengine.h"
36
37/* Global registers */
38#define AT_XDMAC_GTYPE 0x00 /* Global Type Register */
39#define AT_XDMAC_NB_CH(i) (((i) & 0x1F) + 1) /* Number of Channels Minus One */
40#define AT_XDMAC_FIFO_SZ(i) (((i) >> 5) & 0x7FF) /* Number of Bytes */
41#define AT_XDMAC_NB_REQ(i) ((((i) >> 16) & 0x3F) + 1) /* Number of Peripheral Requests Minus One */
42#define AT_XDMAC_GCFG 0x04 /* Global Configuration Register */
43#define AT_XDMAC_GWAC 0x08 /* Global Weighted Arbiter Configuration Register */
44#define AT_XDMAC_GIE 0x0C /* Global Interrupt Enable Register */
45#define AT_XDMAC_GID 0x10 /* Global Interrupt Disable Register */
46#define AT_XDMAC_GIM 0x14 /* Global Interrupt Mask Register */
47#define AT_XDMAC_GIS 0x18 /* Global Interrupt Status Register */
48#define AT_XDMAC_GE 0x1C /* Global Channel Enable Register */
49#define AT_XDMAC_GD 0x20 /* Global Channel Disable Register */
50#define AT_XDMAC_GS 0x24 /* Global Channel Status Register */
51#define AT_XDMAC_GRS 0x28 /* Global Channel Read Suspend Register */
52#define AT_XDMAC_GWS 0x2C /* Global Write Suspend Register */
53#define AT_XDMAC_GRWS 0x30 /* Global Channel Read Write Suspend Register */
54#define AT_XDMAC_GRWR 0x34 /* Global Channel Read Write Resume Register */
55#define AT_XDMAC_GSWR 0x38 /* Global Channel Software Request Register */
56#define AT_XDMAC_GSWS 0x3C /* Global channel Software Request Status Register */
57#define AT_XDMAC_GSWF 0x40 /* Global Channel Software Flush Request Register */
58#define AT_XDMAC_VERSION 0xFFC /* XDMAC Version Register */
59
60/* Channel relative registers offsets */
61#define AT_XDMAC_CIE 0x00 /* Channel Interrupt Enable Register */
62#define AT_XDMAC_CIE_BIE BIT(0) /* End of Block Interrupt Enable Bit */
63#define AT_XDMAC_CIE_LIE BIT(1) /* End of Linked List Interrupt Enable Bit */
64#define AT_XDMAC_CIE_DIE BIT(2) /* End of Disable Interrupt Enable Bit */
65#define AT_XDMAC_CIE_FIE BIT(3) /* End of Flush Interrupt Enable Bit */
66#define AT_XDMAC_CIE_RBEIE BIT(4) /* Read Bus Error Interrupt Enable Bit */
67#define AT_XDMAC_CIE_WBEIE BIT(5) /* Write Bus Error Interrupt Enable Bit */
68#define AT_XDMAC_CIE_ROIE BIT(6) /* Request Overflow Interrupt Enable Bit */
69#define AT_XDMAC_CID 0x04 /* Channel Interrupt Disable Register */
70#define AT_XDMAC_CID_BID BIT(0) /* End of Block Interrupt Disable Bit */
71#define AT_XDMAC_CID_LID BIT(1) /* End of Linked List Interrupt Disable Bit */
72#define AT_XDMAC_CID_DID BIT(2) /* End of Disable Interrupt Disable Bit */
73#define AT_XDMAC_CID_FID BIT(3) /* End of Flush Interrupt Disable Bit */
74#define AT_XDMAC_CID_RBEID BIT(4) /* Read Bus Error Interrupt Disable Bit */
75#define AT_XDMAC_CID_WBEID BIT(5) /* Write Bus Error Interrupt Disable Bit */
76#define AT_XDMAC_CID_ROID BIT(6) /* Request Overflow Interrupt Disable Bit */
77#define AT_XDMAC_CIM 0x08 /* Channel Interrupt Mask Register */
78#define AT_XDMAC_CIM_BIM BIT(0) /* End of Block Interrupt Mask Bit */
79#define AT_XDMAC_CIM_LIM BIT(1) /* End of Linked List Interrupt Mask Bit */
80#define AT_XDMAC_CIM_DIM BIT(2) /* End of Disable Interrupt Mask Bit */
81#define AT_XDMAC_CIM_FIM BIT(3) /* End of Flush Interrupt Mask Bit */
82#define AT_XDMAC_CIM_RBEIM BIT(4) /* Read Bus Error Interrupt Mask Bit */
83#define AT_XDMAC_CIM_WBEIM BIT(5) /* Write Bus Error Interrupt Mask Bit */
84#define AT_XDMAC_CIM_ROIM BIT(6) /* Request Overflow Interrupt Mask Bit */
85#define AT_XDMAC_CIS 0x0C /* Channel Interrupt Status Register */
86#define AT_XDMAC_CIS_BIS BIT(0) /* End of Block Interrupt Status Bit */
87#define AT_XDMAC_CIS_LIS BIT(1) /* End of Linked List Interrupt Status Bit */
88#define AT_XDMAC_CIS_DIS BIT(2) /* End of Disable Interrupt Status Bit */
89#define AT_XDMAC_CIS_FIS BIT(3) /* End of Flush Interrupt Status Bit */
90#define AT_XDMAC_CIS_RBEIS BIT(4) /* Read Bus Error Interrupt Status Bit */
91#define AT_XDMAC_CIS_WBEIS BIT(5) /* Write Bus Error Interrupt Status Bit */
92#define AT_XDMAC_CIS_ROIS BIT(6) /* Request Overflow Interrupt Status Bit */
93#define AT_XDMAC_CSA 0x10 /* Channel Source Address Register */
94#define AT_XDMAC_CDA 0x14 /* Channel Destination Address Register */
95#define AT_XDMAC_CNDA 0x18 /* Channel Next Descriptor Address Register */
96#define AT_XDMAC_CNDA_NDAIF(i) ((i) & 0x1) /* Channel x Next Descriptor Interface */
97#define AT_XDMAC_CNDA_NDA(i) ((i) & 0xfffffffc) /* Channel x Next Descriptor Address */
98#define AT_XDMAC_CNDC 0x1C /* Channel Next Descriptor Control Register */
99#define AT_XDMAC_CNDC_NDE (0x1 << 0) /* Channel x Next Descriptor Enable */
100#define AT_XDMAC_CNDC_NDSUP (0x1 << 1) /* Channel x Next Descriptor Source Update */
101#define AT_XDMAC_CNDC_NDDUP (0x1 << 2) /* Channel x Next Descriptor Destination Update */
102#define AT_XDMAC_CNDC_NDVIEW_NDV0 (0x0 << 3) /* Channel x Next Descriptor View 0 */
103#define AT_XDMAC_CNDC_NDVIEW_NDV1 (0x1 << 3) /* Channel x Next Descriptor View 1 */
104#define AT_XDMAC_CNDC_NDVIEW_NDV2 (0x2 << 3) /* Channel x Next Descriptor View 2 */
105#define AT_XDMAC_CNDC_NDVIEW_NDV3 (0x3 << 3) /* Channel x Next Descriptor View 3 */
106#define AT_XDMAC_CUBC 0x20 /* Channel Microblock Control Register */
107#define AT_XDMAC_CBC 0x24 /* Channel Block Control Register */
108#define AT_XDMAC_CC 0x28 /* Channel Configuration Register */
109#define AT_XDMAC_CC_TYPE (0x1 << 0) /* Channel Transfer Type */
110#define AT_XDMAC_CC_TYPE_MEM_TRAN (0x0 << 0) /* Memory to Memory Transfer */
111#define AT_XDMAC_CC_TYPE_PER_TRAN (0x1 << 0) /* Peripheral to Memory or Memory to Peripheral Transfer */
112#define AT_XDMAC_CC_MBSIZE_MASK (0x3 << 1)
113#define AT_XDMAC_CC_MBSIZE_SINGLE (0x0 << 1)
114#define AT_XDMAC_CC_MBSIZE_FOUR (0x1 << 1)
115#define AT_XDMAC_CC_MBSIZE_EIGHT (0x2 << 1)
116#define AT_XDMAC_CC_MBSIZE_SIXTEEN (0x3 << 1)
117#define AT_XDMAC_CC_DSYNC (0x1 << 4) /* Channel Synchronization */
118#define AT_XDMAC_CC_DSYNC_PER2MEM (0x0 << 4)
119#define AT_XDMAC_CC_DSYNC_MEM2PER (0x1 << 4)
120#define AT_XDMAC_CC_PROT (0x1 << 5) /* Channel Protection */
121#define AT_XDMAC_CC_PROT_SEC (0x0 << 5)
122#define AT_XDMAC_CC_PROT_UNSEC (0x1 << 5)
123#define AT_XDMAC_CC_SWREQ (0x1 << 6) /* Channel Software Request Trigger */
124#define AT_XDMAC_CC_SWREQ_HWR_CONNECTED (0x0 << 6)
125#define AT_XDMAC_CC_SWREQ_SWR_CONNECTED (0x1 << 6)
126#define AT_XDMAC_CC_MEMSET (0x1 << 7) /* Channel Fill Block of memory */
127#define AT_XDMAC_CC_MEMSET_NORMAL_MODE (0x0 << 7)
128#define AT_XDMAC_CC_MEMSET_HW_MODE (0x1 << 7)
129#define AT_XDMAC_CC_CSIZE(i) ((0x7 & (i)) << 8) /* Channel Chunk Size */
130#define AT_XDMAC_CC_DWIDTH_OFFSET 11
131#define AT_XDMAC_CC_DWIDTH_MASK (0x3 << AT_XDMAC_CC_DWIDTH_OFFSET)
132#define AT_XDMAC_CC_DWIDTH(i) ((0x3 & (i)) << AT_XDMAC_CC_DWIDTH_OFFSET) /* Channel Data Width */
133#define AT_XDMAC_CC_DWIDTH_BYTE 0x0
134#define AT_XDMAC_CC_DWIDTH_HALFWORD 0x1
135#define AT_XDMAC_CC_DWIDTH_WORD 0x2
136#define AT_XDMAC_CC_DWIDTH_DWORD 0x3
137#define AT_XDMAC_CC_SIF(i) ((0x1 & (i)) << 13) /* Channel Source Interface Identifier */
138#define AT_XDMAC_CC_DIF(i) ((0x1 & (i)) << 14) /* Channel Destination Interface Identifier */
139#define AT_XDMAC_CC_SAM_MASK (0x3 << 16) /* Channel Source Addressing Mode */
140#define AT_XDMAC_CC_SAM_FIXED_AM (0x0 << 16)
141#define AT_XDMAC_CC_SAM_INCREMENTED_AM (0x1 << 16)
142#define AT_XDMAC_CC_SAM_UBS_AM (0x2 << 16)
143#define AT_XDMAC_CC_SAM_UBS_DS_AM (0x3 << 16)
144#define		AT_XDMAC_CC_DAM_MASK	(0x3 << 18)	/* Channel Destination Addressing Mode */
145#define AT_XDMAC_CC_DAM_FIXED_AM (0x0 << 18)
146#define AT_XDMAC_CC_DAM_INCREMENTED_AM (0x1 << 18)
147#define AT_XDMAC_CC_DAM_UBS_AM (0x2 << 18)
148#define AT_XDMAC_CC_DAM_UBS_DS_AM (0x3 << 18)
149#define AT_XDMAC_CC_INITD (0x1 << 21) /* Channel Initialization Terminated (read only) */
150#define AT_XDMAC_CC_INITD_TERMINATED (0x0 << 21)
151#define AT_XDMAC_CC_INITD_IN_PROGRESS (0x1 << 21)
152#define AT_XDMAC_CC_RDIP (0x1 << 22) /* Read in Progress (read only) */
153#define AT_XDMAC_CC_RDIP_DONE (0x0 << 22)
154#define AT_XDMAC_CC_RDIP_IN_PROGRESS (0x1 << 22)
155#define AT_XDMAC_CC_WRIP (0x1 << 23) /* Write in Progress (read only) */
156#define AT_XDMAC_CC_WRIP_DONE (0x0 << 23)
157#define AT_XDMAC_CC_WRIP_IN_PROGRESS (0x1 << 23)
158#define		AT_XDMAC_CC_PERID(i)	((0x7f & (i)) << 24)	/* Channel Peripheral Identifier */
159#define AT_XDMAC_CDS_MSP 0x2C /* Channel Data Stride Memory Set Pattern */
160#define AT_XDMAC_CSUS 0x30 /* Channel Source Microblock Stride */
161#define AT_XDMAC_CDUS 0x34 /* Channel Destination Microblock Stride */
162
163#define AT_XDMAC_CHAN_REG_BASE 0x50 /* Channel registers base address */
164
165/* Microblock control members */
166#define AT_XDMAC_MBR_UBC_UBLEN_MAX 0xFFFFFFUL /* Maximum Microblock Length */
167#define AT_XDMAC_MBR_UBC_NDE (0x1 << 24) /* Next Descriptor Enable */
168#define AT_XDMAC_MBR_UBC_NSEN (0x1 << 25) /* Next Descriptor Source Update */
169#define AT_XDMAC_MBR_UBC_NDEN (0x1 << 26) /* Next Descriptor Destination Update */
170#define AT_XDMAC_MBR_UBC_NDV0 (0x0 << 27) /* Next Descriptor View 0 */
171#define AT_XDMAC_MBR_UBC_NDV1 (0x1 << 27) /* Next Descriptor View 1 */
172#define AT_XDMAC_MBR_UBC_NDV2 (0x2 << 27) /* Next Descriptor View 2 */
173#define AT_XDMAC_MBR_UBC_NDV3 (0x3 << 27) /* Next Descriptor View 3 */
174
175#define AT_XDMAC_MAX_CHAN 0x20
176
177enum atc_status {
178 AT_XDMAC_CHAN_IS_CYCLIC = 0,
179 AT_XDMAC_CHAN_IS_PAUSED,
180};
181
182/* ----- Channels ----- */
183struct at_xdmac_chan {
184 struct dma_chan chan;
185 void __iomem *ch_regs;
186 u32 mask; /* Channel Mask */
187 u32 cfg[3]; /* Channel Configuration Register */
188 #define AT_XDMAC_CUR_CFG 0 /* Current channel conf */
189	#define	AT_XDMAC_DEV_TO_MEM_CFG	1	/* Predefined dev to mem channel conf */
190	#define	AT_XDMAC_MEM_TO_DEV_CFG	2	/* Predefined mem to dev channel conf */
191 u8 perid; /* Peripheral ID */
192 u8 perif; /* Peripheral Interface */
193 u8 memif; /* Memory Interface */
194 u32 per_src_addr;
195 u32 per_dst_addr;
196 u32 save_cim;
197 u32 save_cnda;
198 u32 save_cndc;
199 unsigned long status;
200 struct tasklet_struct tasklet;
201
202 spinlock_t lock;
203
204 struct list_head xfers_list;
205 struct list_head free_descs_list;
206};
207
208
209/* ----- Controller ----- */
210struct at_xdmac {
211 struct dma_device dma;
212 void __iomem *regs;
213 int irq;
214 struct clk *clk;
215 u32 save_gim;
216 u32 save_gs;
217 struct dma_pool *at_xdmac_desc_pool;
218 struct at_xdmac_chan chan[0];
219};
220
221
222/* ----- Descriptors ----- */
223
224/* Linked List Descriptor */
225struct at_xdmac_lld {
226 dma_addr_t mbr_nda; /* Next Descriptor Member */
227 u32 mbr_ubc; /* Microblock Control Member */
228 dma_addr_t mbr_sa; /* Source Address Member */
229 dma_addr_t mbr_da; /* Destination Address Member */
230 u32 mbr_cfg; /* Configuration Register */
231};
232
233
234struct at_xdmac_desc {
235 struct at_xdmac_lld lld;
236 enum dma_transfer_direction direction;
237 struct dma_async_tx_descriptor tx_dma_desc;
238 struct list_head desc_node;
239 /* Following members are only used by the first descriptor */
240 bool active_xfer;
241 unsigned int xfer_size;
242 struct list_head descs_list;
243 struct list_head xfer_node;
244};
245
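/*
 * Per-channel register banks follow the global registers: channel n lives
 * at offset 0x50 + n * 0x40, e.g. channel 2 at 0xd0.
 */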
246static inline void __iomem *at_xdmac_chan_reg_base(struct at_xdmac *atxdmac, unsigned int chan_nb)
247{
248 return atxdmac->regs + (AT_XDMAC_CHAN_REG_BASE + chan_nb * 0x40);
249}
250
251#define at_xdmac_read(atxdmac, reg) readl_relaxed((atxdmac)->regs + (reg))
252#define at_xdmac_write(atxdmac, reg, value) \
253 writel_relaxed((value), (atxdmac)->regs + (reg))
254
255#define at_xdmac_chan_read(atchan, reg) readl_relaxed((atchan)->ch_regs + (reg))
256#define at_xdmac_chan_write(atchan, reg, value) writel_relaxed((value), (atchan)->ch_regs + (reg))
257
258static inline struct at_xdmac_chan *to_at_xdmac_chan(struct dma_chan *dchan)
259{
260 return container_of(dchan, struct at_xdmac_chan, chan);
261}
262
263static struct device *chan2dev(struct dma_chan *chan)
264{
265 return &chan->dev->device;
266}
267
268static inline struct at_xdmac *to_at_xdmac(struct dma_device *ddev)
269{
270 return container_of(ddev, struct at_xdmac, dma);
271}
272
273static inline struct at_xdmac_desc *txd_to_at_desc(struct dma_async_tx_descriptor *txd)
274{
275 return container_of(txd, struct at_xdmac_desc, tx_dma_desc);
276}
277
278static inline int at_xdmac_chan_is_cyclic(struct at_xdmac_chan *atchan)
279{
280 return test_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
281}
282
283static inline int at_xdmac_chan_is_paused(struct at_xdmac_chan *atchan)
284{
285 return test_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
286}
287
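/*
 * Map a maxburst value (in transfers, a power of two) to the CSIZE field
 * encoding: 1 -> 0, 2 -> 1, 4 -> 2, 8 -> 3, 16 -> 4.  Larger bursts
 * (e.g. 32) are rejected with -EINVAL.
 */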
288static inline int at_xdmac_csize(u32 maxburst)
289{
290 int csize;
291
292 csize = ffs(maxburst) - 1;
293 if (csize > 4)
294 csize = -EINVAL;
295
296 return csize;
297}
298
299static inline u8 at_xdmac_get_dwidth(u32 cfg)
300{
301 return (cfg & AT_XDMAC_CC_DWIDTH_MASK) >> AT_XDMAC_CC_DWIDTH_OFFSET;
302}
303
304static unsigned int init_nr_desc_per_channel = 64;
305module_param(init_nr_desc_per_channel, uint, 0644);
306MODULE_PARM_DESC(init_nr_desc_per_channel,
307 "initial descriptors per channel (default: 64)");
308
309
310static bool at_xdmac_chan_is_enabled(struct at_xdmac_chan *atchan)
311{
312 return at_xdmac_chan_read(atchan, AT_XDMAC_GS) & atchan->mask;
313}
314
315static void at_xdmac_off(struct at_xdmac *atxdmac)
316{
317 at_xdmac_write(atxdmac, AT_XDMAC_GD, -1L);
318
319	/* Wait until all channels are disabled. */
320 while (at_xdmac_read(atxdmac, AT_XDMAC_GS))
321 cpu_relax();
322
323 at_xdmac_write(atxdmac, AT_XDMAC_GID, -1L);
324}
325
326/* Must be called with the channel lock held. */
327static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
328 struct at_xdmac_desc *first)
329{
330 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
331 u32 reg;
332
333 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, first);
334
335 if (at_xdmac_chan_is_enabled(atchan))
336 return;
337
338	/* Mark the transfer as active so we do not try to start it again. */
339 first->active_xfer = true;
340
341 /* Tell xdmac where to get the first descriptor. */
342 reg = AT_XDMAC_CNDA_NDA(first->tx_dma_desc.phys)
343 | AT_XDMAC_CNDA_NDAIF(atchan->memif);
344 at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, reg);
345
346 /*
347	 * When doing a memory-to-memory transfer we need to use next
348	 * descriptor view 2, since some fields of the configuration register
349	 * depend on the transfer size and the src/dest addresses.
350 */
351 if (is_slave_direction(first->direction)) {
352 reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
353 if (first->direction == DMA_MEM_TO_DEV)
354 atchan->cfg[AT_XDMAC_CUR_CFG] =
355 atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
356 else
357 atchan->cfg[AT_XDMAC_CUR_CFG] =
358 atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
359 at_xdmac_chan_write(atchan, AT_XDMAC_CC,
360 atchan->cfg[AT_XDMAC_CUR_CFG]);
361 } else {
362 /*
363		 * No need to write the AT_XDMAC_CC register; it will be done
364		 * when the descriptor is fetched.
365 */
366 reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
367 }
368
369 reg |= AT_XDMAC_CNDC_NDDUP
370 | AT_XDMAC_CNDC_NDSUP
371 | AT_XDMAC_CNDC_NDE;
372 at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, reg);
373
374 dev_vdbg(chan2dev(&atchan->chan),
375 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
376 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
377 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
378 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
379 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
380 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
381 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
382
383 at_xdmac_chan_write(atchan, AT_XDMAC_CID, 0xffffffff);
384 reg = AT_XDMAC_CIE_RBEIE | AT_XDMAC_CIE_WBEIE | AT_XDMAC_CIE_ROIE;
385 /*
386	 * There is no end of list when doing cyclic DMA, so we need to get
387	 * an interrupt after each period.
388 */
389 if (at_xdmac_chan_is_cyclic(atchan))
390 at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
391 reg | AT_XDMAC_CIE_BIE);
392 else
393 at_xdmac_chan_write(atchan, AT_XDMAC_CIE,
394 reg | AT_XDMAC_CIE_LIE);
395 at_xdmac_write(atxdmac, AT_XDMAC_GIE, atchan->mask);
396 dev_vdbg(chan2dev(&atchan->chan),
397 "%s: enable channel (0x%08x)\n", __func__, atchan->mask);
398 wmb();
399 at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
400
401 dev_vdbg(chan2dev(&atchan->chan),
402 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
403 __func__, at_xdmac_chan_read(atchan, AT_XDMAC_CC),
404 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
405 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
406 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
407 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
408 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
409
410}
411
412static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
413{
414 struct at_xdmac_desc *desc = txd_to_at_desc(tx);
415 struct at_xdmac_chan *atchan = to_at_xdmac_chan(tx->chan);
416 dma_cookie_t cookie;
417
418 spin_lock_bh(&atchan->lock);
419 cookie = dma_cookie_assign(tx);
420
421 dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
422 __func__, atchan, desc);
423 list_add_tail(&desc->xfer_node, &atchan->xfers_list);
424 if (list_is_singular(&atchan->xfers_list))
425 at_xdmac_start_xfer(atchan, desc);
426
427 spin_unlock_bh(&atchan->lock);
428 return cookie;
429}
430
431static struct at_xdmac_desc *at_xdmac_alloc_desc(struct dma_chan *chan,
432 gfp_t gfp_flags)
433{
434 struct at_xdmac_desc *desc;
435 struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
436 dma_addr_t phys;
437
438 desc = dma_pool_alloc(atxdmac->at_xdmac_desc_pool, gfp_flags, &phys);
439 if (desc) {
440 memset(desc, 0, sizeof(*desc));
441 INIT_LIST_HEAD(&desc->descs_list);
442 dma_async_tx_descriptor_init(&desc->tx_dma_desc, chan);
443 desc->tx_dma_desc.tx_submit = at_xdmac_tx_submit;
444 desc->tx_dma_desc.phys = phys;
445 }
446
447 return desc;
448}
449
450/* Must be called with the channel lock held. */
451static struct at_xdmac_desc *at_xdmac_get_desc(struct at_xdmac_chan *atchan)
452{
453 struct at_xdmac_desc *desc;
454
455 if (list_empty(&atchan->free_descs_list)) {
456 desc = at_xdmac_alloc_desc(&atchan->chan, GFP_NOWAIT);
457 } else {
458 desc = list_first_entry(&atchan->free_descs_list,
459 struct at_xdmac_desc, desc_node);
460 list_del(&desc->desc_node);
461 desc->active_xfer = false;
462 }
463
464 return desc;
465}
466
467static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
468 struct of_dma *of_dma)
469{
470 struct at_xdmac *atxdmac = of_dma->of_dma_data;
471 struct at_xdmac_chan *atchan;
472 struct dma_chan *chan;
473 struct device *dev = atxdmac->dma.dev;
474
475 if (dma_spec->args_count != 1) {
476		dev_err(dev, "dma phandle args: bad number of args\n");
477 return NULL;
478 }
479
480 chan = dma_get_any_slave_channel(&atxdmac->dma);
481 if (!chan) {
482 dev_err(dev, "can't get a dma channel\n");
483 return NULL;
484 }
485
486 atchan = to_at_xdmac_chan(chan);
487 atchan->memif = AT91_XDMAC_DT_GET_MEM_IF(dma_spec->args[0]);
488 atchan->perif = AT91_XDMAC_DT_GET_PER_IF(dma_spec->args[0]);
489 atchan->perid = AT91_XDMAC_DT_GET_PERID(dma_spec->args[0]);
490 dev_dbg(dev, "chan dt cfg: memif=%u perif=%u perid=%u\n",
491 atchan->memif, atchan->perif, atchan->perid);
492
493 return chan;
494}
495
496static int at_xdmac_set_slave_config(struct dma_chan *chan,
497 struct dma_slave_config *sconfig)
498{
499 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
500 u8 dwidth;
501 int csize;
502
503 atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] =
504 AT91_XDMAC_DT_PERID(atchan->perid)
505 | AT_XDMAC_CC_DAM_INCREMENTED_AM
506 | AT_XDMAC_CC_SAM_FIXED_AM
507 | AT_XDMAC_CC_DIF(atchan->memif)
508 | AT_XDMAC_CC_SIF(atchan->perif)
509 | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
510 | AT_XDMAC_CC_DSYNC_PER2MEM
511 | AT_XDMAC_CC_MBSIZE_SIXTEEN
512 | AT_XDMAC_CC_TYPE_PER_TRAN;
513 csize = at_xdmac_csize(sconfig->src_maxburst);
514 if (csize < 0) {
515 dev_err(chan2dev(chan), "invalid src maxburst value\n");
516 return -EINVAL;
517 }
518 atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_CSIZE(csize);
519 dwidth = ffs(sconfig->src_addr_width) - 1;
520 atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
521
522
523 atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] =
524 AT91_XDMAC_DT_PERID(atchan->perid)
525 | AT_XDMAC_CC_DAM_FIXED_AM
526 | AT_XDMAC_CC_SAM_INCREMENTED_AM
527 | AT_XDMAC_CC_DIF(atchan->perif)
528 | AT_XDMAC_CC_SIF(atchan->memif)
529 | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
530 | AT_XDMAC_CC_DSYNC_MEM2PER
531 | AT_XDMAC_CC_MBSIZE_SIXTEEN
532 | AT_XDMAC_CC_TYPE_PER_TRAN;
533 csize = at_xdmac_csize(sconfig->dst_maxburst);
534 if (csize < 0) {
535		dev_err(chan2dev(chan), "invalid dst maxburst value\n");
536 return -EINVAL;
537 }
538 atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_CSIZE(csize);
539 dwidth = ffs(sconfig->dst_addr_width) - 1;
540 atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
541
542 /* Src and dst addr are needed to configure the link list descriptor. */
543 atchan->per_src_addr = sconfig->src_addr;
544 atchan->per_dst_addr = sconfig->dst_addr;
545
546 dev_dbg(chan2dev(chan),
547 "%s: cfg[dev2mem]=0x%08x, cfg[mem2dev]=0x%08x, per_src_addr=0x%08x, per_dst_addr=0x%08x\n",
548 __func__, atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG],
549 atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG],
550 atchan->per_src_addr, atchan->per_dst_addr);
551
552 return 0;
553}
554
555static struct dma_async_tx_descriptor *
556at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
557 unsigned int sg_len, enum dma_transfer_direction direction,
558 unsigned long flags, void *context)
559{
560 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
561 struct at_xdmac_desc *first = NULL, *prev = NULL;
562 struct scatterlist *sg;
563 int i;
564 u32 cfg;
565 unsigned int xfer_size = 0;
566
567 if (!sgl)
568 return NULL;
569
570 if (!is_slave_direction(direction)) {
571 dev_err(chan2dev(chan), "invalid DMA direction\n");
572 return NULL;
573 }
574
575 dev_dbg(chan2dev(chan), "%s: sg_len=%d, dir=%s, flags=0x%lx\n",
576 __func__, sg_len,
577 direction == DMA_MEM_TO_DEV ? "to device" : "from device",
578 flags);
579
580	/* Protect the channel configuration that can be modified by set_slave_config. */
581 spin_lock_bh(&atchan->lock);
582
583 /* Prepare descriptors. */
584 for_each_sg(sgl, sg, sg_len, i) {
585 struct at_xdmac_desc *desc = NULL;
586 u32 len, mem;
587
588 len = sg_dma_len(sg);
589 mem = sg_dma_address(sg);
590 if (unlikely(!len)) {
591 dev_err(chan2dev(chan), "sg data length is zero\n");
592 spin_unlock_bh(&atchan->lock);
593 return NULL;
594 }
595 dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
596 __func__, i, len, mem);
597
598 desc = at_xdmac_get_desc(atchan);
599 if (!desc) {
600 dev_err(chan2dev(chan), "can't get descriptor\n");
601 if (first)
602 list_splice_init(&first->descs_list, &atchan->free_descs_list);
603 spin_unlock_bh(&atchan->lock);
604 return NULL;
605 }
606
607 /* Linked list descriptor setup. */
608 if (direction == DMA_DEV_TO_MEM) {
609 desc->lld.mbr_sa = atchan->per_src_addr;
610 desc->lld.mbr_da = mem;
611 cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
612 } else {
613 desc->lld.mbr_sa = mem;
614 desc->lld.mbr_da = atchan->per_dst_addr;
615 cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
616 }
617 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1 /* next descriptor view */
618 | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */
619 | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */
620 | (i == sg_len - 1 ? 0 : AT_XDMAC_MBR_UBC_NDE) /* descriptor fetch */
621 | len / (1 << at_xdmac_get_dwidth(cfg)); /* microblock length */
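		/* e.g. a 64-byte segment at word width (dwidth 2) gives a
		 * microblock length of 64 / 4 = 16. */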
622 dev_dbg(chan2dev(chan),
623 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
624 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
625
626 /* Chain lld. */
627 if (prev) {
628 prev->lld.mbr_nda = desc->tx_dma_desc.phys;
629 dev_dbg(chan2dev(chan),
630 "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
631 __func__, prev, &prev->lld.mbr_nda);
632 }
633
634 prev = desc;
635 if (!first)
636 first = desc;
637
638 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
639 __func__, desc, first);
640 list_add_tail(&desc->desc_node, &first->descs_list);
641 xfer_size += len;
642 }
643
644 spin_unlock_bh(&atchan->lock);
645
646 first->tx_dma_desc.flags = flags;
647 first->xfer_size = xfer_size;
648 first->direction = direction;
649
650 return &first->tx_dma_desc;
651}
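/*
 * A slave client drives at_xdmac_set_slave_config() and the prep function
 * above through the usual dmaengine wrappers; a minimal sketch
 * (peripheral address and widths are illustrative only):
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_MEM_TO_DEV,
 *		.dst_addr	= fifo_phys_addr,
 *		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.dst_maxburst	= 16,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 *	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len,
 *				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */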
652
653static struct dma_async_tx_descriptor *
654at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
655 size_t buf_len, size_t period_len,
656 enum dma_transfer_direction direction,
657 unsigned long flags)
658{
659 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
660 struct at_xdmac_desc *first = NULL, *prev = NULL;
661 unsigned int periods = buf_len / period_len;
662 int i;
663 u32 cfg;
664
665 dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
666 __func__, &buf_addr, buf_len, period_len,
667 direction == DMA_MEM_TO_DEV ? "mem2per" : "per2mem", flags);
668
669 if (!is_slave_direction(direction)) {
670 dev_err(chan2dev(chan), "invalid DMA direction\n");
671 return NULL;
672 }
673
674 if (test_and_set_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status)) {
675 dev_err(chan2dev(chan), "channel currently used\n");
676 return NULL;
677 }
678
679 for (i = 0; i < periods; i++) {
680 struct at_xdmac_desc *desc = NULL;
681
682 spin_lock_bh(&atchan->lock);
683 desc = at_xdmac_get_desc(atchan);
684 if (!desc) {
685 dev_err(chan2dev(chan), "can't get descriptor\n");
686 if (first)
687 list_splice_init(&first->descs_list, &atchan->free_descs_list);
688 spin_unlock_bh(&atchan->lock);
689 return NULL;
690 }
691 spin_unlock_bh(&atchan->lock);
692 dev_dbg(chan2dev(chan),
693 "%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
694 __func__, desc, &desc->tx_dma_desc.phys);
695
696 if (direction == DMA_DEV_TO_MEM) {
697 desc->lld.mbr_sa = atchan->per_src_addr;
698 desc->lld.mbr_da = buf_addr + i * period_len;
699 cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
700 } else {
701 desc->lld.mbr_sa = buf_addr + i * period_len;
702 desc->lld.mbr_da = atchan->per_dst_addr;
703 cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
704 }
705 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
706 | AT_XDMAC_MBR_UBC_NDEN
707 | AT_XDMAC_MBR_UBC_NSEN
708 | AT_XDMAC_MBR_UBC_NDE
709 | period_len >> at_xdmac_get_dwidth(cfg);
710
711 dev_dbg(chan2dev(chan),
712 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
713 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
714
715 /* Chain lld. */
716 if (prev) {
717 prev->lld.mbr_nda = desc->tx_dma_desc.phys;
718 dev_dbg(chan2dev(chan),
719 "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
720 __func__, prev, &prev->lld.mbr_nda);
721 }
722
723 prev = desc;
724 if (!first)
725 first = desc;
726
727 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
728 __func__, desc, first);
729 list_add_tail(&desc->desc_node, &first->descs_list);
730 }
731
732 prev->lld.mbr_nda = first->tx_dma_desc.phys;
733 dev_dbg(chan2dev(chan),
734 "%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
735 __func__, prev, &prev->lld.mbr_nda);
736 first->tx_dma_desc.flags = flags;
737 first->xfer_size = buf_len;
738 first->direction = direction;
739
740 return &first->tx_dma_desc;
741}
742
743static struct dma_async_tx_descriptor *
744at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
745 size_t len, unsigned long flags)
746{
747 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
748 struct at_xdmac_desc *first = NULL, *prev = NULL;
749 size_t remaining_size = len, xfer_size = 0, ublen;
750 dma_addr_t src_addr = src, dst_addr = dest;
751 u32 dwidth;
752 /*
753	 * WARNING: we don't know the transfer direction, which means we can't
754	 * dynamically set the source and dest interfaces, so we have to use
755	 * the same one for both. Only interface 0 allows EBI access.
756	 * Fortunately we can access DDR through both ports (at least on
757	 * SAMA5D4x), so using the same interface for source and dest works
758	 * around the unknown direction.
759 */
760 u32 chan_cc = AT_XDMAC_CC_DAM_INCREMENTED_AM
761 | AT_XDMAC_CC_SAM_INCREMENTED_AM
762 | AT_XDMAC_CC_DIF(0)
763 | AT_XDMAC_CC_SIF(0)
764 | AT_XDMAC_CC_MBSIZE_SIXTEEN
765 | AT_XDMAC_CC_TYPE_MEM_TRAN;
766
767 dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
768 __func__, &src, &dest, len, flags);
769
770 if (unlikely(!len))
771 return NULL;
772
773 /*
774	 * Check address alignment to select the greatest data width we can use.
775	 * Some XDMAC implementations don't provide dword transfers; in that
776	 * case selecting dword behaves the same as selecting word transfers.
777 */
778 if (!((src_addr | dst_addr) & 7)) {
779 dwidth = AT_XDMAC_CC_DWIDTH_DWORD;
780 dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
781 } else if (!((src_addr | dst_addr) & 3)) {
782 dwidth = AT_XDMAC_CC_DWIDTH_WORD;
783 dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
784 } else if (!((src_addr | dst_addr) & 1)) {
785 dwidth = AT_XDMAC_CC_DWIDTH_HALFWORD;
786 dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
787 } else {
788 dwidth = AT_XDMAC_CC_DWIDTH_BYTE;
789 dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
790 }
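	/*
	 * e.g. src = 0x1004 and dst = 0x2008 are both word-aligned, so word
	 * width is selected; src = 0x1002 would drop it to half-word.
	 */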
791
792 /* Prepare descriptors. */
793 while (remaining_size) {
794 struct at_xdmac_desc *desc = NULL;
795
796 dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
797
798 spin_lock_bh(&atchan->lock);
799 desc = at_xdmac_get_desc(atchan);
800 spin_unlock_bh(&atchan->lock);
801 if (!desc) {
802 dev_err(chan2dev(chan), "can't get descriptor\n");
803 if (first)
804 list_splice_init(&first->descs_list, &atchan->free_descs_list);
805 return NULL;
806 }
807
808 /* Update src and dest addresses. */
809 src_addr += xfer_size;
810 dst_addr += xfer_size;
811
812 if (remaining_size >= AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)
813 xfer_size = AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth;
814 else
815 xfer_size = remaining_size;
816
817 dev_dbg(chan2dev(chan), "%s: xfer_size=%zu\n", __func__, xfer_size);
818
819 /* Check remaining length and change data width if needed. */
820 if (!((src_addr | dst_addr | xfer_size) & 7)) {
821 dwidth = AT_XDMAC_CC_DWIDTH_DWORD;
822 dev_dbg(chan2dev(chan), "%s: dwidth: double word\n", __func__);
823 } else if (!((src_addr | dst_addr | xfer_size) & 3)) {
824 dwidth = AT_XDMAC_CC_DWIDTH_WORD;
825 dev_dbg(chan2dev(chan), "%s: dwidth: word\n", __func__);
826 } else if (!((src_addr | dst_addr | xfer_size) & 1)) {
827 dwidth = AT_XDMAC_CC_DWIDTH_HALFWORD;
828 dev_dbg(chan2dev(chan), "%s: dwidth: half word\n", __func__);
829		} else {
830 dwidth = AT_XDMAC_CC_DWIDTH_BYTE;
831 dev_dbg(chan2dev(chan), "%s: dwidth: byte\n", __func__);
832 }
833 chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
834
835 ublen = xfer_size >> dwidth;
836 remaining_size -= xfer_size;
837
838 desc->lld.mbr_sa = src_addr;
839 desc->lld.mbr_da = dst_addr;
840 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2
841 | AT_XDMAC_MBR_UBC_NDEN
842 | AT_XDMAC_MBR_UBC_NSEN
843 | (remaining_size ? AT_XDMAC_MBR_UBC_NDE : 0)
844 | ublen;
845 desc->lld.mbr_cfg = chan_cc;
846
847 dev_dbg(chan2dev(chan),
848 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
849 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc, desc->lld.mbr_cfg);
850
851 /* Chain lld. */
852 if (prev) {
853 prev->lld.mbr_nda = desc->tx_dma_desc.phys;
854 dev_dbg(chan2dev(chan),
855				"%s: chain lld: prev=0x%p, mbr_nda=%pad\n",
856				__func__, prev, &prev->lld.mbr_nda);
857 }
858
859 prev = desc;
860 if (!first)
861 first = desc;
862
863 dev_dbg(chan2dev(chan), "%s: add desc 0x%p to descs_list 0x%p\n",
864 __func__, desc, first);
865 list_add_tail(&desc->desc_node, &first->descs_list);
866 }
867
868 first->tx_dma_desc.flags = flags;
869 first->xfer_size = len;
870
871 return &first->tx_dma_desc;
872}
873
874static enum dma_status
875at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
876 struct dma_tx_state *txstate)
877{
878 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
879 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
880 struct at_xdmac_desc *desc, *_desc;
881 struct list_head *descs_list;
882 enum dma_status ret;
883 int residue;
884 u32 cur_nda, mask, value;
885 u8 dwidth = at_xdmac_get_dwidth(atchan->cfg[AT_XDMAC_CUR_CFG]);
886
887 ret = dma_cookie_status(chan, cookie, txstate);
888 if (ret == DMA_COMPLETE)
889 return ret;
890
891 if (!txstate)
892 return ret;
893
894 spin_lock_bh(&atchan->lock);
895
896 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
897
898 /*
899	 * If the transfer has not been started yet, there is no need to
900	 * compute the residue; it is simply the transfer length.
901 */
902 if (!desc->active_xfer) {
903 dma_set_residue(txstate, desc->xfer_size);
904 spin_unlock_bh(&atchan->lock);
905 return ret;
906 }
907
908 residue = desc->xfer_size;
909 /*
910 * Flush FIFO: only relevant when the transfer is source peripheral
911 * synchronized.
912 */
913 mask = AT_XDMAC_CC_TYPE | AT_XDMAC_CC_DSYNC;
914 value = AT_XDMAC_CC_TYPE_PER_TRAN | AT_XDMAC_CC_DSYNC_PER2MEM;
915 if ((atchan->cfg[AT_XDMAC_CUR_CFG] & mask) == value) {
916 at_xdmac_write(atxdmac, AT_XDMAC_GSWF, atchan->mask);
917 while (!(at_xdmac_chan_read(atchan, AT_XDMAC_CIS) & AT_XDMAC_CIS_FIS))
918 cpu_relax();
919 }
920
921 cur_nda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA) & 0xfffffffc;
922 /*
923	 * Subtract the size of all microblocks already transferred, including
924	 * the current one, then add back the part of the current microblock
925	 * that remains to be transferred.
926 */
927 descs_list = &desc->descs_list;
928 list_for_each_entry_safe(desc, _desc, descs_list, desc_node) {
929 residue -= (desc->lld.mbr_ubc & 0xffffff) << dwidth;
930 if ((desc->lld.mbr_nda & 0xfffffffc) == cur_nda)
931 break;
932 }
933 residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
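	/*
	 * e.g. for a 3 x 4 KiB transfer at byte width stopped inside the
	 * second microblock with CUBC = 0x400, the residue is
	 * 12K - 4K - 4K + 1K = 5K bytes.
	 */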
934
935 spin_unlock_bh(&atchan->lock);
936
937 dma_set_residue(txstate, residue);
938
939 dev_dbg(chan2dev(chan),
940 "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
941 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
942
943 return ret;
944}
945
946/* Call must be protected by lock. */
947static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
948 struct at_xdmac_desc *desc)
949{
950 dev_dbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
951
952 /*
953 * Remove the transfer from the transfer list then move the transfer
954 * descriptors into the free descriptors list.
955 */
956 list_del(&desc->xfer_node);
957 list_splice_init(&desc->descs_list, &atchan->free_descs_list);
958}
959
960static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
961{
962 struct at_xdmac_desc *desc;
963
964 spin_lock_bh(&atchan->lock);
965
966 /*
967	 * If the channel is enabled, do nothing; advance_work will be
968	 * triggered by the interrupt.
969 */
970 if (!at_xdmac_chan_is_enabled(atchan) && !list_empty(&atchan->xfers_list)) {
971 desc = list_first_entry(&atchan->xfers_list,
972 struct at_xdmac_desc,
973 xfer_node);
974 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
975 if (!desc->active_xfer)
976 at_xdmac_start_xfer(atchan, desc);
977 }
978
979 spin_unlock_bh(&atchan->lock);
980}
981
982static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
983{
984 struct at_xdmac_desc *desc;
985 struct dma_async_tx_descriptor *txd;
986
987 desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
988 txd = &desc->tx_dma_desc;
989
990 if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
991 txd->callback(txd->callback_param);
992}
993
994static void at_xdmac_tasklet(unsigned long data)
995{
996 struct at_xdmac_chan *atchan = (struct at_xdmac_chan *)data;
997 struct at_xdmac_desc *desc;
998 u32 error_mask;
999
1000 dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
1001 __func__, atchan->status);
1002
1003 error_mask = AT_XDMAC_CIS_RBEIS
1004 | AT_XDMAC_CIS_WBEIS
1005 | AT_XDMAC_CIS_ROIS;
1006
1007 if (at_xdmac_chan_is_cyclic(atchan)) {
1008 at_xdmac_handle_cyclic(atchan);
1009 } else if ((atchan->status & AT_XDMAC_CIS_LIS)
1010 || (atchan->status & error_mask)) {
1011 struct dma_async_tx_descriptor *txd;
1012
1013 if (atchan->status & AT_XDMAC_CIS_RBEIS)
1014			dev_err(chan2dev(&atchan->chan), "read bus error!\n");
1015		if (atchan->status & AT_XDMAC_CIS_WBEIS)
1016			dev_err(chan2dev(&atchan->chan), "write bus error!\n");
1017		if (atchan->status & AT_XDMAC_CIS_ROIS)
1018			dev_err(chan2dev(&atchan->chan), "request overflow error!\n");
1019
1020 spin_lock_bh(&atchan->lock);
1021 desc = list_first_entry(&atchan->xfers_list,
1022 struct at_xdmac_desc,
1023 xfer_node);
1024 dev_vdbg(chan2dev(&atchan->chan), "%s: desc 0x%p\n", __func__, desc);
1025 BUG_ON(!desc->active_xfer);
1026
1027 txd = &desc->tx_dma_desc;
1028
1029 at_xdmac_remove_xfer(atchan, desc);
1030 spin_unlock_bh(&atchan->lock);
1031
1032 if (!at_xdmac_chan_is_cyclic(atchan)) {
1033 dma_cookie_complete(txd);
1034 if (txd->callback && (txd->flags & DMA_PREP_INTERRUPT))
1035 txd->callback(txd->callback_param);
1036 }
1037
1038 dma_run_dependencies(txd);
1039
1040 at_xdmac_advance_work(atchan);
1041 }
1042}
1043
1044static irqreturn_t at_xdmac_interrupt(int irq, void *dev_id)
1045{
1046 struct at_xdmac *atxdmac = (struct at_xdmac *)dev_id;
1047 struct at_xdmac_chan *atchan;
1048 u32 imr, status, pending;
1049 u32 chan_imr, chan_status;
1050 int i, ret = IRQ_NONE;
1051
1052 do {
1053 imr = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
1054 status = at_xdmac_read(atxdmac, AT_XDMAC_GIS);
1055 pending = status & imr;
1056
1057 dev_vdbg(atxdmac->dma.dev,
1058 "%s: status=0x%08x, imr=0x%08x, pending=0x%08x\n",
1059 __func__, status, imr, pending);
1060
1061 if (!pending)
1062 break;
1063
1064 /* We have to find which channel has generated the interrupt. */
1065 for (i = 0; i < atxdmac->dma.chancnt; i++) {
1066 if (!((1 << i) & pending))
1067 continue;
1068
1069 atchan = &atxdmac->chan[i];
1070 chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
1071 chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
1072 atchan->status = chan_status & chan_imr;
1073 dev_vdbg(atxdmac->dma.dev,
1074 "%s: chan%d: imr=0x%x, status=0x%x\n",
1075 __func__, i, chan_imr, chan_status);
1076 dev_vdbg(chan2dev(&atchan->chan),
1077 "%s: CC=0x%08x CNDA=0x%08x, CNDC=0x%08x, CSA=0x%08x, CDA=0x%08x, CUBC=0x%08x\n",
1078 __func__,
1079 at_xdmac_chan_read(atchan, AT_XDMAC_CC),
1080 at_xdmac_chan_read(atchan, AT_XDMAC_CNDA),
1081 at_xdmac_chan_read(atchan, AT_XDMAC_CNDC),
1082 at_xdmac_chan_read(atchan, AT_XDMAC_CSA),
1083 at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
1084 at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
1085
1086 if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
1087 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1088
1089 tasklet_schedule(&atchan->tasklet);
1090 ret = IRQ_HANDLED;
1091 }
1092
1093 } while (pending);
1094
1095 return ret;
1096}
1097
1098static void at_xdmac_issue_pending(struct dma_chan *chan)
1099{
1100 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1101
1102 dev_dbg(chan2dev(&atchan->chan), "%s\n", __func__);
1103
1104 if (!at_xdmac_chan_is_cyclic(atchan))
1105 at_xdmac_advance_work(atchan);
1106
1107 return;
1108}
1109
1110static int at_xdmac_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
1111 unsigned long arg)
1112{
1113 struct at_xdmac_desc *desc, *_desc;
1114 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1115 struct at_xdmac *atxdmac = to_at_xdmac(atchan->chan.device);
1116 int ret = 0;
1117
1118 dev_dbg(chan2dev(chan), "%s: cmd=%d\n", __func__, cmd);
1119
1120 spin_lock_bh(&atchan->lock);
1121
1122 switch (cmd) {
1123 case DMA_PAUSE:
1124 at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
1125 set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1126 break;
1127
1128 case DMA_RESUME:
1129 if (!at_xdmac_chan_is_paused(atchan))
1130 break;
1131
1132 at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
1133 clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
1134 break;
1135
1136 case DMA_TERMINATE_ALL:
1137 at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
1138 while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
1139 cpu_relax();
1140
1141 /* Cancel all pending transfers. */
1142 list_for_each_entry_safe(desc, _desc, &atchan->xfers_list, xfer_node)
1143 at_xdmac_remove_xfer(atchan, desc);
1144
1145 clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
1146 break;
1147
1148 case DMA_SLAVE_CONFIG:
1149 ret = at_xdmac_set_slave_config(chan,
1150 (struct dma_slave_config *)arg);
1151 break;
1152
1153 default:
1154 dev_err(chan2dev(chan),
1155 "unmanaged or unknown dma control cmd: %d\n", cmd);
1156 ret = -ENXIO;
1157 }
1158
1159 spin_unlock_bh(&atchan->lock);
1160
1161 return ret;
1162}
1163
1164static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
1165{
1166 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1167 struct at_xdmac_desc *desc;
1168 int i;
1169
1170 spin_lock_bh(&atchan->lock);
1171
1172 if (at_xdmac_chan_is_enabled(atchan)) {
1173 dev_err(chan2dev(chan),
1174 "can't allocate channel resources (channel enabled)\n");
1175 i = -EIO;
1176 goto spin_unlock;
1177 }
1178
1179 if (!list_empty(&atchan->free_descs_list)) {
1180 dev_err(chan2dev(chan),
1181 "can't allocate channel resources (channel not free from a previous use)\n");
1182 i = -EIO;
1183 goto spin_unlock;
1184 }
1185
1186 for (i = 0; i < init_nr_desc_per_channel; i++) {
1187 desc = at_xdmac_alloc_desc(chan, GFP_ATOMIC);
1188 if (!desc) {
1189 dev_warn(chan2dev(chan),
1190 "only %d descriptors have been allocated\n", i);
1191 break;
1192 }
1193 list_add_tail(&desc->desc_node, &atchan->free_descs_list);
1194 }
1195
1196 dma_cookie_init(chan);
1197
1198 dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
1199
1200spin_unlock:
1201 spin_unlock_bh(&atchan->lock);
1202 return i;
1203}
1204
1205static void at_xdmac_free_chan_resources(struct dma_chan *chan)
1206{
1207 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1208 struct at_xdmac *atxdmac = to_at_xdmac(chan->device);
1209 struct at_xdmac_desc *desc, *_desc;
1210
1211 list_for_each_entry_safe(desc, _desc, &atchan->free_descs_list, desc_node) {
1212 dev_dbg(chan2dev(chan), "%s: freeing descriptor %p\n", __func__, desc);
1213 list_del(&desc->desc_node);
1214 dma_pool_free(atxdmac->at_xdmac_desc_pool, desc, desc->tx_dma_desc.phys);
1215 }
1216
1217 return;
1218}
1219
1220#define AT_XDMAC_DMA_BUSWIDTHS\
1221 (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
1222 BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) |\
1223 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
1224 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) |\
1225 BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
1226
1227static int at_xdmac_device_slave_caps(struct dma_chan *dchan,
1228 struct dma_slave_caps *caps)
1229{
1230
1231 caps->src_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
1232 caps->dstn_addr_widths = AT_XDMAC_DMA_BUSWIDTHS;
1233 caps->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV);
1234 caps->cmd_pause = true;
1235 caps->cmd_terminate = true;
1236 caps->residue_granularity = DMA_RESIDUE_GRANULARITY_BURST;
1237
1238 return 0;
1239}
1240
1241#ifdef CONFIG_PM
1242static int atmel_xdmac_prepare(struct device *dev)
1243{
1244 struct platform_device *pdev = to_platform_device(dev);
1245 struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
1246 struct dma_chan *chan, *_chan;
1247
1248 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
1249 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1250
1251		/* Wait for transfer completion, except in the cyclic case. */
1252 if (at_xdmac_chan_is_enabled(atchan) && !at_xdmac_chan_is_cyclic(atchan))
1253 return -EAGAIN;
1254 }
1255 return 0;
1256}
1257#else
1258# define atmel_xdmac_prepare NULL
1259#endif
1260
1261#ifdef CONFIG_PM_SLEEP
1262static int atmel_xdmac_suspend(struct device *dev)
1263{
1264 struct platform_device *pdev = to_platform_device(dev);
1265 struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
1266 struct dma_chan *chan, *_chan;
1267
1268 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
1269 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1270
1271 if (at_xdmac_chan_is_cyclic(atchan)) {
1272 if (!at_xdmac_chan_is_paused(atchan))
1273 at_xdmac_control(chan, DMA_PAUSE, 0);
1274 atchan->save_cim = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
1275 atchan->save_cnda = at_xdmac_chan_read(atchan, AT_XDMAC_CNDA);
1276 atchan->save_cndc = at_xdmac_chan_read(atchan, AT_XDMAC_CNDC);
1277 }
1278 }
1279 atxdmac->save_gim = at_xdmac_read(atxdmac, AT_XDMAC_GIM);
1280
1281 at_xdmac_off(atxdmac);
1282 clk_disable_unprepare(atxdmac->clk);
1283 return 0;
1284}
1285
1286static int atmel_xdmac_resume(struct device *dev)
1287{
1288 struct platform_device *pdev = to_platform_device(dev);
1289 struct at_xdmac *atxdmac = platform_get_drvdata(pdev);
1290 struct at_xdmac_chan *atchan;
1291 struct dma_chan *chan, *_chan;
1292 int i;
1293 u32 cfg;
1294
1295 clk_prepare_enable(atxdmac->clk);
1296
1297 /* Clear pending interrupts. */
1298 for (i = 0; i < atxdmac->dma.chancnt; i++) {
1299 atchan = &atxdmac->chan[i];
1300 while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
1301 cpu_relax();
1302 }
1303
1304 at_xdmac_write(atxdmac, AT_XDMAC_GIE, atxdmac->save_gim);
1305 at_xdmac_write(atxdmac, AT_XDMAC_GE, atxdmac->save_gs);
1306 list_for_each_entry_safe(chan, _chan, &atxdmac->dma.channels, device_node) {
1307 atchan = to_at_xdmac_chan(chan);
1308 cfg = atchan->cfg[AT_XDMAC_CUR_CFG];
1309 at_xdmac_chan_write(atchan, AT_XDMAC_CC, cfg);
1310 if (at_xdmac_chan_is_cyclic(atchan)) {
1311 at_xdmac_chan_write(atchan, AT_XDMAC_CNDA, atchan->save_cnda);
1312 at_xdmac_chan_write(atchan, AT_XDMAC_CNDC, atchan->save_cndc);
1313 at_xdmac_chan_write(atchan, AT_XDMAC_CIE, atchan->save_cim);
1314 wmb();
1315 at_xdmac_write(atxdmac, AT_XDMAC_GE, atchan->mask);
1316 }
1317 }
1318 return 0;
1319}
1320#endif /* CONFIG_PM_SLEEP */
1321
1322static int at_xdmac_probe(struct platform_device *pdev)
1323{
1324 struct resource *res;
1325 struct at_xdmac *atxdmac;
1326 int irq, size, nr_channels, i, ret;
1327 void __iomem *base;
1328 u32 reg;
1329
1330 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1331 if (!res)
1332 return -EINVAL;
1333
1334 irq = platform_get_irq(pdev, 0);
1335 if (irq < 0)
1336 return irq;
1337
1338 base = devm_ioremap_resource(&pdev->dev, res);
1339 if (IS_ERR(base))
1340 return PTR_ERR(base);
1341
1342	/*
1343	 * Read the number of xdmac channels; the read helper function can't be
1344	 * used here since atxdmac is not yet allocated and we need the channel
1345	 * count to size that allocation.
1346	 */
1347 reg = readl_relaxed(base + AT_XDMAC_GTYPE);
1348 nr_channels = AT_XDMAC_NB_CH(reg);
1349 if (nr_channels > AT_XDMAC_MAX_CHAN) {
1350 dev_err(&pdev->dev, "invalid number of channels (%u)\n",
1351 nr_channels);
1352 return -EINVAL;
1353 }
1354
1355 size = sizeof(*atxdmac);
1356 size += nr_channels * sizeof(struct at_xdmac_chan);
1357 atxdmac = devm_kzalloc(&pdev->dev, size, GFP_KERNEL);
1358 if (!atxdmac) {
1359 dev_err(&pdev->dev, "can't allocate at_xdmac structure\n");
1360 return -ENOMEM;
1361 }
1362
1363 atxdmac->regs = base;
1364 atxdmac->irq = irq;
1365
1366 atxdmac->clk = devm_clk_get(&pdev->dev, "dma_clk");
1367 if (IS_ERR(atxdmac->clk)) {
1368 dev_err(&pdev->dev, "can't get dma_clk\n");
1369 return PTR_ERR(atxdmac->clk);
1370 }
1371
1372	/* Do not use devres for the IRQ, to prevent races with the tasklet */
1373 ret = request_irq(atxdmac->irq, at_xdmac_interrupt, 0, "at_xdmac", atxdmac);
1374 if (ret) {
1375 dev_err(&pdev->dev, "can't request irq\n");
1376 return ret;
1377 }
1378
1379 ret = clk_prepare_enable(atxdmac->clk);
1380 if (ret) {
1381 dev_err(&pdev->dev, "can't prepare or enable clock\n");
1382 goto err_free_irq;
1383 }
1384
1385 atxdmac->at_xdmac_desc_pool =
1386 dmam_pool_create(dev_name(&pdev->dev), &pdev->dev,
1387 sizeof(struct at_xdmac_desc), 4, 0);
1388 if (!atxdmac->at_xdmac_desc_pool) {
1389 dev_err(&pdev->dev, "no memory for descriptors dma pool\n");
1390 ret = -ENOMEM;
1391 goto err_clk_disable;
1392 }
1393
1394 dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
1395 dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
1396 dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
1397 /*
1398	 * Without DMA_PRIVATE the driver is not able to allocate more than
1399	 * one channel: the second allocation fails in private_candidate().
1400 */
1401 dma_cap_set(DMA_PRIVATE, atxdmac->dma.cap_mask);
1402 atxdmac->dma.dev = &pdev->dev;
1403 atxdmac->dma.device_alloc_chan_resources = at_xdmac_alloc_chan_resources;
1404 atxdmac->dma.device_free_chan_resources = at_xdmac_free_chan_resources;
1405 atxdmac->dma.device_tx_status = at_xdmac_tx_status;
1406 atxdmac->dma.device_issue_pending = at_xdmac_issue_pending;
1407 atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic;
1408 atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy;
1409 atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg;
1410 atxdmac->dma.device_control = at_xdmac_control;
1411 atxdmac->dma.device_slave_caps = at_xdmac_device_slave_caps;
1412
1413 /* Disable all chans and interrupts. */
1414 at_xdmac_off(atxdmac);
1415
1416 /* Init channels. */
1417 INIT_LIST_HEAD(&atxdmac->dma.channels);
1418 for (i = 0; i < nr_channels; i++) {
1419 struct at_xdmac_chan *atchan = &atxdmac->chan[i];
1420
1421 atchan->chan.device = &atxdmac->dma;
1422 list_add_tail(&atchan->chan.device_node,
1423 &atxdmac->dma.channels);
1424
1425 atchan->ch_regs = at_xdmac_chan_reg_base(atxdmac, i);
1426 atchan->mask = 1 << i;
1427
1428 spin_lock_init(&atchan->lock);
1429 INIT_LIST_HEAD(&atchan->xfers_list);
1430 INIT_LIST_HEAD(&atchan->free_descs_list);
1431 tasklet_init(&atchan->tasklet, at_xdmac_tasklet,
1432 (unsigned long)atchan);
1433
1434 /* Clear pending interrupts. */
1435 while (at_xdmac_chan_read(atchan, AT_XDMAC_CIS))
1436 cpu_relax();
1437 }
1438 platform_set_drvdata(pdev, atxdmac);
1439
1440 ret = dma_async_device_register(&atxdmac->dma);
1441 if (ret) {
1442		dev_err(&pdev->dev, "failed to register DMA engine device\n");
1443 goto err_clk_disable;
1444 }
1445
1446 ret = of_dma_controller_register(pdev->dev.of_node,
1447 at_xdmac_xlate, atxdmac);
1448 if (ret) {
1449		dev_err(&pdev->dev, "could not register OF DMA controller\n");
1450 goto err_dma_unregister;
1451 }
1452
1453 dev_info(&pdev->dev, "%d channels, mapped at 0x%p\n",
1454 nr_channels, atxdmac->regs);
1455
1456 return 0;
1457
1458err_dma_unregister:
1459 dma_async_device_unregister(&atxdmac->dma);
1460err_clk_disable:
1461 clk_disable_unprepare(atxdmac->clk);
1462err_free_irq:
1463 free_irq(atxdmac->irq, atxdmac->dma.dev);
1464 return ret;
1465}
1466
1467static int at_xdmac_remove(struct platform_device *pdev)
1468{
1469 struct at_xdmac *atxdmac = (struct at_xdmac *)platform_get_drvdata(pdev);
1470 int i;
1471
1472 at_xdmac_off(atxdmac);
1473 of_dma_controller_free(pdev->dev.of_node);
1474 dma_async_device_unregister(&atxdmac->dma);
1475 clk_disable_unprepare(atxdmac->clk);
1476
1477 synchronize_irq(atxdmac->irq);
1478
1479 free_irq(atxdmac->irq, atxdmac->dma.dev);
1480
1481 for (i = 0; i < atxdmac->dma.chancnt; i++) {
1482 struct at_xdmac_chan *atchan = &atxdmac->chan[i];
1483
1484 tasklet_kill(&atchan->tasklet);
1485 at_xdmac_free_chan_resources(&atchan->chan);
1486 }
1487
1488 return 0;
1489}
1490
1491static const struct dev_pm_ops atmel_xdmac_dev_pm_ops = {
1492 .prepare = atmel_xdmac_prepare,
1493 SET_LATE_SYSTEM_SLEEP_PM_OPS(atmel_xdmac_suspend, atmel_xdmac_resume)
1494};
1495
1496static const struct of_device_id atmel_xdmac_dt_ids[] = {
1497 {
1498 .compatible = "atmel,sama5d4-dma",
1499 }, {
1500 /* sentinel */
1501 }
1502};
1503MODULE_DEVICE_TABLE(of, atmel_xdmac_dt_ids);
1504
1505static struct platform_driver at_xdmac_driver = {
1506 .probe = at_xdmac_probe,
1507 .remove = at_xdmac_remove,
1508 .driver = {
1509 .name = "at_xdmac",
1510 .owner = THIS_MODULE,
1511 .of_match_table = of_match_ptr(atmel_xdmac_dt_ids),
1512 .pm = &atmel_xdmac_dev_pm_ops,
1513 }
1514};
1515
1516static int __init at_xdmac_init(void)
1517{
1518 return platform_driver_probe(&at_xdmac_driver, at_xdmac_probe);
1519}
1520subsys_initcall(at_xdmac_init);
1521
1522MODULE_DESCRIPTION("Atmel Extended DMA Controller driver");
1523MODULE_AUTHOR("Ludovic Desroches <ludovic.desroches@atmel.com>");
1524MODULE_LICENSE("GPL");
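
Editor's note: the listing above ends the new at_xdmac driver. For orientation, here is a minimal, hypothetical sketch of how a slave client would drive such a controller through the generic dmaengine wrappers — dmaengine_slave_config() lands in at_xdmac_control(DMA_SLAVE_CONFIG, ...) and dma_async_issue_pending() in at_xdmac_issue_pending(). The channel name "tx", the FIFO address and the error handling are invented for the example; only the dmaengine calls themselves are real API.

	#include <linux/dmaengine.h>
	#include <linux/dma-mapping.h>

	/* Hypothetical client: stream one mapped buffer to a device FIFO. */
	static int example_start_tx(struct device *dev, dma_addr_t buf, size_t len)
	{
		struct dma_chan *chan;
		struct dma_slave_config cfg = {
			.direction = DMA_MEM_TO_DEV,
			.dst_addr = 0xf8038000,		/* invented FIFO address */
			.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
			.dst_maxburst = 16,
		};
		struct dma_async_tx_descriptor *desc;
		dma_cookie_t cookie;

		chan = dma_request_slave_channel(dev, "tx");	/* "dmas" lookup */
		if (!chan)
			return -ENODEV;

		/* Routed to at_xdmac_control(..., DMA_SLAVE_CONFIG, ...) */
		if (dmaengine_slave_config(chan, &cfg))
			goto err;

		desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
						   DMA_PREP_INTERRUPT);
		if (!desc)
			goto err;

		cookie = dmaengine_submit(desc);
		if (dma_submit_error(cookie))
			goto err;

		dma_async_issue_pending(chan);	/* at_xdmac_issue_pending() runs */
		return 0;
	err:
		dma_release_channel(chan);
		return -EIO;
	}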
diff --git a/drivers/dma/bcm2835-dma.c b/drivers/dma/bcm2835-dma.c
index 68007974961a..918b7b3f766f 100644
--- a/drivers/dma/bcm2835-dma.c
+++ b/drivers/dma/bcm2835-dma.c
@@ -525,8 +525,6 @@ static int bcm2835_dma_chan_init(struct bcm2835_dmadev *d, int chan_id, int irq)
 	vchan_init(&c->vc, &d->ddev);
 	INIT_LIST_HEAD(&c->node);
 
-	d->ddev.chancnt++;
-
 	c->chan_base = BCM2835_DMA_CHANIO(d->base, chan_id);
 	c->ch = chan_id;
 	c->irq_number = irq;
@@ -694,7 +692,6 @@ static struct platform_driver bcm2835_dma_driver = {
 	.remove	= bcm2835_dma_remove,
 	.driver = {
 		.name = "bcm2835-dma",
-		.owner = THIS_MODULE,
 		.of_match_table = of_match_ptr(bcm2835_dma_of_match),
 	},
 };
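
Editor's note: this hunk is the first of several in this series that delete manual chancnt bookkeeping (fsldma, jz4740, k3dma, mpc512x and omap-dma below follow the same pattern). The dmaengine core already counts channels when the device is registered, so bumping chancnt by hand double-counts. A simplified sketch of the counting the core performs at register time — not the verbatim dmaengine.c code — looks like this:

	#include <linux/dmaengine.h>
	#include <linux/list.h>

	/* Simplified sketch of how the core can derive chancnt at register
	 * time; the real dma_async_device_register() does more validation. */
	static void example_count_channels(struct dma_device *device)
	{
		struct dma_chan *chan;
		int chancnt = 0;

		list_for_each_entry(chan, &device->channels, device_node)
			chancnt++;

		device->chancnt = chancnt;
	}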
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index a58eec3b2cad..b743adf56465 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -1,3 +1,4 @@
+#include <linux/delay.h>
 #include <linux/dmaengine.h>
 #include <linux/dma-mapping.h>
 #include <linux/platform_device.h>
@@ -567,7 +568,7 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c)
 		reg |= GCR_TEARDOWN;
 		cppi_writel(reg, c->gcr_reg);
 		c->td_queued = 1;
-		c->td_retry = 100;
+		c->td_retry = 500;
 	}
 
 	if (!c->td_seen || !c->td_desc_seen) {
@@ -603,12 +604,16 @@ static int cppi41_tear_down_chan(struct cppi41_channel *c)
 	 * descriptor before the TD we fetch it from enqueue, it has to be
 	 * there waiting for us.
 	 */
-	if (!c->td_seen && c->td_retry)
+	if (!c->td_seen && c->td_retry) {
+		udelay(1);
 		return -EAGAIN;
-
+	}
 	WARN_ON(!c->td_retry);
+
 	if (!c->td_desc_seen) {
 		desc_phys = cppi41_pop_desc(cdd, c->q_num);
+		if (!desc_phys)
+			desc_phys = cppi41_pop_desc(cdd, c->q_comp_num);
 		WARN_ON(!desc_phys);
 	}
 
@@ -1088,7 +1093,6 @@ static struct platform_driver cpp41_dma_driver = {
 	.remove = cppi41_dma_remove,
 	.driver = {
 		.name = "cppi41-dma-engine",
-		.owner = THIS_MODULE,
 		.pm = &cppi41_pm_ops,
 		.of_match_table = of_match_ptr(cppi41_dma_ids),
 	},
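
Editor's note: the teardown change above both lengthens the retry budget (100 to 500) and inserts a 1us breather per retry, so a slow teardown-descriptor pop no longer trips the WARN_ON. The underlying pattern, as a hedged, generic sketch (example_poll_done() and its callback argument are invented):

	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	/* Generic sketch of the pattern: poll, back off, bound the wait. */
	static int example_poll_done(bool (*done)(void *arg), void *arg)
	{
		int retry;

		for (retry = 0; retry < 500; retry++) {	/* the new td_retry */
			if (done(arg))
				return 0;
			udelay(1);	/* the 1us breather the patch adds */
		}
		return -ETIMEDOUT;
	}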
diff --git a/drivers/dma/dma-jz4740.c b/drivers/dma/dma-jz4740.c
index ae2ab14e64b3..bdeafeefa5f6 100644
--- a/drivers/dma/dma-jz4740.c
+++ b/drivers/dma/dma-jz4740.c
@@ -563,10 +563,9 @@ static int jz4740_dma_probe(struct platform_device *pdev)
 	dd->device_prep_dma_cyclic = jz4740_dma_prep_dma_cyclic;
 	dd->device_control = jz4740_dma_control;
 	dd->dev = &pdev->dev;
-	dd->chancnt = JZ_DMA_NR_CHANS;
 	INIT_LIST_HEAD(&dd->channels);
 
-	for (i = 0; i < dd->chancnt; i++) {
+	for (i = 0; i < JZ_DMA_NR_CHANS; i++) {
 		chan = &dmadev->chan[i];
 		chan->id = i;
 		chan->vchan.desc_free = jz4740_dma_desc_free;
@@ -608,7 +607,6 @@ static struct platform_driver jz4740_dma_driver = {
 	.remove = jz4740_dma_remove,
 	.driver = {
 		.name = "jz4740-dma",
-		.owner = THIS_MODULE,
 	},
 };
 module_platform_driver(jz4740_dma_driver);
diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c
index 24bfaf0b92ba..e057935e3023 100644
--- a/drivers/dma/dmaengine.c
+++ b/drivers/dma/dmaengine.c
@@ -330,8 +330,7 @@ static int __init dma_channel_table_init(void)
 	if (err) {
 		pr_err("initialization failure\n");
 		for_each_dma_cap_mask(cap, dma_cap_mask_all)
-			if (channel_table[cap])
-				free_percpu(channel_table[cap]);
+			free_percpu(channel_table[cap]);
 	}
 
 	return err;
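
Editor's note: the removed NULL check is redundant because free_percpu(), like kfree(), accepts NULL and returns immediately. A tiny sketch of the idiom (the example function name is invented):

	#include <linux/percpu.h>

	/* Sketch: free_percpu(NULL) is a no-op, like kfree(NULL), so error
	 * paths need no NULL check before freeing. */
	static void example_percpu_cleanup(void)
	{
		u32 __percpu *counter = alloc_percpu(u32);

		/* ... allocation may have failed; counter may be NULL ... */
		free_percpu(counter);	/* safe either way */
	}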
diff --git a/drivers/dma/fsl-edma.c b/drivers/dma/fsl-edma.c
index 3c5711d5fe97..6fb2e902b459 100644
--- a/drivers/dma/fsl-edma.c
+++ b/drivers/dma/fsl-edma.c
@@ -118,17 +118,17 @@
 	BIT(DMA_SLAVE_BUSWIDTH_8_BYTES)
 
 struct fsl_edma_hw_tcd {
-	u32	saddr;
-	u16	soff;
-	u16	attr;
-	u32	nbytes;
-	u32	slast;
-	u32	daddr;
-	u16	doff;
-	u16	citer;
-	u32	dlast_sga;
-	u16	csr;
-	u16	biter;
+	__le32	saddr;
+	__le16	soff;
+	__le16	attr;
+	__le32	nbytes;
+	__le32	slast;
+	__le32	daddr;
+	__le16	doff;
+	__le16	citer;
+	__le32	dlast_sga;
+	__le16	csr;
+	__le16	biter;
 };
 
 struct fsl_edma_sw_tcd {
@@ -175,18 +175,12 @@ struct fsl_edma_engine {
 };
 
 /*
- * R/W functions for big- or little-endian registers
- * the eDMA controller's endian is independent of the CPU core's endian.
+ * R/W functions for big- or little-endian registers:
+ * The eDMA controller's endian is independent of the CPU core's endian.
+ * For the big-endian IP module, the offset for 8-bit or 16-bit registers
+ * should also be swapped opposite to that in little-endian IP.
  */
 
-static u16 edma_readw(struct fsl_edma_engine *edma, void __iomem *addr)
-{
-	if (edma->big_endian)
-		return ioread16be(addr);
-	else
-		return ioread16(addr);
-}
-
 static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
 {
 	if (edma->big_endian)
@@ -197,13 +191,18 @@ static u32 edma_readl(struct fsl_edma_engine *edma, void __iomem *addr)
 
 static void edma_writeb(struct fsl_edma_engine *edma, u8 val, void __iomem *addr)
 {
-	iowrite8(val, addr);
+	/* swap the reg offset for these in big-endian mode */
+	if (edma->big_endian)
+		iowrite8(val, (void __iomem *)((unsigned long)addr ^ 0x3));
+	else
+		iowrite8(val, addr);
 }
 
 static void edma_writew(struct fsl_edma_engine *edma, u16 val, void __iomem *addr)
 {
+	/* swap the reg offset for these in big-endian mode */
 	if (edma->big_endian)
-		iowrite16be(val, addr);
+		iowrite16be(val, (void __iomem *)((unsigned long)addr ^ 0x2));
 	else
 		iowrite16(val, addr);
 }
@@ -254,13 +253,12 @@ static void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
 	chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
 	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
 	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
+	slot = EDMAMUX_CHCFG_SOURCE(slot);
 
 	if (enable)
-		edma_writeb(fsl_chan->edma,
-			EDMAMUX_CHCFG_ENBL | EDMAMUX_CHCFG_SOURCE(slot),
-			muxaddr + ch_off);
+		iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off);
 	else
-		edma_writeb(fsl_chan->edma, EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
+		iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
 }
 
 static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
@@ -286,9 +284,8 @@ static void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
 
 	fsl_desc = to_fsl_edma_desc(vdesc);
 	for (i = 0; i < fsl_desc->n_tcds; i++)
-		dma_pool_free(fsl_desc->echan->tcd_pool,
-			fsl_desc->tcd[i].vtcd,
-			fsl_desc->tcd[i].ptcd);
+		dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
+			      fsl_desc->tcd[i].ptcd);
 	kfree(fsl_desc);
 }
 
@@ -363,8 +360,8 @@ static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
 
 	/* calculate the total size in this desc */
 	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
-		len += edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes))
-			* edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter));
+		len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
+			* le16_to_cpu(edesc->tcd[i].vtcd->biter);
 
 	if (!in_progress)
 		return len;
@@ -376,17 +373,15 @@ static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
 
 	/* figure out the finished and calculate the residue */
 	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
-		size = edma_readl(fsl_chan->edma, &(edesc->tcd[i].vtcd->nbytes))
-			* edma_readw(fsl_chan->edma, &(edesc->tcd[i].vtcd->biter));
+		size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
+			* le16_to_cpu(edesc->tcd[i].vtcd->biter);
 		if (dir == DMA_MEM_TO_DEV)
-			dma_addr = edma_readl(fsl_chan->edma,
-				&(edesc->tcd[i].vtcd->saddr));
+			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
 		else
-			dma_addr = edma_readl(fsl_chan->edma,
-				&(edesc->tcd[i].vtcd->daddr));
+			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);
 
 		len -= size;
-		if (cur_addr > dma_addr && cur_addr < dma_addr + size) {
+		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
 			len += dma_addr + size - cur_addr;
 			break;
 		}
@@ -424,55 +419,67 @@ static enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
 	return fsl_chan->status;
 }
 
-static void fsl_edma_set_tcd_params(struct fsl_edma_chan *fsl_chan,
-		u32 src, u32 dst, u16 attr, u16 soff, u32 nbytes,
-		u32 slast, u16 citer, u16 biter, u32 doff, u32 dlast_sga,
-		u16 csr)
+static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
+				  struct fsl_edma_hw_tcd *tcd)
 {
+	struct fsl_edma_engine *edma = fsl_chan->edma;
 	void __iomem *addr = fsl_chan->edma->membase;
 	u32 ch = fsl_chan->vchan.chan.chan_id;
 
 	/*
-	 * TCD parameters have been swapped in fill_tcd_params(),
-	 * so just write them to registers in the cpu endian here
+	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little
+	 * endian format. However, we need to load the TCD registers in
+	 * big- or little-endian obeying the eDMA engine model endian.
 	 */
-	writew(0, addr + EDMA_TCD_CSR(ch));
-	writel(src, addr + EDMA_TCD_SADDR(ch));
-	writel(dst, addr + EDMA_TCD_DADDR(ch));
-	writew(attr, addr + EDMA_TCD_ATTR(ch));
-	writew(soff, addr + EDMA_TCD_SOFF(ch));
-	writel(nbytes, addr + EDMA_TCD_NBYTES(ch));
-	writel(slast, addr + EDMA_TCD_SLAST(ch));
-	writew(citer, addr + EDMA_TCD_CITER(ch));
-	writew(biter, addr + EDMA_TCD_BITER(ch));
-	writew(doff, addr + EDMA_TCD_DOFF(ch));
-	writel(dlast_sga, addr + EDMA_TCD_DLAST_SGA(ch));
-	writew(csr, addr + EDMA_TCD_CSR(ch));
-}
-
-static void fill_tcd_params(struct fsl_edma_engine *edma,
-		struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
-		u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
-		u16 biter, u16 doff, u32 dlast_sga, bool major_int,
-		bool disable_req, bool enable_sg)
+	edma_writew(edma, 0, addr + EDMA_TCD_CSR(ch));
+	edma_writel(edma, le32_to_cpu(tcd->saddr), addr + EDMA_TCD_SADDR(ch));
+	edma_writel(edma, le32_to_cpu(tcd->daddr), addr + EDMA_TCD_DADDR(ch));
+
+	edma_writew(edma, le16_to_cpu(tcd->attr), addr + EDMA_TCD_ATTR(ch));
+	edma_writew(edma, le16_to_cpu(tcd->soff), addr + EDMA_TCD_SOFF(ch));
+
+	edma_writel(edma, le32_to_cpu(tcd->nbytes), addr + EDMA_TCD_NBYTES(ch));
+	edma_writel(edma, le32_to_cpu(tcd->slast), addr + EDMA_TCD_SLAST(ch));
+
+	edma_writew(edma, le16_to_cpu(tcd->citer), addr + EDMA_TCD_CITER(ch));
+	edma_writew(edma, le16_to_cpu(tcd->biter), addr + EDMA_TCD_BITER(ch));
+	edma_writew(edma, le16_to_cpu(tcd->doff), addr + EDMA_TCD_DOFF(ch));
+
+	edma_writel(edma, le32_to_cpu(tcd->dlast_sga), addr + EDMA_TCD_DLAST_SGA(ch));
+
+	edma_writew(edma, le16_to_cpu(tcd->csr), addr + EDMA_TCD_CSR(ch));
+}
+
+static inline
+void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
+		       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
+		       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
+		       bool disable_req, bool enable_sg)
 {
 	u16 csr = 0;
 
 	/*
-	 * eDMA hardware SGs require the TCD parameters stored in memory
-	 * the same endian as the eDMA module so that they can be loaded
-	 * automatically by the engine
+	 * eDMA hardware SGs require the TCDs to be stored in little
+	 * endian format irrespective of the register endian model.
+	 * So we put the value in little endian in memory, waiting
+	 * for fsl_edma_set_tcd_regs doing the swap.
 	 */
-	edma_writel(edma, src, &(tcd->saddr));
-	edma_writel(edma, dst, &(tcd->daddr));
-	edma_writew(edma, attr, &(tcd->attr));
-	edma_writew(edma, EDMA_TCD_SOFF_SOFF(soff), &(tcd->soff));
-	edma_writel(edma, EDMA_TCD_NBYTES_NBYTES(nbytes), &(tcd->nbytes));
-	edma_writel(edma, EDMA_TCD_SLAST_SLAST(slast), &(tcd->slast));
-	edma_writew(edma, EDMA_TCD_CITER_CITER(citer), &(tcd->citer));
-	edma_writew(edma, EDMA_TCD_DOFF_DOFF(doff), &(tcd->doff));
-	edma_writel(edma, EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga), &(tcd->dlast_sga));
-	edma_writew(edma, EDMA_TCD_BITER_BITER(biter), &(tcd->biter));
+	tcd->saddr = cpu_to_le32(src);
+	tcd->daddr = cpu_to_le32(dst);
+
+	tcd->attr = cpu_to_le16(attr);
+
+	tcd->soff = cpu_to_le16(EDMA_TCD_SOFF_SOFF(soff));
+
+	tcd->nbytes = cpu_to_le32(EDMA_TCD_NBYTES_NBYTES(nbytes));
+	tcd->slast = cpu_to_le32(EDMA_TCD_SLAST_SLAST(slast));
+
+	tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
+	tcd->doff = cpu_to_le16(EDMA_TCD_DOFF_DOFF(doff));
+
+	tcd->dlast_sga = cpu_to_le32(EDMA_TCD_DLAST_SGA_DLAST_SGA(dlast_sga));
+
+	tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
 	if (major_int)
 		csr |= EDMA_TCD_CSR_INT_MAJOR;
 
@@ -482,7 +489,7 @@ static void fill_tcd_params(struct fsl_edma_engine *edma,
 	if (enable_sg)
 		csr |= EDMA_TCD_CSR_E_SG;
 
-	edma_writew(edma, csr, &(tcd->csr));
+	tcd->csr = cpu_to_le16(csr);
 }
 
 static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
@@ -558,9 +565,9 @@ static struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
 			doff = fsl_chan->fsc.addr_width;
 		}
 
-		fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd, src_addr,
-				dst_addr, fsl_chan->fsc.attr, soff, nbytes, 0,
-				iter, iter, doff, last_sg, true, false, true);
+		fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
+				  fsl_chan->fsc.attr, soff, nbytes, 0, iter,
+				  iter, doff, last_sg, true, false, true);
 		dma_buf_next += period_len;
 	}
 
@@ -607,16 +614,16 @@ static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
 		iter = sg_dma_len(sg) / nbytes;
 		if (i < sg_len - 1) {
 			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
-			fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
-					src_addr, dst_addr, fsl_chan->fsc.attr,
-					soff, nbytes, 0, iter, iter, doff, last_sg,
-					false, false, true);
+			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
+					  dst_addr, fsl_chan->fsc.attr, soff,
+					  nbytes, 0, iter, iter, doff, last_sg,
+					  false, false, true);
 		} else {
 			last_sg = 0;
-			fill_tcd_params(fsl_chan->edma, fsl_desc->tcd[i].vtcd,
-					src_addr, dst_addr, fsl_chan->fsc.attr,
-					soff, nbytes, 0, iter, iter, doff, last_sg,
-					true, true, false);
+			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
+					  dst_addr, fsl_chan->fsc.attr, soff,
+					  nbytes, 0, iter, iter, doff, last_sg,
+					  true, true, false);
 		}
 	}
 
@@ -625,17 +632,13 @@ static struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
 
 static void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
 {
-	struct fsl_edma_hw_tcd *tcd;
 	struct virt_dma_desc *vdesc;
 
 	vdesc = vchan_next_desc(&fsl_chan->vchan);
 	if (!vdesc)
 		return;
 	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
-	tcd = fsl_chan->edesc->tcd[0].vtcd;
-	fsl_edma_set_tcd_params(fsl_chan, tcd->saddr, tcd->daddr, tcd->attr,
-			tcd->soff, tcd->nbytes, tcd->slast, tcd->citer,
-			tcd->biter, tcd->doff, tcd->dlast_sga, tcd->csr);
+	fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
 	fsl_edma_enable_request(fsl_chan);
 	fsl_chan->status = DMA_IN_PROGRESS;
 }
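
Editor's note: the net effect of the rework above is a clean split between two endianness domains: TCDs in memory are always little-endian, because the engine fetches them autonomously during hardware scatter-gather, while the MMIO registers follow the IP integration's endianness. A reduced sketch of that split, with a simplified struct and flag standing in for the driver's own:

	#include <linux/io.h>
	#include <asm/byteorder.h>

	/* Simplified stand-in for struct fsl_edma_hw_tcd. */
	struct example_tcd {
		__le32 saddr;
		__le32 daddr;
	};

	static void example_store_tcd(struct example_tcd *tcd, u32 src, u32 dst)
	{
		tcd->saddr = cpu_to_le32(src);	/* memory side: fixed LE */
		tcd->daddr = cpu_to_le32(dst);
	}

	static void example_mmio_write(bool big_endian, u32 val, void __iomem *reg)
	{
		if (big_endian)
			iowrite32be(val, reg);	/* register side: per-IP endian */
		else
			iowrite32(val, reg);
	}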
diff --git a/drivers/dma/fsldma.c b/drivers/dma/fsldma.c
index 994bcb2c6b92..3d8feb5e4c2f 100644
--- a/drivers/dma/fsldma.c
+++ b/drivers/dma/fsldma.c
@@ -1337,7 +1337,6 @@ static int fsl_dma_chan_probe(struct fsldma_device *fdev,
 
 	/* Add the channel to DMA device channel list */
 	list_add_tail(&chan->common.device_node, &fdev->common.channels);
-	fdev->common.chancnt++;
 
 	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
 		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 88afc48c2ca7..d0df198f62e9 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -729,6 +729,7 @@ static void sdma_get_pc(struct sdma_channel *sdmac,
 	case IMX_DMATYPE_CSPI:
 	case IMX_DMATYPE_EXT:
 	case IMX_DMATYPE_SSI:
+	case IMX_DMATYPE_SAI:
 		per_2_emi = sdma->script_addrs->app_2_mcu_addr;
 		emi_2_per = sdma->script_addrs->mcu_2_app_addr;
 		break;
@@ -1287,7 +1288,8 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
 	unsigned short *ram_code;
 
 	if (!fw) {
-		dev_err(sdma->dev, "firmware not found\n");
+		dev_info(sdma->dev, "external firmware not found, using ROM firmware\n");
+		/* In this case we just use the ROM firmware. */
 		return;
 	}
 
@@ -1346,7 +1348,7 @@ static int sdma_get_firmware(struct sdma_engine *sdma,
 	return ret;
 }
 
-static int __init sdma_init(struct sdma_engine *sdma)
+static int sdma_init(struct sdma_engine *sdma)
 {
 	int i, ret;
 	dma_addr_t ccb_phys;
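
Editor's note: the dev_err-to-dev_info change reflects that a missing external firmware image is an expected configuration for SDMA — the on-chip ROM scripts keep working. A hedged sketch of the non-fatal asynchronous firmware pattern (the callback, device and image name are invented; the firmware API calls are real):

	#include <linux/device.h>
	#include <linux/firmware.h>

	static void example_fw_callback(const struct firmware *fw, void *context)
	{
		struct device *dev = context;

		if (!fw) {
			dev_info(dev, "no external firmware, staying on ROM code\n");
			return;		/* not an error: ROM firmware still works */
		}
		/* ... validate and upload fw->data / fw->size here ... */
		release_firmware(fw);
	}

	static int example_request_fw(struct device *dev)
	{
		return request_firmware_nowait(THIS_MODULE, FW_ACTION_HOTPLUG,
					       "example-sdma.bin", dev, GFP_KERNEL,
					       dev, example_fw_callback);
	}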
diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c
index 895f869d6c2c..32eae38291e5 100644
--- a/drivers/dma/ioat/dma_v3.c
+++ b/drivers/dma/ioat/dma_v3.c
@@ -1265,9 +1265,17 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	op = IOAT_OP_XOR;
 
 	dest_dma = dma_map_page(dev, dest, 0, PAGE_SIZE, DMA_FROM_DEVICE);
+	if (dma_mapping_error(dev, dest_dma))
+		goto dma_unmap;
+
 	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
+		dma_srcs[i] = DMA_ERROR_CODE;
+	for (i = 0; i < IOAT_NUM_SRC_TEST; i++) {
 		dma_srcs[i] = dma_map_page(dev, xor_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma_srcs[i]))
+			goto dma_unmap;
+	}
 	tx = dma->device_prep_dma_xor(dma_chan, dest_dma, dma_srcs,
 				      IOAT_NUM_SRC_TEST, PAGE_SIZE,
 				      DMA_PREP_INTERRUPT);
@@ -1298,7 +1306,6 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 		goto dma_unmap;
 	}
 
-	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
 	for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
 		dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE, DMA_TO_DEVICE);
 
@@ -1313,6 +1320,8 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	}
 	dma_sync_single_for_device(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
 
+	dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+
 	/* skip validate if the capability is not present */
 	if (!dma_has_cap(DMA_XOR_VAL, dma_chan->device->cap_mask))
 		goto free_resources;
@@ -1327,8 +1336,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	xor_val_result = 1;
 
 	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+		dma_srcs[i] = DMA_ERROR_CODE;
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
 		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma_srcs[i]))
+			goto dma_unmap;
+	}
 	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
 					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
 					  &xor_val_result, DMA_PREP_INTERRUPT);
@@ -1374,8 +1388,13 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 
 	xor_val_result = 0;
 	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
+		dma_srcs[i] = DMA_ERROR_CODE;
+	for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++) {
 		dma_srcs[i] = dma_map_page(dev, xor_val_srcs[i], 0, PAGE_SIZE,
 					   DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, dma_srcs[i]))
+			goto dma_unmap;
+	}
 	tx = dma->device_prep_dma_xor_val(dma_chan, dma_srcs,
 					  IOAT_NUM_SRC_TEST + 1, PAGE_SIZE,
 					  &xor_val_result, DMA_PREP_INTERRUPT);
@@ -1417,14 +1436,18 @@ static int ioat_xor_val_self_test(struct ioatdma_device *device)
 	goto free_resources;
 dma_unmap:
 	if (op == IOAT_OP_XOR) {
-		dma_unmap_page(dev, dest_dma, PAGE_SIZE, DMA_FROM_DEVICE);
+		if (dest_dma != DMA_ERROR_CODE)
+			dma_unmap_page(dev, dest_dma, PAGE_SIZE,
+				       DMA_FROM_DEVICE);
 		for (i = 0; i < IOAT_NUM_SRC_TEST; i++)
-			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
-				       DMA_TO_DEVICE);
+			if (dma_srcs[i] != DMA_ERROR_CODE)
+				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+					       DMA_TO_DEVICE);
 	} else if (op == IOAT_OP_XOR_VAL) {
 		for (i = 0; i < IOAT_NUM_SRC_TEST + 1; i++)
-			dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
-				       DMA_TO_DEVICE);
+			if (dma_srcs[i] != DMA_ERROR_CODE)
+				dma_unmap_page(dev, dma_srcs[i], PAGE_SIZE,
+					       DMA_TO_DEVICE);
 	}
 free_resources:
 	dma->device_free_chan_resources(dma_chan);
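
Editor's note: the self-test fix pre-fills the handle array with DMA_ERROR_CODE so the shared dma_unmap error path can tell mapped entries from never-mapped ones. DMA_ERROR_CODE was the arch-provided sentinel at the time (since removed upstream). A condensed sketch of the same unwind pattern — NR_BUFS and the page array are stand-ins:

	#include <linux/dma-mapping.h>
	#include <linux/errno.h>
	#include <linux/mm.h>

	#define NR_BUFS 8

	static int example_map_all(struct device *dev, struct page **pages,
				   dma_addr_t *handles)
	{
		int i;

		for (i = 0; i < NR_BUFS; i++)
			handles[i] = DMA_ERROR_CODE;	/* sentinel: not mapped */
		for (i = 0; i < NR_BUFS; i++) {
			handles[i] = dma_map_page(dev, pages[i], 0, PAGE_SIZE,
						  DMA_TO_DEVICE);
			if (dma_mapping_error(dev, handles[i]))
				goto unmap;
		}
		return 0;
	unmap:
		for (i = 0; i < NR_BUFS; i++)
			if (handles[i] != DMA_ERROR_CODE)
				dma_unmap_page(dev, handles[i], PAGE_SIZE,
					       DMA_TO_DEVICE);
		return -ENOMEM;
	}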
diff --git a/drivers/dma/iop-adma.c b/drivers/dma/iop-adma.c
index c56137bc3868..263d9f6a207e 100644
--- a/drivers/dma/iop-adma.c
+++ b/drivers/dma/iop-adma.c
@@ -1557,7 +1557,6 @@ static struct platform_driver iop_adma_driver = {
 	.probe		= iop_adma_probe,
 	.remove		= iop_adma_remove,
 	.driver		= {
-		.owner	= THIS_MODULE,
 		.name	= "iop-adma",
 	},
 };
diff --git a/drivers/dma/k3dma.c b/drivers/dma/k3dma.c
index a1f911aaf220..a1de14ab2c51 100644
--- a/drivers/dma/k3dma.c
+++ b/drivers/dma/k3dma.c
@@ -722,7 +722,6 @@ static int k3_dma_probe(struct platform_device *op)
 	d->slave.device_issue_pending = k3_dma_issue_pending;
 	d->slave.device_control = k3_dma_control;
 	d->slave.copy_align = DMA_ALIGN;
-	d->slave.chancnt = d->dma_requests;
 
 	/* init virtual channel */
 	d->chans = devm_kzalloc(&op->dev,
@@ -787,6 +786,7 @@ static int k3_dma_remove(struct platform_device *op)
 	return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int k3_dma_suspend(struct device *dev)
 {
 	struct k3_dma_dev *d = dev_get_drvdata(dev);
@@ -816,13 +816,13 @@ static int k3_dma_resume(struct device *dev)
 	k3_dma_enable_dma(d, true);
 	return 0;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(k3_dma_pmops, k3_dma_suspend, k3_dma_resume);
 
 static struct platform_driver k3_pdma_driver = {
 	.driver		= {
 		.name	= DRIVER_NAME,
-		.owner  = THIS_MODULE,
 		.pm	= &k3_dma_pmops,
 		.of_match_table = k3_pdma_dt_ids,
 	},
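
Editor's note: guarding the sleep callbacks with CONFIG_PM_SLEEP silences "defined but not used" warnings when that option is off; SET_SYSTEM_SLEEP_PM_OPS() inside SIMPLE_DEV_PM_OPS() expands to nothing in that configuration, so the identifiers are never referenced. A minimal sketch of the two usual spellings (the pl330 hunks further down use the second one):

	#include <linux/device.h>
	#include <linux/pm.h>

	/* Option 1: compile the callbacks out entirely, as k3dma does. */
	#ifdef CONFIG_PM_SLEEP
	static int example_suspend(struct device *dev) { return 0; }
	static int example_resume(struct device *dev)  { return 0; }
	#endif

	static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

	/* Option 2: keep them compiled and let the attribute silence the
	 * warning instead, as pl330 does below:
	 *   static int __maybe_unused example_suspend(struct device *dev);
	 */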
diff --git a/drivers/dma/mmp_pdma.c b/drivers/dma/mmp_pdma.c
index a1a4db5721b8..8b8952f35e6c 100644
--- a/drivers/dma/mmp_pdma.c
+++ b/drivers/dma/mmp_pdma.c
@@ -1098,7 +1098,6 @@ static const struct platform_device_id mmp_pdma_id_table[] = {
 static struct platform_driver mmp_pdma_driver = {
 	.driver		= {
 		.name	= "mmp-pdma",
-		.owner	= THIS_MODULE,
 		.of_match_table = mmp_pdma_dt_ids,
 	},
 	.id_table	= mmp_pdma_id_table,
diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
index c6bd015b7165..bfb46957c3dc 100644
--- a/drivers/dma/mmp_tdma.c
+++ b/drivers/dma/mmp_tdma.c
@@ -703,7 +703,6 @@ static const struct platform_device_id mmp_tdma_id_table[] = {
 static struct platform_driver mmp_tdma_driver = {
 	.driver		= {
 		.name	= "mmp-tdma",
-		.owner	= THIS_MODULE,
 		.of_match_table = mmp_tdma_dt_ids,
 	},
 	.id_table	= mmp_tdma_id_table,
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 881db2bcb48b..01bec4023de2 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -885,6 +885,7 @@ static int mpc_dma_probe(struct platform_device *op)
 	struct resource res;
 	ulong regs_start, regs_size;
 	int retval, i;
+	u8 chancnt;
 
 	mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
 	if (!mdma) {
@@ -956,10 +957,6 @@ static int mpc_dma_probe(struct platform_device *op)
 
 	dma = &mdma->dma;
 	dma->dev = dev;
-	if (mdma->is_mpc8308)
-		dma->chancnt = MPC8308_DMACHAN_MAX;
-	else
-		dma->chancnt = MPC512x_DMACHAN_MAX;
 	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
 	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
 	dma->device_issue_pending = mpc_dma_issue_pending;
@@ -972,7 +969,12 @@ static int mpc_dma_probe(struct platform_device *op)
 	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
 	dma_cap_set(DMA_SLAVE, dma->cap_mask);
 
-	for (i = 0; i < dma->chancnt; i++) {
+	if (mdma->is_mpc8308)
+		chancnt = MPC8308_DMACHAN_MAX;
+	else
+		chancnt = MPC512x_DMACHAN_MAX;
+
+	for (i = 0; i < chancnt; i++) {
 		mchan = &mdma->channels[i];
 
 		mchan->chan.device = dma;
@@ -1090,7 +1092,6 @@ static struct platform_driver mpc_dma_driver = {
 	.remove = mpc_dma_remove,
 	.driver = {
 		.name = DRV_NAME,
-		.owner = THIS_MODULE,
 		.of_match_table = mpc_dma_match,
 	},
 };
diff --git a/drivers/dma/nbpfaxi.c b/drivers/dma/nbpfaxi.c
index bda20e6e1007..d7d61e1a01c3 100644
--- a/drivers/dma/nbpfaxi.c
+++ b/drivers/dma/nbpfaxi.c
@@ -1500,7 +1500,6 @@ static const struct dev_pm_ops nbpf_pm_ops = {
 
 static struct platform_driver nbpf_driver = {
 	.driver = {
-		.owner = THIS_MODULE,
 		.name = "dma-nbpf",
 		.of_match_table = nbpf_match,
 		.pm = &nbpf_pm_ops,
diff --git a/drivers/dma/omap-dma.c b/drivers/dma/omap-dma.c
index bbea8243f9e8..6ea1aded7e74 100644
--- a/drivers/dma/omap-dma.c
+++ b/drivers/dma/omap-dma.c
@@ -1074,8 +1074,6 @@ static int omap_dma_chan_init(struct omap_dmadev *od, int dma_sig)
 	vchan_init(&c->vc, &od->ddev);
 	INIT_LIST_HEAD(&c->node);
 
-	od->ddev.chancnt++;
-
 	return 0;
 }
 
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 9f9ca9fe5ce6..6e0e47d76b23 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -997,7 +997,7 @@ static void pch_dma_remove(struct pci_dev *pdev)
 #define PCI_DEVICE_ID_ML7831_DMA1_8CH	0x8810
 #define PCI_DEVICE_ID_ML7831_DMA2_4CH	0x8815
 
-const struct pci_device_id pch_dma_id_table[] = {
+static const struct pci_device_id pch_dma_id_table[] = {
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_8CH), 8 },
 	{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_EG20T_PCH_DMA_4CH), 4 },
 	{ PCI_VDEVICE(ROHM, PCI_DEVICE_ID_ML7213_DMA1_8CH), 8}, /* UART Video */
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 19a99743cf52..bdf40b530032 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -27,6 +27,7 @@
27#include <linux/of.h> 27#include <linux/of.h>
28#include <linux/of_dma.h> 28#include <linux/of_dma.h>
29#include <linux/err.h> 29#include <linux/err.h>
30#include <linux/pm_runtime.h>
30 31
31#include "dmaengine.h" 32#include "dmaengine.h"
32#define PL330_MAX_CHAN 8 33#define PL330_MAX_CHAN 8
@@ -265,6 +266,9 @@ static unsigned cmd_line;
265 266
266#define NR_DEFAULT_DESC 16 267#define NR_DEFAULT_DESC 16
267 268
269/* Delay for runtime PM autosuspend, ms */
270#define PL330_AUTOSUSPEND_DELAY 20
271
268/* Populated by the PL330 core driver for DMA API driver's info */ 272/* Populated by the PL330 core driver for DMA API driver's info */
269struct pl330_config { 273struct pl330_config {
270 u32 periph_id; 274 u32 periph_id;
@@ -1958,6 +1962,7 @@ static void pl330_tasklet(unsigned long data)
1958 struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data; 1962 struct dma_pl330_chan *pch = (struct dma_pl330_chan *)data;
1959 struct dma_pl330_desc *desc, *_dt; 1963 struct dma_pl330_desc *desc, *_dt;
1960 unsigned long flags; 1964 unsigned long flags;
1965 bool power_down = false;
1961 1966
1962 spin_lock_irqsave(&pch->lock, flags); 1967 spin_lock_irqsave(&pch->lock, flags);
1963 1968
@@ -1972,10 +1977,17 @@ static void pl330_tasklet(unsigned long data)
1972 /* Try to submit a req imm. next to the last completed cookie */ 1977 /* Try to submit a req imm. next to the last completed cookie */
1973 fill_queue(pch); 1978 fill_queue(pch);
1974 1979
1975 /* Make sure the PL330 Channel thread is active */ 1980 if (list_empty(&pch->work_list)) {
1976 spin_lock(&pch->thread->dmac->lock); 1981 spin_lock(&pch->thread->dmac->lock);
1977 _start(pch->thread); 1982 _stop(pch->thread);
1978 spin_unlock(&pch->thread->dmac->lock); 1983 spin_unlock(&pch->thread->dmac->lock);
1984 power_down = true;
1985 } else {
1986 /* Make sure the PL330 Channel thread is active */
1987 spin_lock(&pch->thread->dmac->lock);
1988 _start(pch->thread);
1989 spin_unlock(&pch->thread->dmac->lock);
1990 }
1979 1991
1980 while (!list_empty(&pch->completed_list)) { 1992 while (!list_empty(&pch->completed_list)) {
1981 dma_async_tx_callback callback; 1993 dma_async_tx_callback callback;
@@ -1990,6 +2002,12 @@ static void pl330_tasklet(unsigned long data)
1990 if (pch->cyclic) { 2002 if (pch->cyclic) {
1991 desc->status = PREP; 2003 desc->status = PREP;
1992 list_move_tail(&desc->node, &pch->work_list); 2004 list_move_tail(&desc->node, &pch->work_list);
2005 if (power_down) {
2006 spin_lock(&pch->thread->dmac->lock);
2007 _start(pch->thread);
2008 spin_unlock(&pch->thread->dmac->lock);
2009 power_down = false;
2010 }
1993 } else { 2011 } else {
1994 desc->status = FREE; 2012 desc->status = FREE;
1995 list_move_tail(&desc->node, &pch->dmac->desc_pool); 2013 list_move_tail(&desc->node, &pch->dmac->desc_pool);
@@ -2004,6 +2022,12 @@ static void pl330_tasklet(unsigned long data)
2004 } 2022 }
2005 } 2023 }
2006 spin_unlock_irqrestore(&pch->lock, flags); 2024 spin_unlock_irqrestore(&pch->lock, flags);
2025
2026 /* If work list empty, power down */
2027 if (power_down) {
2028 pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
2029 pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
2030 }
2007} 2031}
2008 2032
2009bool pl330_filter(struct dma_chan *chan, void *param) 2033bool pl330_filter(struct dma_chan *chan, void *param)
@@ -2073,6 +2097,7 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
2073 2097
2074 switch (cmd) { 2098 switch (cmd) {
2075 case DMA_TERMINATE_ALL: 2099 case DMA_TERMINATE_ALL:
2100 pm_runtime_get_sync(pl330->ddma.dev);
2076 spin_lock_irqsave(&pch->lock, flags); 2101 spin_lock_irqsave(&pch->lock, flags);
2077 2102
2078 spin_lock(&pl330->lock); 2103 spin_lock(&pl330->lock);
@@ -2099,10 +2124,15 @@ static int pl330_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned
2099 dma_cookie_complete(&desc->txd); 2124 dma_cookie_complete(&desc->txd);
2100 } 2125 }
2101 2126
2127 if (!list_empty(&pch->work_list))
2128 pm_runtime_put(pl330->ddma.dev);
2129
2102 list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool); 2130 list_splice_tail_init(&pch->submitted_list, &pl330->desc_pool);
2103 list_splice_tail_init(&pch->work_list, &pl330->desc_pool); 2131 list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
2104 list_splice_tail_init(&pch->completed_list, &pl330->desc_pool); 2132 list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
2105 spin_unlock_irqrestore(&pch->lock, flags); 2133 spin_unlock_irqrestore(&pch->lock, flags);
2134 pm_runtime_mark_last_busy(pl330->ddma.dev);
2135 pm_runtime_put_autosuspend(pl330->ddma.dev);
2106 break; 2136 break;
2107 case DMA_SLAVE_CONFIG: 2137 case DMA_SLAVE_CONFIG:
2108 slave_config = (struct dma_slave_config *)arg; 2138 slave_config = (struct dma_slave_config *)arg;
@@ -2138,6 +2168,7 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
2138 2168
2139 tasklet_kill(&pch->task); 2169 tasklet_kill(&pch->task);
2140 2170
2171 pm_runtime_get_sync(pch->dmac->ddma.dev);
2141 spin_lock_irqsave(&pch->lock, flags); 2172 spin_lock_irqsave(&pch->lock, flags);
2142 2173
2143 pl330_release_channel(pch->thread); 2174 pl330_release_channel(pch->thread);
@@ -2147,6 +2178,8 @@ static void pl330_free_chan_resources(struct dma_chan *chan)
2147 list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool); 2178 list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool);
2148 2179
2149 spin_unlock_irqrestore(&pch->lock, flags); 2180 spin_unlock_irqrestore(&pch->lock, flags);
2181 pm_runtime_mark_last_busy(pch->dmac->ddma.dev);
2182 pm_runtime_put_autosuspend(pch->dmac->ddma.dev);
2150} 2183}
2151 2184
2152static enum dma_status 2185static enum dma_status
@@ -2162,6 +2195,15 @@ static void pl330_issue_pending(struct dma_chan *chan)
2162 unsigned long flags; 2195 unsigned long flags;
2163 2196
2164 spin_lock_irqsave(&pch->lock, flags); 2197 spin_lock_irqsave(&pch->lock, flags);
2198 if (list_empty(&pch->work_list)) {
2199 /*
2200 * Warn on nothing pending. Empty submitted_list may
2201 * break our pm_runtime usage counter as it is
2202 * updated on work_list emptiness status.
2203 */
2204 WARN_ON(list_empty(&pch->submitted_list));
2205 pm_runtime_get_sync(pch->dmac->ddma.dev);
2206 }
2165 list_splice_tail_init(&pch->submitted_list, &pch->work_list); 2207 list_splice_tail_init(&pch->submitted_list, &pch->work_list);
2166 spin_unlock_irqrestore(&pch->lock, flags); 2208 spin_unlock_irqrestore(&pch->lock, flags);
2167 2209
@@ -2594,6 +2636,46 @@ static int pl330_dma_device_slave_caps(struct dma_chan *dchan,
2594 return 0; 2636 return 0;
2595} 2637}
2596 2638
2639/*
2640 * Runtime PM callbacks are provided by amba/bus.c driver.
2641 *
2642 * It is assumed here that IRQ safe runtime PM is chosen in probe and amba
2643 * bus driver will only disable/enable the clock in runtime PM callbacks.
2644 */
2645static int __maybe_unused pl330_suspend(struct device *dev)
2646{
2647 struct amba_device *pcdev = to_amba_device(dev);
2648
2649 pm_runtime_disable(dev);
2650
2651 if (!pm_runtime_status_suspended(dev)) {
2652 /* amba did not disable the clock */
2653 amba_pclk_disable(pcdev);
2654 }
2655 amba_pclk_unprepare(pcdev);
2656
2657 return 0;
2658}
2659
2660static int __maybe_unused pl330_resume(struct device *dev)
2661{
2662 struct amba_device *pcdev = to_amba_device(dev);
2663 int ret;
2664
2665 ret = amba_pclk_prepare(pcdev);
2666 if (ret)
2667 return ret;
2668
2669 if (!pm_runtime_status_suspended(dev))
2670 ret = amba_pclk_enable(pcdev);
2671
2672 pm_runtime_enable(dev);
2673
2674 return ret;
2675}
2676
2677static SIMPLE_DEV_PM_OPS(pl330_pm, pl330_suspend, pl330_resume);
2678
2597static int 2679static int
2598pl330_probe(struct amba_device *adev, const struct amba_id *id) 2680pl330_probe(struct amba_device *adev, const struct amba_id *id)
2599{ 2681{
@@ -2619,6 +2701,9 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2619 return -ENOMEM; 2701 return -ENOMEM;
2620 } 2702 }
2621 2703
2704 pd = &pl330->ddma;
2705 pd->dev = &adev->dev;
2706
2622 pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0; 2707 pl330->mcbufsz = pdat ? pdat->mcbuf_sz : 0;
2623 2708
2624 res = &adev->res; 2709 res = &adev->res;
@@ -2655,7 +2740,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2655 if (!add_desc(pl330, GFP_KERNEL, NR_DEFAULT_DESC)) 2740 if (!add_desc(pl330, GFP_KERNEL, NR_DEFAULT_DESC))
2656 dev_warn(&adev->dev, "unable to allocate desc\n"); 2741 dev_warn(&adev->dev, "unable to allocate desc\n");
2657 2742
2658 pd = &pl330->ddma;
2659 INIT_LIST_HEAD(&pd->channels); 2743 INIT_LIST_HEAD(&pd->channels);
2660 2744
2661 /* Initialize channel parameters */ 2745 /* Initialize channel parameters */
@@ -2692,7 +2776,6 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2692 list_add_tail(&pch->chan.device_node, &pd->channels); 2776 list_add_tail(&pch->chan.device_node, &pd->channels);
2693 } 2777 }
2694 2778
2695 pd->dev = &adev->dev;
2696 if (pdat) { 2779 if (pdat) {
2697 pd->cap_mask = pdat->cap_mask; 2780 pd->cap_mask = pdat->cap_mask;
2698 } else { 2781 } else {
@@ -2747,6 +2830,12 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2747 pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan, 2830 pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan,
2748 pcfg->num_peri, pcfg->num_events); 2831 pcfg->num_peri, pcfg->num_events);
2749 2832
2833 pm_runtime_irq_safe(&adev->dev);
2834 pm_runtime_use_autosuspend(&adev->dev);
2835 pm_runtime_set_autosuspend_delay(&adev->dev, PL330_AUTOSUSPEND_DELAY);
2836 pm_runtime_mark_last_busy(&adev->dev);
2837 pm_runtime_put_autosuspend(&adev->dev);
2838
2750 return 0; 2839 return 0;
2751probe_err3: 2840probe_err3:
2752 /* Idle the DMAC */ 2841 /* Idle the DMAC */
@@ -2773,6 +2862,8 @@ static int pl330_remove(struct amba_device *adev)
2773 struct pl330_dmac *pl330 = amba_get_drvdata(adev); 2862 struct pl330_dmac *pl330 = amba_get_drvdata(adev);
2774 struct dma_pl330_chan *pch, *_p; 2863 struct dma_pl330_chan *pch, *_p;
2775 2864
2865 pm_runtime_get_noresume(pl330->ddma.dev);
2866
2776 if (adev->dev.of_node) 2867 if (adev->dev.of_node)
2777 of_dma_controller_free(adev->dev.of_node); 2868 of_dma_controller_free(adev->dev.of_node);
2778 2869
@@ -2811,6 +2902,7 @@ static struct amba_driver pl330_driver = {
2811 .drv = { 2902 .drv = {
2812 .owner = THIS_MODULE, 2903 .owner = THIS_MODULE,
2813 .name = "dma-pl330", 2904 .name = "dma-pl330",
2905 .pm = &pl330_pm,
2814 }, 2906 },
2815 .id_table = pl330_ids, 2907 .id_table = pl330_ids,
2816 .probe = pl330_probe, 2908 .probe = pl330_probe,
@@ -2819,6 +2911,6 @@ static struct amba_driver pl330_driver = {
2819 2911
2820module_amba_driver(pl330_driver); 2912module_amba_driver(pl330_driver);
2821 2913
2822MODULE_AUTHOR("Jaswinder Singh <jassi.brar@samsung.com>"); 2914MODULE_AUTHOR("Jaswinder Singh <jassisinghbrar@gmail.com>");
2823MODULE_DESCRIPTION("API Driver for PL330 DMAC"); 2915MODULE_DESCRIPTION("API Driver for PL330 DMAC");
2824MODULE_LICENSE("GPL"); 2916MODULE_LICENSE("GPL");
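The pl330 changes above follow the usual IRQ-safe autosuspend recipe: mark the device IRQ safe, drop the reference held across probe, then bracket each hardware access with get/put calls so the controller idles after a quiet period. A minimal sketch of that recipe, with a hypothetical foo_ prefix and an illustrative delay rather than the driver's own names and values:

        #include <linux/device.h>
        #include <linux/pm_runtime.h>

        #define FOO_AUTOSUSPEND_DELAY   20      /* ms, illustrative */

        static int foo_probe_pm_setup(struct device *dev)
        {
                /* runtime callbacks may now run with interrupts disabled */
                pm_runtime_irq_safe(dev);
                pm_runtime_use_autosuspend(dev);
                pm_runtime_set_autosuspend_delay(dev, FOO_AUTOSUSPEND_DELAY);
                pm_runtime_mark_last_busy(dev);
                /* drop probe's reference; the device idles after the delay */
                pm_runtime_put_autosuspend(dev);
                return 0;
        }

        static void foo_hw_access(struct device *dev)
        {
                pm_runtime_get_sync(dev);       /* power the block up */
                /* ... touch registers ... */
                pm_runtime_mark_last_busy(dev);
                pm_runtime_put_autosuspend(dev);
        }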
diff --git a/drivers/dma/qcom_bam_dma.c b/drivers/dma/qcom_bam_dma.c
index 7a4bbb0f80a5..3122a99ec06b 100644
--- a/drivers/dma/qcom_bam_dma.c
+++ b/drivers/dma/qcom_bam_dma.c
@@ -79,35 +79,97 @@ struct bam_async_desc {
79 struct bam_desc_hw desc[0]; 79 struct bam_desc_hw desc[0];
80}; 80};
81 81
82#define BAM_CTRL 0x0000 82enum bam_reg {
83#define BAM_REVISION 0x0004 83 BAM_CTRL,
84#define BAM_SW_REVISION 0x0080 84 BAM_REVISION,
85#define BAM_NUM_PIPES 0x003C 85 BAM_NUM_PIPES,
86#define BAM_TIMER 0x0040 86 BAM_DESC_CNT_TRSHLD,
87#define BAM_TIMER_CTRL 0x0044 87 BAM_IRQ_SRCS,
88#define BAM_DESC_CNT_TRSHLD 0x0008 88 BAM_IRQ_SRCS_MSK,
89#define BAM_IRQ_SRCS 0x000C 89 BAM_IRQ_SRCS_UNMASKED,
90#define BAM_IRQ_SRCS_MSK 0x0010 90 BAM_IRQ_STTS,
91#define BAM_IRQ_SRCS_UNMASKED 0x0030 91 BAM_IRQ_CLR,
92#define BAM_IRQ_STTS 0x0014 92 BAM_IRQ_EN,
93#define BAM_IRQ_CLR 0x0018 93 BAM_CNFG_BITS,
94#define BAM_IRQ_EN 0x001C 94 BAM_IRQ_SRCS_EE,
95#define BAM_CNFG_BITS 0x007C 95 BAM_IRQ_SRCS_MSK_EE,
96#define BAM_IRQ_SRCS_EE(ee) (0x0800 + ((ee) * 0x80)) 96 BAM_P_CTRL,
97#define BAM_IRQ_SRCS_MSK_EE(ee) (0x0804 + ((ee) * 0x80)) 97 BAM_P_RST,
98#define BAM_P_CTRL(pipe) (0x1000 + ((pipe) * 0x1000)) 98 BAM_P_HALT,
99#define BAM_P_RST(pipe) (0x1004 + ((pipe) * 0x1000)) 99 BAM_P_IRQ_STTS,
100#define BAM_P_HALT(pipe) (0x1008 + ((pipe) * 0x1000)) 100 BAM_P_IRQ_CLR,
101#define BAM_P_IRQ_STTS(pipe) (0x1010 + ((pipe) * 0x1000)) 101 BAM_P_IRQ_EN,
102#define BAM_P_IRQ_CLR(pipe) (0x1014 + ((pipe) * 0x1000)) 102 BAM_P_EVNT_DEST_ADDR,
103#define BAM_P_IRQ_EN(pipe) (0x1018 + ((pipe) * 0x1000)) 103 BAM_P_EVNT_REG,
104#define BAM_P_EVNT_DEST_ADDR(pipe) (0x182C + ((pipe) * 0x1000)) 104 BAM_P_SW_OFSTS,
105#define BAM_P_EVNT_REG(pipe) (0x1818 + ((pipe) * 0x1000)) 105 BAM_P_DATA_FIFO_ADDR,
106#define BAM_P_SW_OFSTS(pipe) (0x1800 + ((pipe) * 0x1000)) 106 BAM_P_DESC_FIFO_ADDR,
107#define BAM_P_DATA_FIFO_ADDR(pipe) (0x1824 + ((pipe) * 0x1000)) 107 BAM_P_EVNT_GEN_TRSHLD,
108#define BAM_P_DESC_FIFO_ADDR(pipe) (0x181C + ((pipe) * 0x1000)) 108 BAM_P_FIFO_SIZES,
109#define BAM_P_EVNT_TRSHLD(pipe) (0x1828 + ((pipe) * 0x1000)) 109};
110#define BAM_P_FIFO_SIZES(pipe) (0x1820 + ((pipe) * 0x1000)) 110
111struct reg_offset_data {
112 u32 base_offset;
113 unsigned int pipe_mult, evnt_mult, ee_mult;
114};
115
116static const struct reg_offset_data bam_v1_3_reg_info[] = {
117 [BAM_CTRL] = { 0x0F80, 0x00, 0x00, 0x00 },
118 [BAM_REVISION] = { 0x0F84, 0x00, 0x00, 0x00 },
119 [BAM_NUM_PIPES] = { 0x0FBC, 0x00, 0x00, 0x00 },
120 [BAM_DESC_CNT_TRSHLD] = { 0x0F88, 0x00, 0x00, 0x00 },
121 [BAM_IRQ_SRCS] = { 0x0F8C, 0x00, 0x00, 0x00 },
122 [BAM_IRQ_SRCS_MSK] = { 0x0F90, 0x00, 0x00, 0x00 },
123 [BAM_IRQ_SRCS_UNMASKED] = { 0x0FB0, 0x00, 0x00, 0x00 },
124 [BAM_IRQ_STTS] = { 0x0F94, 0x00, 0x00, 0x00 },
125 [BAM_IRQ_CLR] = { 0x0F98, 0x00, 0x00, 0x00 },
126 [BAM_IRQ_EN] = { 0x0F9C, 0x00, 0x00, 0x00 },
127 [BAM_CNFG_BITS] = { 0x0FFC, 0x00, 0x00, 0x00 },
128 [BAM_IRQ_SRCS_EE] = { 0x1800, 0x00, 0x00, 0x80 },
129 [BAM_IRQ_SRCS_MSK_EE] = { 0x1804, 0x00, 0x00, 0x80 },
130 [BAM_P_CTRL] = { 0x0000, 0x80, 0x00, 0x00 },
131 [BAM_P_RST] = { 0x0004, 0x80, 0x00, 0x00 },
132 [BAM_P_HALT] = { 0x0008, 0x80, 0x00, 0x00 },
133 [BAM_P_IRQ_STTS] = { 0x0010, 0x80, 0x00, 0x00 },
134 [BAM_P_IRQ_CLR] = { 0x0014, 0x80, 0x00, 0x00 },
135 [BAM_P_IRQ_EN] = { 0x0018, 0x80, 0x00, 0x00 },
136 [BAM_P_EVNT_DEST_ADDR] = { 0x102C, 0x00, 0x40, 0x00 },
137 [BAM_P_EVNT_REG] = { 0x1018, 0x00, 0x40, 0x00 },
138 [BAM_P_SW_OFSTS] = { 0x1000, 0x00, 0x40, 0x00 },
139 [BAM_P_DATA_FIFO_ADDR] = { 0x1024, 0x00, 0x40, 0x00 },
140 [BAM_P_DESC_FIFO_ADDR] = { 0x101C, 0x00, 0x40, 0x00 },
141 [BAM_P_EVNT_GEN_TRSHLD] = { 0x1028, 0x00, 0x40, 0x00 },
142 [BAM_P_FIFO_SIZES] = { 0x1020, 0x00, 0x40, 0x00 },
143};
144
145static const struct reg_offset_data bam_v1_4_reg_info[] = {
146 [BAM_CTRL] = { 0x0000, 0x00, 0x00, 0x00 },
147 [BAM_REVISION] = { 0x0004, 0x00, 0x00, 0x00 },
148 [BAM_NUM_PIPES] = { 0x003C, 0x00, 0x00, 0x00 },
149 [BAM_DESC_CNT_TRSHLD] = { 0x0008, 0x00, 0x00, 0x00 },
150 [BAM_IRQ_SRCS] = { 0x000C, 0x00, 0x00, 0x00 },
151 [BAM_IRQ_SRCS_MSK] = { 0x0010, 0x00, 0x00, 0x00 },
152 [BAM_IRQ_SRCS_UNMASKED] = { 0x0030, 0x00, 0x00, 0x00 },
153 [BAM_IRQ_STTS] = { 0x0014, 0x00, 0x00, 0x00 },
154 [BAM_IRQ_CLR] = { 0x0018, 0x00, 0x00, 0x00 },
155 [BAM_IRQ_EN] = { 0x001C, 0x00, 0x00, 0x00 },
156 [BAM_CNFG_BITS] = { 0x007C, 0x00, 0x00, 0x00 },
157 [BAM_IRQ_SRCS_EE] = { 0x0800, 0x00, 0x00, 0x80 },
158 [BAM_IRQ_SRCS_MSK_EE] = { 0x0804, 0x00, 0x00, 0x80 },
159 [BAM_P_CTRL] = { 0x1000, 0x1000, 0x00, 0x00 },
160 [BAM_P_RST] = { 0x1004, 0x1000, 0x00, 0x00 },
161 [BAM_P_HALT] = { 0x1008, 0x1000, 0x00, 0x00 },
162 [BAM_P_IRQ_STTS] = { 0x1010, 0x1000, 0x00, 0x00 },
163 [BAM_P_IRQ_CLR] = { 0x1014, 0x1000, 0x00, 0x00 },
164 [BAM_P_IRQ_EN] = { 0x1018, 0x1000, 0x00, 0x00 },
165 [BAM_P_EVNT_DEST_ADDR] = { 0x102C, 0x00, 0x1000, 0x00 },
166 [BAM_P_EVNT_REG] = { 0x1018, 0x00, 0x1000, 0x00 },
167 [BAM_P_SW_OFSTS] = { 0x1000, 0x00, 0x1000, 0x00 },
168 [BAM_P_DATA_FIFO_ADDR] = { 0x1824, 0x00, 0x1000, 0x00 },
169 [BAM_P_DESC_FIFO_ADDR] = { 0x181C, 0x00, 0x1000, 0x00 },
170 [BAM_P_EVNT_GEN_TRSHLD] = { 0x1828, 0x00, 0x1000, 0x00 },
171 [BAM_P_FIFO_SIZES] = { 0x1820, 0x00, 0x1000, 0x00 },
172};
111 173
112/* BAM CTRL */ 174/* BAM CTRL */
113#define BAM_SW_RST BIT(0) 175#define BAM_SW_RST BIT(0)
@@ -297,6 +359,8 @@ struct bam_device {
297 /* execution environment ID, from DT */ 359 /* execution environment ID, from DT */
298 u32 ee; 360 u32 ee;
299 361
362 const struct reg_offset_data *layout;
363
300 struct clk *bamclk; 364 struct clk *bamclk;
301 int irq; 365 int irq;
302 366
@@ -305,6 +369,23 @@ struct bam_device {
305}; 369};
306 370
307/** 371/**
372 * bam_addr - returns BAM register address
373 * @bdev: bam device
374 * @pipe: pipe instance (ignored when register doesn't have multiple instances)
375 * @reg: register enum
376 */
377static inline void __iomem *bam_addr(struct bam_device *bdev, u32 pipe,
378 enum bam_reg reg)
379{
380 const struct reg_offset_data r = bdev->layout[reg];
381
382 return bdev->regs + r.base_offset +
383 r.pipe_mult * pipe +
384 r.evnt_mult * pipe +
385 r.ee_mult * bdev->ee;
386}
387
388/**
308 * bam_reset_channel - Reset individual BAM DMA channel 389 * bam_reset_channel - Reset individual BAM DMA channel
309 * @bchan: bam channel 390 * @bchan: bam channel
310 * 391 *
@@ -317,8 +398,8 @@ static void bam_reset_channel(struct bam_chan *bchan)
317 lockdep_assert_held(&bchan->vc.lock); 398 lockdep_assert_held(&bchan->vc.lock);
318 399
319 /* reset channel */ 400 /* reset channel */
320 writel_relaxed(1, bdev->regs + BAM_P_RST(bchan->id)); 401 writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_RST));
321 writel_relaxed(0, bdev->regs + BAM_P_RST(bchan->id)); 402 writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_RST));
322 403
323 /* don't allow cpu to reorder BAM register accesses done after this */ 404 /* don't allow cpu to reorder BAM register accesses done after this */
324 wmb(); 405 wmb();
@@ -347,17 +428,18 @@ static void bam_chan_init_hw(struct bam_chan *bchan,
347 * because we allocated 1 more descriptor (8 bytes) than we can use 428 * because we allocated 1 more descriptor (8 bytes) than we can use
348 */ 429 */
349 writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)), 430 writel_relaxed(ALIGN(bchan->fifo_phys, sizeof(struct bam_desc_hw)),
350 bdev->regs + BAM_P_DESC_FIFO_ADDR(bchan->id)); 431 bam_addr(bdev, bchan->id, BAM_P_DESC_FIFO_ADDR));
351 writel_relaxed(BAM_DESC_FIFO_SIZE, bdev->regs + 432 writel_relaxed(BAM_DESC_FIFO_SIZE,
352 BAM_P_FIFO_SIZES(bchan->id)); 433 bam_addr(bdev, bchan->id, BAM_P_FIFO_SIZES));
353 434
354 /* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */ 435 /* enable the per pipe interrupts, enable EOT, ERR, and INT irqs */
355 writel_relaxed(P_DEFAULT_IRQS_EN, bdev->regs + BAM_P_IRQ_EN(bchan->id)); 436 writel_relaxed(P_DEFAULT_IRQS_EN,
437 bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
356 438
357 /* unmask the specific pipe and EE combo */ 439 /* unmask the specific pipe and EE combo */
358 val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee)); 440 val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
359 val |= BIT(bchan->id); 441 val |= BIT(bchan->id);
360 writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee)); 442 writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
361 443
362 /* don't allow cpu to reorder the channel enable done below */ 444 /* don't allow cpu to reorder the channel enable done below */
363 wmb(); 445 wmb();
@@ -367,7 +449,7 @@ static void bam_chan_init_hw(struct bam_chan *bchan,
367 if (dir == DMA_DEV_TO_MEM) 449 if (dir == DMA_DEV_TO_MEM)
368 val |= P_DIRECTION; 450 val |= P_DIRECTION;
369 451
370 writel_relaxed(val, bdev->regs + BAM_P_CTRL(bchan->id)); 452 writel_relaxed(val, bam_addr(bdev, bchan->id, BAM_P_CTRL));
371 453
372 bchan->initialized = 1; 454 bchan->initialized = 1;
373 455
@@ -432,12 +514,12 @@ static void bam_free_chan(struct dma_chan *chan)
432 bchan->fifo_virt = NULL; 514 bchan->fifo_virt = NULL;
433 515
434 /* mask irq for pipe/channel */ 516 /* mask irq for pipe/channel */
435 val = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee)); 517 val = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
436 val &= ~BIT(bchan->id); 518 val &= ~BIT(bchan->id);
437 writel_relaxed(val, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee)); 519 writel_relaxed(val, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
438 520
439 /* disable irq */ 521 /* disable irq */
440 writel_relaxed(0, bdev->regs + BAM_P_IRQ_EN(bchan->id)); 522 writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_IRQ_EN));
441} 523}
442 524
443/** 525/**
@@ -583,14 +665,14 @@ static int bam_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
583 switch (cmd) { 665 switch (cmd) {
584 case DMA_PAUSE: 666 case DMA_PAUSE:
585 spin_lock_irqsave(&bchan->vc.lock, flag); 667 spin_lock_irqsave(&bchan->vc.lock, flag);
586 writel_relaxed(1, bdev->regs + BAM_P_HALT(bchan->id)); 668 writel_relaxed(1, bam_addr(bdev, bchan->id, BAM_P_HALT));
587 bchan->paused = 1; 669 bchan->paused = 1;
588 spin_unlock_irqrestore(&bchan->vc.lock, flag); 670 spin_unlock_irqrestore(&bchan->vc.lock, flag);
589 break; 671 break;
590 672
591 case DMA_RESUME: 673 case DMA_RESUME:
592 spin_lock_irqsave(&bchan->vc.lock, flag); 674 spin_lock_irqsave(&bchan->vc.lock, flag);
593 writel_relaxed(0, bdev->regs + BAM_P_HALT(bchan->id)); 675 writel_relaxed(0, bam_addr(bdev, bchan->id, BAM_P_HALT));
594 bchan->paused = 0; 676 bchan->paused = 0;
595 spin_unlock_irqrestore(&bchan->vc.lock, flag); 677 spin_unlock_irqrestore(&bchan->vc.lock, flag);
596 break; 678 break;
@@ -626,7 +708,7 @@ static u32 process_channel_irqs(struct bam_device *bdev)
626 unsigned long flags; 708 unsigned long flags;
627 struct bam_async_desc *async_desc; 709 struct bam_async_desc *async_desc;
628 710
629 srcs = readl_relaxed(bdev->regs + BAM_IRQ_SRCS_EE(bdev->ee)); 711 srcs = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_SRCS_EE));
630 712
631 /* return early if no pipe/channel interrupts are present */ 713 /* return early if no pipe/channel interrupts are present */
632 if (!(srcs & P_IRQ)) 714 if (!(srcs & P_IRQ))
@@ -639,11 +721,9 @@ static u32 process_channel_irqs(struct bam_device *bdev)
639 continue; 721 continue;
640 722
641 /* clear pipe irq */ 723 /* clear pipe irq */
642 pipe_stts = readl_relaxed(bdev->regs + 724 pipe_stts = readl_relaxed(bam_addr(bdev, i, BAM_P_IRQ_STTS));
643 BAM_P_IRQ_STTS(i));
644 725
645 writel_relaxed(pipe_stts, bdev->regs + 726 writel_relaxed(pipe_stts, bam_addr(bdev, i, BAM_P_IRQ_CLR));
646 BAM_P_IRQ_CLR(i));
647 727
648 spin_lock_irqsave(&bchan->vc.lock, flags); 728 spin_lock_irqsave(&bchan->vc.lock, flags);
649 async_desc = bchan->curr_txd; 729 async_desc = bchan->curr_txd;
@@ -694,12 +774,12 @@ static irqreturn_t bam_dma_irq(int irq, void *data)
694 tasklet_schedule(&bdev->task); 774 tasklet_schedule(&bdev->task);
695 775
696 if (srcs & BAM_IRQ) 776 if (srcs & BAM_IRQ)
697 clr_mask = readl_relaxed(bdev->regs + BAM_IRQ_STTS); 777 clr_mask = readl_relaxed(bam_addr(bdev, 0, BAM_IRQ_STTS));
698 778
699 /* don't allow reorder of the various accesses to the BAM registers */ 779 /* don't allow reorder of the various accesses to the BAM registers */
700 mb(); 780 mb();
701 781
702 writel_relaxed(clr_mask, bdev->regs + BAM_IRQ_CLR); 782 writel_relaxed(clr_mask, bam_addr(bdev, 0, BAM_IRQ_CLR));
703 783
704 return IRQ_HANDLED; 784 return IRQ_HANDLED;
705} 785}
@@ -763,7 +843,7 @@ static void bam_apply_new_config(struct bam_chan *bchan,
763 else 843 else
764 maxburst = bchan->slave.dst_maxburst; 844 maxburst = bchan->slave.dst_maxburst;
765 845
766 writel_relaxed(maxburst, bdev->regs + BAM_DESC_CNT_TRSHLD); 846 writel_relaxed(maxburst, bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
767 847
768 bchan->reconfigure = 0; 848 bchan->reconfigure = 0;
769} 849}
@@ -830,7 +910,7 @@ static void bam_start_dma(struct bam_chan *bchan)
830 /* ensure descriptor writes and dma start not reordered */ 910 /* ensure descriptor writes and dma start not reordered */
831 wmb(); 911 wmb();
832 writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw), 912 writel_relaxed(bchan->tail * sizeof(struct bam_desc_hw),
833 bdev->regs + BAM_P_EVNT_REG(bchan->id)); 913 bam_addr(bdev, bchan->id, BAM_P_EVNT_REG));
834} 914}
835 915
836/** 916/**
@@ -918,43 +998,44 @@ static int bam_init(struct bam_device *bdev)
918 u32 val; 998 u32 val;
919 999
920 /* read revision and configuration information */ 1000 /* read revision and configuration information */
921 val = readl_relaxed(bdev->regs + BAM_REVISION) >> NUM_EES_SHIFT; 1001 val = readl_relaxed(bam_addr(bdev, 0, BAM_REVISION)) >> NUM_EES_SHIFT;
922 val &= NUM_EES_MASK; 1002 val &= NUM_EES_MASK;
923 1003
924 /* check that configured EE is within range */ 1004 /* check that configured EE is within range */
925 if (bdev->ee >= val) 1005 if (bdev->ee >= val)
926 return -EINVAL; 1006 return -EINVAL;
927 1007
928 val = readl_relaxed(bdev->regs + BAM_NUM_PIPES); 1008 val = readl_relaxed(bam_addr(bdev, 0, BAM_NUM_PIPES));
929 bdev->num_channels = val & BAM_NUM_PIPES_MASK; 1009 bdev->num_channels = val & BAM_NUM_PIPES_MASK;
930 1010
931 /* s/w reset bam */ 1011 /* s/w reset bam */
932 /* after reset all pipes are disabled and idle */ 1012 /* after reset all pipes are disabled and idle */
933 val = readl_relaxed(bdev->regs + BAM_CTRL); 1013 val = readl_relaxed(bam_addr(bdev, 0, BAM_CTRL));
934 val |= BAM_SW_RST; 1014 val |= BAM_SW_RST;
935 writel_relaxed(val, bdev->regs + BAM_CTRL); 1015 writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
936 val &= ~BAM_SW_RST; 1016 val &= ~BAM_SW_RST;
937 writel_relaxed(val, bdev->regs + BAM_CTRL); 1017 writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
938 1018
939 /* make sure previous stores are visible before enabling BAM */ 1019 /* make sure previous stores are visible before enabling BAM */
940 wmb(); 1020 wmb();
941 1021
942 /* enable bam */ 1022 /* enable bam */
943 val |= BAM_EN; 1023 val |= BAM_EN;
944 writel_relaxed(val, bdev->regs + BAM_CTRL); 1024 writel_relaxed(val, bam_addr(bdev, 0, BAM_CTRL));
945 1025
946 /* set descriptor threshold, start with 4 bytes */ 1026 /* set descriptor threshold, start with 4 bytes */
947 writel_relaxed(DEFAULT_CNT_THRSHLD, bdev->regs + BAM_DESC_CNT_TRSHLD); 1027 writel_relaxed(DEFAULT_CNT_THRSHLD,
1028 bam_addr(bdev, 0, BAM_DESC_CNT_TRSHLD));
948 1029
949 /* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */ 1030 /* Enable default set of h/w workarounds, ie all except BAM_FULL_PIPE */
950 writel_relaxed(BAM_CNFG_BITS_DEFAULT, bdev->regs + BAM_CNFG_BITS); 1031 writel_relaxed(BAM_CNFG_BITS_DEFAULT, bam_addr(bdev, 0, BAM_CNFG_BITS));
951 1032
952 /* enable irqs for errors */ 1033 /* enable irqs for errors */
953 writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN, 1034 writel_relaxed(BAM_ERROR_EN | BAM_HRESP_ERR_EN,
954 bdev->regs + BAM_IRQ_EN); 1035 bam_addr(bdev, 0, BAM_IRQ_EN));
955 1036
956 /* unmask global bam interrupt */ 1037 /* unmask global bam interrupt */
957 writel_relaxed(BAM_IRQ_MSK, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee)); 1038 writel_relaxed(BAM_IRQ_MSK, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
958 1039
959 return 0; 1040 return 0;
960} 1041}
@@ -969,9 +1050,18 @@ static void bam_channel_init(struct bam_device *bdev, struct bam_chan *bchan,
969 bchan->vc.desc_free = bam_dma_free_desc; 1050 bchan->vc.desc_free = bam_dma_free_desc;
970} 1051}
971 1052
1053static const struct of_device_id bam_of_match[] = {
1054 { .compatible = "qcom,bam-v1.3.0", .data = &bam_v1_3_reg_info },
1055 { .compatible = "qcom,bam-v1.4.0", .data = &bam_v1_4_reg_info },
1056 {}
1057};
1058
1059MODULE_DEVICE_TABLE(of, bam_of_match);
1060
972static int bam_dma_probe(struct platform_device *pdev) 1061static int bam_dma_probe(struct platform_device *pdev)
973{ 1062{
974 struct bam_device *bdev; 1063 struct bam_device *bdev;
1064 const struct of_device_id *match;
975 struct resource *iores; 1065 struct resource *iores;
976 int ret, i; 1066 int ret, i;
977 1067
@@ -981,6 +1071,14 @@ static int bam_dma_probe(struct platform_device *pdev)
981 1071
982 bdev->dev = &pdev->dev; 1072 bdev->dev = &pdev->dev;
983 1073
1074 match = of_match_node(bam_of_match, pdev->dev.of_node);
1075 if (!match) {
1076 dev_err(&pdev->dev, "Unsupported BAM module\n");
1077 return -ENODEV;
1078 }
1079
1080 bdev->layout = match->data;
1081
984 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1082 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
985 bdev->regs = devm_ioremap_resource(&pdev->dev, iores); 1083 bdev->regs = devm_ioremap_resource(&pdev->dev, iores);
986 if (IS_ERR(bdev->regs)) 1084 if (IS_ERR(bdev->regs))
@@ -1084,7 +1182,7 @@ static int bam_dma_remove(struct platform_device *pdev)
1084 dma_async_device_unregister(&bdev->common); 1182 dma_async_device_unregister(&bdev->common);
1085 1183
1086 /* mask all interrupts for this execution environment */ 1184 /* mask all interrupts for this execution environment */
1087 writel_relaxed(0, bdev->regs + BAM_IRQ_SRCS_MSK_EE(bdev->ee)); 1185 writel_relaxed(0, bam_addr(bdev, 0, BAM_IRQ_SRCS_MSK_EE));
1088 1186
1089 devm_free_irq(bdev->dev, bdev->irq, bdev); 1187 devm_free_irq(bdev->dev, bdev->irq, bdev);
1090 1188
@@ -1104,18 +1202,11 @@ static int bam_dma_remove(struct platform_device *pdev)
1104 return 0; 1202 return 0;
1105} 1203}
1106 1204
1107static const struct of_device_id bam_of_match[] = {
1108 { .compatible = "qcom,bam-v1.4.0", },
1109 {}
1110};
1111MODULE_DEVICE_TABLE(of, bam_of_match);
1112
1113static struct platform_driver bam_dma_driver = { 1205static struct platform_driver bam_dma_driver = {
1114 .probe = bam_dma_probe, 1206 .probe = bam_dma_probe,
1115 .remove = bam_dma_remove, 1207 .remove = bam_dma_remove,
1116 .driver = { 1208 .driver = {
1117 .name = "bam-dma-engine", 1209 .name = "bam-dma-engine",
1118 .owner = THIS_MODULE,
1119 .of_match_table = bam_of_match, 1210 .of_match_table = bam_of_match,
1120 }, 1211 },
1121}; 1212};
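The layout tables above replace per-version macro arithmetic with data: each IP revision gets its own offset table, and one lookup helper serves them all. A generic sketch of that shape (register names and offsets invented, not taken from the BAM hardware):

        #include <linux/io.h>
        #include <linux/types.h>

        enum foo_reg { FOO_CTRL, FOO_P_CTRL, FOO_IRQ_SRCS_EE, FOO_REG_COUNT };

        struct foo_reg_layout {
                u32 base_offset;
                u32 pipe_mult, ee_mult;
        };

        /* one table per IP revision; only the data changes, not the code */
        static const struct foo_reg_layout foo_v2_layout[FOO_REG_COUNT] = {
                [FOO_CTRL]        = { 0x0000, 0x0000, 0x00 },
                [FOO_P_CTRL]      = { 0x1000, 0x1000, 0x00 }, /* 4 KiB per pipe */
                [FOO_IRQ_SRCS_EE] = { 0x0800, 0x0000, 0x80 }, /* 128 B per EE */
        };

        static void __iomem *foo_addr(void __iomem *regs,
                                      const struct foo_reg_layout *layout,
                                      enum foo_reg reg, u32 pipe, u32 ee)
        {
                const struct foo_reg_layout *r = &layout[reg];

                return regs + r->base_offset +
                       r->pipe_mult * pipe +
                       r->ee_mult * ee;
        }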
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 7416572d1e40..6941a77521c3 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -1402,7 +1402,6 @@ static int s3c24xx_dma_remove(struct platform_device *pdev)
1402static struct platform_driver s3c24xx_dma_driver = { 1402static struct platform_driver s3c24xx_dma_driver = {
1403 .driver = { 1403 .driver = {
1404 .name = "s3c24xx-dma", 1404 .name = "s3c24xx-dma",
1405 .owner = THIS_MODULE,
1406 }, 1405 },
1407 .id_table = s3c24xx_dma_driver_ids, 1406 .id_table = s3c24xx_dma_driver_ids,
1408 .probe = s3c24xx_dma_probe, 1407 .probe = s3c24xx_dma_probe,
diff --git a/drivers/dma/sa11x0-dma.c b/drivers/dma/sa11x0-dma.c
index 4b0ef043729a..2329d295efb5 100644
--- a/drivers/dma/sa11x0-dma.c
+++ b/drivers/dma/sa11x0-dma.c
@@ -829,7 +829,6 @@ static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
829{ 829{
830 unsigned i; 830 unsigned i;
831 831
832 dmadev->chancnt = ARRAY_SIZE(chan_desc);
833 INIT_LIST_HEAD(&dmadev->channels); 832 INIT_LIST_HEAD(&dmadev->channels);
834 dmadev->dev = dev; 833 dmadev->dev = dev;
835 dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources; 834 dmadev->device_alloc_chan_resources = sa11x0_dma_alloc_chan_resources;
@@ -838,7 +837,7 @@ static int sa11x0_dma_init_dmadev(struct dma_device *dmadev,
838 dmadev->device_tx_status = sa11x0_dma_tx_status; 837 dmadev->device_tx_status = sa11x0_dma_tx_status;
839 dmadev->device_issue_pending = sa11x0_dma_issue_pending; 838 dmadev->device_issue_pending = sa11x0_dma_issue_pending;
840 839
841 for (i = 0; i < dmadev->chancnt; i++) { 840 for (i = 0; i < ARRAY_SIZE(chan_desc); i++) {
842 struct sa11x0_dma_chan *c; 841 struct sa11x0_dma_chan *c;
843 842
844 c = kzalloc(sizeof(*c), GFP_KERNEL); 843 c = kzalloc(sizeof(*c), GFP_KERNEL);
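The chancnt removals in this and the following drivers rest on the core doing the counting: dma_async_device_register() walks dma_dev->channels and derives the channel count itself (an assumption about the dmaengine core of this era), so a driver only has to put each channel on the list. A hypothetical sketch:

        #include <linux/dmaengine.h>

        static void foo_add_channels(struct dma_device *dmadev,
                                     struct dma_chan *chans, unsigned int nr)
        {
                unsigned int i;

                INIT_LIST_HEAD(&dmadev->channels);
                for (i = 0; i < nr; i++) {
                        chans[i].device = dmadev;
                        /* the core counts this list; no manual chancnt bookkeeping */
                        list_add_tail(&chans[i].device_node, &dmadev->channels);
                }
        }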
diff --git a/drivers/dma/sh/rcar-audmapp.c b/drivers/dma/sh/rcar-audmapp.c
index 80fd2aeb4870..d95bbdd721f4 100644
--- a/drivers/dma/sh/rcar-audmapp.c
+++ b/drivers/dma/sh/rcar-audmapp.c
@@ -253,7 +253,6 @@ static int audmapp_chan_probe(struct platform_device *pdev,
253 253
254static void audmapp_chan_remove(struct audmapp_device *audev) 254static void audmapp_chan_remove(struct audmapp_device *audev)
255{ 255{
256 struct dma_device *dma_dev = &audev->shdma_dev.dma_dev;
257 struct shdma_chan *schan; 256 struct shdma_chan *schan;
258 int i; 257 int i;
259 258
@@ -261,7 +260,6 @@ static void audmapp_chan_remove(struct audmapp_device *audev)
261 BUG_ON(!schan); 260 BUG_ON(!schan);
262 shdma_chan_remove(schan); 261 shdma_chan_remove(schan);
263 } 262 }
264 dma_dev->chancnt = 0;
265} 263}
266 264
267static struct dma_chan *audmapp_of_xlate(struct of_phandle_args *dma_spec, 265static struct dma_chan *audmapp_of_xlate(struct of_phandle_args *dma_spec,
@@ -367,7 +365,6 @@ static struct platform_driver audmapp_driver = {
367 .probe = audmapp_probe, 365 .probe = audmapp_probe,
368 .remove = audmapp_remove, 366 .remove = audmapp_remove,
369 .driver = { 367 .driver = {
370 .owner = THIS_MODULE,
371 .name = "rcar-audmapp-engine", 368 .name = "rcar-audmapp-engine",
372 .of_match_table = audmapp_of_match, 369 .of_match_table = audmapp_of_match,
373 }, 370 },
diff --git a/drivers/dma/sh/rcar-hpbdma.c b/drivers/dma/sh/rcar-hpbdma.c
index b212d9471ab5..20a6f6f2a018 100644
--- a/drivers/dma/sh/rcar-hpbdma.c
+++ b/drivers/dma/sh/rcar-hpbdma.c
@@ -619,7 +619,6 @@ error:
619 619
620static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev) 620static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
621{ 621{
622 struct dma_device *dma_dev = &hpbdev->shdma_dev.dma_dev;
623 struct shdma_chan *schan; 622 struct shdma_chan *schan;
624 int i; 623 int i;
625 624
@@ -628,7 +627,6 @@ static void hpb_dmae_chan_remove(struct hpb_dmae_device *hpbdev)
628 627
629 shdma_chan_remove(schan); 628 shdma_chan_remove(schan);
630 } 629 }
631 dma_dev->chancnt = 0;
632} 630}
633 631
634static int hpb_dmae_remove(struct platform_device *pdev) 632static int hpb_dmae_remove(struct platform_device *pdev)
@@ -655,7 +653,6 @@ static struct platform_driver hpb_dmae_driver = {
655 .remove = hpb_dmae_remove, 653 .remove = hpb_dmae_remove,
656 .shutdown = hpb_dmae_shutdown, 654 .shutdown = hpb_dmae_shutdown,
657 .driver = { 655 .driver = {
658 .owner = THIS_MODULE,
659 .name = "hpb-dma-engine", 656 .name = "hpb-dma-engine",
660 }, 657 },
661}; 658};
diff --git a/drivers/dma/sh/shdma-base.c b/drivers/dma/sh/shdma-base.c
index 42d497416196..3a2adb131d46 100644
--- a/drivers/dma/sh/shdma-base.c
+++ b/drivers/dma/sh/shdma-base.c
@@ -391,6 +391,8 @@ static dma_async_tx_callback __ld_cleanup(struct shdma_chan *schan, bool all)
391 dev_dbg(schan->dev, "Bring down channel %d\n", schan->id); 391 dev_dbg(schan->dev, "Bring down channel %d\n", schan->id);
392 pm_runtime_put(schan->dev); 392 pm_runtime_put(schan->dev);
393 schan->pm_state = SHDMA_PM_ESTABLISHED; 393 schan->pm_state = SHDMA_PM_ESTABLISHED;
394 } else if (schan->pm_state == SHDMA_PM_PENDING) {
395 shdma_chan_xfer_ld_queue(schan);
394 } 396 }
395 } 397 }
396 } 398 }
@@ -951,7 +953,7 @@ void shdma_chan_probe(struct shdma_dev *sdev,
951 /* Add the channel to DMA device channel list */ 953 /* Add the channel to DMA device channel list */
952 list_add_tail(&schan->dma_chan.device_node, 954 list_add_tail(&schan->dma_chan.device_node,
953 &sdev->dma_dev.channels); 955 &sdev->dma_dev.channels);
954 sdev->schan[sdev->dma_dev.chancnt++] = schan; 956 sdev->schan[id] = schan;
955} 957}
956EXPORT_SYMBOL(shdma_chan_probe); 958EXPORT_SYMBOL(shdma_chan_probe);
957 959
diff --git a/drivers/dma/sh/shdma-of.c b/drivers/dma/sh/shdma-of.c
index b4ff9d3e56d1..f999f9b0d314 100644
--- a/drivers/dma/sh/shdma-of.c
+++ b/drivers/dma/sh/shdma-of.c
@@ -66,7 +66,6 @@ MODULE_DEVICE_TABLE(of, sh_dmae_of_match);
66 66
67static struct platform_driver shdma_of = { 67static struct platform_driver shdma_of = {
68 .driver = { 68 .driver = {
69 .owner = THIS_MODULE,
70 .name = "shdma-of", 69 .name = "shdma-of",
71 .of_match_table = shdma_of_match, 70 .of_match_table = shdma_of_match,
72 }, 71 },
diff --git a/drivers/dma/sh/shdmac.c b/drivers/dma/sh/shdmac.c
index 58eb85770eba..b65317c6ea4e 100644
--- a/drivers/dma/sh/shdmac.c
+++ b/drivers/dma/sh/shdmac.c
@@ -572,7 +572,6 @@ err_no_irq:
572 572
573static void sh_dmae_chan_remove(struct sh_dmae_device *shdev) 573static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
574{ 574{
575 struct dma_device *dma_dev = &shdev->shdma_dev.dma_dev;
576 struct shdma_chan *schan; 575 struct shdma_chan *schan;
577 int i; 576 int i;
578 577
@@ -581,7 +580,6 @@ static void sh_dmae_chan_remove(struct sh_dmae_device *shdev)
581 580
582 shdma_chan_remove(schan); 581 shdma_chan_remove(schan);
583 } 582 }
584 dma_dev->chancnt = 0;
585} 583}
586 584
587static void sh_dmae_shutdown(struct platform_device *pdev) 585static void sh_dmae_shutdown(struct platform_device *pdev)
diff --git a/drivers/dma/sh/sudmac.c b/drivers/dma/sh/sudmac.c
index 3ce103909896..6da2eaa6c294 100644
--- a/drivers/dma/sh/sudmac.c
+++ b/drivers/dma/sh/sudmac.c
@@ -295,7 +295,6 @@ err_no_irq:
295 295
296static void sudmac_chan_remove(struct sudmac_device *su_dev) 296static void sudmac_chan_remove(struct sudmac_device *su_dev)
297{ 297{
298 struct dma_device *dma_dev = &su_dev->shdma_dev.dma_dev;
299 struct shdma_chan *schan; 298 struct shdma_chan *schan;
300 int i; 299 int i;
301 300
@@ -304,7 +303,6 @@ static void sudmac_chan_remove(struct sudmac_device *su_dev)
304 303
305 shdma_chan_remove(schan); 304 shdma_chan_remove(schan);
306 } 305 }
307 dma_dev->chancnt = 0;
308} 306}
309 307
310static dma_addr_t sudmac_slave_addr(struct shdma_chan *schan) 308static dma_addr_t sudmac_slave_addr(struct shdma_chan *schan)
@@ -411,7 +409,6 @@ static int sudmac_remove(struct platform_device *pdev)
411 409
412static struct platform_driver sudmac_driver = { 410static struct platform_driver sudmac_driver = {
413 .driver = { 411 .driver = {
414 .owner = THIS_MODULE,
415 .name = SUDMAC_DRV_NAME, 412 .name = SUDMAC_DRV_NAME,
416 }, 413 },
417 .probe = sudmac_probe, 414 .probe = sudmac_probe,
diff --git a/drivers/dma/sirf-dma.c b/drivers/dma/sirf-dma.c
index aac03ab10c54..feb1e8ab8d7b 100644
--- a/drivers/dma/sirf-dma.c
+++ b/drivers/dma/sirf-dma.c
@@ -735,7 +735,6 @@ static int sirfsoc_dma_probe(struct platform_device *op)
735 735
736 dma = &sdma->dma; 736 dma = &sdma->dma;
737 dma->dev = dev; 737 dma->dev = dev;
738 dma->chancnt = SIRFSOC_DMA_CHANNELS;
739 738
740 dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources; 739 dma->device_alloc_chan_resources = sirfsoc_dma_alloc_chan_resources;
741 dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources; 740 dma->device_free_chan_resources = sirfsoc_dma_free_chan_resources;
@@ -752,7 +751,7 @@ static int sirfsoc_dma_probe(struct platform_device *op)
752 dma_cap_set(DMA_INTERLEAVE, dma->cap_mask); 751 dma_cap_set(DMA_INTERLEAVE, dma->cap_mask);
753 dma_cap_set(DMA_PRIVATE, dma->cap_mask); 752 dma_cap_set(DMA_PRIVATE, dma->cap_mask);
754 753
755 for (i = 0; i < dma->chancnt; i++) { 754 for (i = 0; i < SIRFSOC_DMA_CHANNELS; i++) {
756 schan = &sdma->channels[i]; 755 schan = &sdma->channels[i];
757 756
758 schan->chan.device = dma; 757 schan->chan.device = dma;
@@ -835,6 +834,7 @@ static int sirfsoc_dma_runtime_resume(struct device *dev)
835 return 0; 834 return 0;
836} 835}
837 836
837#ifdef CONFIG_PM_SLEEP
838static int sirfsoc_dma_pm_suspend(struct device *dev) 838static int sirfsoc_dma_pm_suspend(struct device *dev)
839{ 839{
840 struct sirfsoc_dma *sdma = dev_get_drvdata(dev); 840 struct sirfsoc_dma *sdma = dev_get_drvdata(dev);
@@ -916,6 +916,7 @@ static int sirfsoc_dma_pm_resume(struct device *dev)
916 916
917 return 0; 917 return 0;
918} 918}
919#endif
919 920
920static const struct dev_pm_ops sirfsoc_dma_pm_ops = { 921static const struct dev_pm_ops sirfsoc_dma_pm_ops = {
921 SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL) 922 SET_RUNTIME_PM_OPS(sirfsoc_dma_runtime_suspend, sirfsoc_dma_runtime_resume, NULL)
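The new #ifdef pair matters because SET_SYSTEM_SLEEP_PM_OPS() compiles to nothing when CONFIG_PM_SLEEP is off, which would leave the sleep handlers defined but unreferenced and trigger build warnings. A hedged sketch of the intended shape (foo_ names invented, bodies stubbed out):

        #include <linux/device.h>
        #include <linux/pm.h>

        static int foo_runtime_suspend(struct device *dev) { return 0; }
        static int foo_runtime_resume(struct device *dev) { return 0; }

        #ifdef CONFIG_PM_SLEEP
        /* only referenced via SET_SYSTEM_SLEEP_PM_OPS(), which is empty
         * without CONFIG_PM_SLEEP; hence the guard */
        static int foo_pm_suspend(struct device *dev) { return 0; }
        static int foo_pm_resume(struct device *dev) { return 0; }
        #endif

        static const struct dev_pm_ops foo_pm_ops = {
                SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
                SET_SYSTEM_SLEEP_PM_OPS(foo_pm_suspend, foo_pm_resume)
        };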
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index d9ca3e32d748..4d0710648b08 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -3432,6 +3432,7 @@ static int __init d40_lcla_allocate(struct d40_base *base)
3432 3432
3433 d40_err(base->dev, "Failed to allocate %d pages.\n", 3433 d40_err(base->dev, "Failed to allocate %d pages.\n",
3434 base->lcla_pool.pages); 3434 base->lcla_pool.pages);
3435 ret = -ENOMEM;
3435 3436
3436 for (j = 0; j < i; j++) 3437 for (j = 0; j < i; j++)
3437 free_pages(page_list[j], base->lcla_pool.pages); 3438 free_pages(page_list[j], base->lcla_pool.pages);
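The one-line ste_dma40 fix above is the classic error-path bug: the unwind code runs, but the function still returns the stale ret from the last successful step, so the caller may see success on failure. A small self-contained illustration of the corrected pattern (foo_ names and sizes invented):

        #include <linux/slab.h>

        #define FOO_NBUFS 8

        static int foo_alloc_bufs(void *bufs[FOO_NBUFS])
        {
                unsigned int i;
                int ret = 0;

                for (i = 0; i < FOO_NBUFS; i++) {
                        bufs[i] = kmalloc(64, GFP_KERNEL);
                        if (!bufs[i]) {
                                ret = -ENOMEM;  /* the assignment the fix adds */
                                goto unwind;
                        }
                }
                return 0;

        unwind:
                while (i--)
                        kfree(bufs[i]);
                return ret;
        }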
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 91292f5513ff..159f1736a16f 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -18,6 +18,7 @@
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/of_dma.h> 20#include <linux/of_dma.h>
21#include <linux/of_device.h>
21#include <linux/platform_device.h> 22#include <linux/platform_device.h>
22#include <linux/reset.h> 23#include <linux/reset.h>
23#include <linux/slab.h> 24#include <linux/slab.h>
@@ -26,24 +27,6 @@
26#include "virt-dma.h" 27#include "virt-dma.h"
27 28
28/* 29/*
29 * There's 16 physical channels that can work in parallel.
30 *
31 * However we have 30 different endpoints for our requests.
32 *
33 * Since the channels are able to handle only an unidirectional
34 * transfer, we need to allocate more virtual channels so that
35 * everyone can grab one channel.
36 *
37 * Some devices can't work in both direction (mostly because it
38 * wouldn't make sense), so we have a bit fewer virtual channels than
39 * 2 channels per endpoints.
40 */
41
42#define NR_MAX_CHANNELS 16
43#define NR_MAX_REQUESTS 30
44#define NR_MAX_VCHANS 53
45
46/*
47 * Common registers 30 * Common registers
48 */ 31 */
49#define DMA_IRQ_EN(x) ((x) * 0x04) 32#define DMA_IRQ_EN(x) ((x) * 0x04)
@@ -60,6 +43,12 @@
60#define DMA_STAT 0x30 43#define DMA_STAT 0x30
61 44
62/* 45/*
46 * sun8i specific registers
47 */
48#define SUN8I_DMA_GATE 0x20
49#define SUN8I_DMA_GATE_ENABLE 0x4
50
51/*
63 * Channels specific registers 52 * Channels specific registers
64 */ 53 */
65#define DMA_CHAN_ENABLE 0x00 54#define DMA_CHAN_ENABLE 0x00
@@ -102,6 +91,19 @@
102#define DRQ_SDRAM 1 91#define DRQ_SDRAM 1
103 92
104/* 93/*
94 * Hardware channels / ports representation
95 *
96 * The hardware is used in several SoCs, with differing numbers
97 * of channels and endpoints. This structure ties those numbers
98 * to a certain compatible string.
99 */
100struct sun6i_dma_config {
101 u32 nr_max_channels;
102 u32 nr_max_requests;
103 u32 nr_max_vchans;
104};
105
106/*
105 * Hardware representation of the LLI 107 * Hardware representation of the LLI
106 * 108 *
107 * The hardware will be fed the physical address of this structure, 109 * The hardware will be fed the physical address of this structure,
@@ -159,6 +161,7 @@ struct sun6i_dma_dev {
159 struct dma_pool *pool; 161 struct dma_pool *pool;
160 struct sun6i_pchan *pchans; 162 struct sun6i_pchan *pchans;
161 struct sun6i_vchan *vchans; 163 struct sun6i_vchan *vchans;
164 const struct sun6i_dma_config *cfg;
162}; 165};
163 166
164static struct device *chan2dev(struct dma_chan *chan) 167static struct device *chan2dev(struct dma_chan *chan)
@@ -426,6 +429,7 @@ static int sun6i_dma_start_desc(struct sun6i_vchan *vchan)
426static void sun6i_dma_tasklet(unsigned long data) 429static void sun6i_dma_tasklet(unsigned long data)
427{ 430{
428 struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data; 431 struct sun6i_dma_dev *sdev = (struct sun6i_dma_dev *)data;
432 const struct sun6i_dma_config *cfg = sdev->cfg;
429 struct sun6i_vchan *vchan; 433 struct sun6i_vchan *vchan;
430 struct sun6i_pchan *pchan; 434 struct sun6i_pchan *pchan;
431 unsigned int pchan_alloc = 0; 435 unsigned int pchan_alloc = 0;
@@ -453,7 +457,7 @@ static void sun6i_dma_tasklet(unsigned long data)
453 } 457 }
454 458
455 spin_lock_irq(&sdev->lock); 459 spin_lock_irq(&sdev->lock);
456 for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) { 460 for (pchan_idx = 0; pchan_idx < cfg->nr_max_channels; pchan_idx++) {
457 pchan = &sdev->pchans[pchan_idx]; 461 pchan = &sdev->pchans[pchan_idx];
458 462
459 if (pchan->vchan || list_empty(&sdev->pending)) 463 if (pchan->vchan || list_empty(&sdev->pending))
@@ -474,7 +478,7 @@ static void sun6i_dma_tasklet(unsigned long data)
474 } 478 }
475 spin_unlock_irq(&sdev->lock); 479 spin_unlock_irq(&sdev->lock);
476 480
477 for (pchan_idx = 0; pchan_idx < NR_MAX_CHANNELS; pchan_idx++) { 481 for (pchan_idx = 0; pchan_idx < cfg->nr_max_channels; pchan_idx++) {
478 if (!(pchan_alloc & BIT(pchan_idx))) 482 if (!(pchan_alloc & BIT(pchan_idx)))
479 continue; 483 continue;
480 484
@@ -496,7 +500,7 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
496 int i, j, ret = IRQ_NONE; 500 int i, j, ret = IRQ_NONE;
497 u32 status; 501 u32 status;
498 502
499 for (i = 0; i < 2; i++) { 503 for (i = 0; i < sdev->cfg->nr_max_channels / DMA_IRQ_CHAN_NR; i++) {
500 status = readl(sdev->base + DMA_IRQ_STAT(i)); 504 status = readl(sdev->base + DMA_IRQ_STAT(i));
501 if (!status) 505 if (!status)
502 continue; 506 continue;
@@ -506,7 +510,7 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
506 510
507 writel(status, sdev->base + DMA_IRQ_STAT(i)); 511 writel(status, sdev->base + DMA_IRQ_STAT(i));
508 512
509 for (j = 0; (j < 8) && status; j++) { 513 for (j = 0; (j < DMA_IRQ_CHAN_NR) && status; j++) {
510 if (status & DMA_IRQ_QUEUE) { 514 if (status & DMA_IRQ_QUEUE) {
511 pchan = sdev->pchans + j; 515 pchan = sdev->pchans + j;
512 vchan = pchan->vchan; 516 vchan = pchan->vchan;
@@ -519,7 +523,7 @@ static irqreturn_t sun6i_dma_interrupt(int irq, void *dev_id)
519 } 523 }
520 } 524 }
521 525
522 status = status >> 4; 526 status = status >> DMA_IRQ_CHAN_WIDTH;
523 } 527 }
524 528
525 if (!atomic_read(&sdev->tasklet_shutdown)) 529 if (!atomic_read(&sdev->tasklet_shutdown))
@@ -815,7 +819,7 @@ static struct dma_chan *sun6i_dma_of_xlate(struct of_phandle_args *dma_spec,
815 struct dma_chan *chan; 819 struct dma_chan *chan;
816 u8 port = dma_spec->args[0]; 820 u8 port = dma_spec->args[0];
817 821
818 if (port > NR_MAX_REQUESTS) 822 if (port > sdev->cfg->nr_max_requests)
819 return NULL; 823 return NULL;
820 824
821 chan = dma_get_any_slave_channel(&sdev->slave); 825 chan = dma_get_any_slave_channel(&sdev->slave);
@@ -848,7 +852,7 @@ static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev)
848{ 852{
849 int i; 853 int i;
850 854
851 for (i = 0; i < NR_MAX_VCHANS; i++) { 855 for (i = 0; i < sdev->cfg->nr_max_vchans; i++) {
852 struct sun6i_vchan *vchan = &sdev->vchans[i]; 856 struct sun6i_vchan *vchan = &sdev->vchans[i];
853 857
854 list_del(&vchan->vc.chan.device_node); 858 list_del(&vchan->vc.chan.device_node);
@@ -856,8 +860,48 @@ static inline void sun6i_dma_free(struct sun6i_dma_dev *sdev)
856 } 860 }
857} 861}
858 862
863/*
864 * For A31:
865 *
866 * There are 16 physical channels that can work in parallel.
867 *
868 * However we have 30 different endpoints for our requests.
869 *
870 * Since the channels are able to handle only a unidirectional
871 * transfer, we need to allocate more virtual channels so that
872 * everyone can grab one channel.
873 *
874 * Some devices can't work in both directions (mostly because it
875 * wouldn't make sense), so we have a bit fewer virtual channels than
876 * 2 channels per endpoint.
877 */
878
879static struct sun6i_dma_config sun6i_a31_dma_cfg = {
880 .nr_max_channels = 16,
881 .nr_max_requests = 30,
882 .nr_max_vchans = 53,
883};
884
885/*
886 * The A23 only has 8 physical channels, a maximum DRQ port id of 24,
887 * and a total of 37 usable source and destination endpoints.
888 */
889
890static struct sun6i_dma_config sun8i_a23_dma_cfg = {
891 .nr_max_channels = 8,
892 .nr_max_requests = 24,
893 .nr_max_vchans = 37,
894};
895
896static struct of_device_id sun6i_dma_match[] = {
897 { .compatible = "allwinner,sun6i-a31-dma", .data = &sun6i_a31_dma_cfg },
898 { .compatible = "allwinner,sun8i-a23-dma", .data = &sun8i_a23_dma_cfg },
899 { /* sentinel */ }
900};
901
859static int sun6i_dma_probe(struct platform_device *pdev) 902static int sun6i_dma_probe(struct platform_device *pdev)
860{ 903{
904 const struct of_device_id *device;
861 struct sun6i_dma_dev *sdc; 905 struct sun6i_dma_dev *sdc;
862 struct resource *res; 906 struct resource *res;
863 int ret, i; 907 int ret, i;
@@ -866,6 +910,11 @@ static int sun6i_dma_probe(struct platform_device *pdev)
866 if (!sdc) 910 if (!sdc)
867 return -ENOMEM; 911 return -ENOMEM;
868 912
913 device = of_match_device(sun6i_dma_match, &pdev->dev);
914 if (!device)
915 return -ENODEV;
916 sdc->cfg = device->data;
917
869 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 918 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
870 sdc->base = devm_ioremap_resource(&pdev->dev, res); 919 sdc->base = devm_ioremap_resource(&pdev->dev, res);
871 if (IS_ERR(sdc->base)) 920 if (IS_ERR(sdc->base))
@@ -912,31 +961,30 @@ static int sun6i_dma_probe(struct platform_device *pdev)
912 sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg; 961 sdc->slave.device_prep_slave_sg = sun6i_dma_prep_slave_sg;
913 sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy; 962 sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy;
914 sdc->slave.device_control = sun6i_dma_control; 963 sdc->slave.device_control = sun6i_dma_control;
915 sdc->slave.chancnt = NR_MAX_VCHANS;
916 sdc->slave.copy_align = 4; 964 sdc->slave.copy_align = 4;
917 965
918 sdc->slave.dev = &pdev->dev; 966 sdc->slave.dev = &pdev->dev;
919 967
920 sdc->pchans = devm_kcalloc(&pdev->dev, NR_MAX_CHANNELS, 968 sdc->pchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_channels,
921 sizeof(struct sun6i_pchan), GFP_KERNEL); 969 sizeof(struct sun6i_pchan), GFP_KERNEL);
922 if (!sdc->pchans) 970 if (!sdc->pchans)
923 return -ENOMEM; 971 return -ENOMEM;
924 972
925 sdc->vchans = devm_kcalloc(&pdev->dev, NR_MAX_VCHANS, 973 sdc->vchans = devm_kcalloc(&pdev->dev, sdc->cfg->nr_max_vchans,
926 sizeof(struct sun6i_vchan), GFP_KERNEL); 974 sizeof(struct sun6i_vchan), GFP_KERNEL);
927 if (!sdc->vchans) 975 if (!sdc->vchans)
928 return -ENOMEM; 976 return -ENOMEM;
929 977
930 tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc); 978 tasklet_init(&sdc->task, sun6i_dma_tasklet, (unsigned long)sdc);
931 979
932 for (i = 0; i < NR_MAX_CHANNELS; i++) { 980 for (i = 0; i < sdc->cfg->nr_max_channels; i++) {
933 struct sun6i_pchan *pchan = &sdc->pchans[i]; 981 struct sun6i_pchan *pchan = &sdc->pchans[i];
934 982
935 pchan->idx = i; 983 pchan->idx = i;
936 pchan->base = sdc->base + 0x100 + i * 0x40; 984 pchan->base = sdc->base + 0x100 + i * 0x40;
937 } 985 }
938 986
939 for (i = 0; i < NR_MAX_VCHANS; i++) { 987 for (i = 0; i < sdc->cfg->nr_max_vchans; i++) {
940 struct sun6i_vchan *vchan = &sdc->vchans[i]; 988 struct sun6i_vchan *vchan = &sdc->vchans[i];
941 989
942 INIT_LIST_HEAD(&vchan->node); 990 INIT_LIST_HEAD(&vchan->node);
@@ -976,6 +1024,15 @@ static int sun6i_dma_probe(struct platform_device *pdev)
976 goto err_dma_unregister; 1024 goto err_dma_unregister;
977 } 1025 }
978 1026
1027 /*
1028 * sun8i variant requires us to toggle a dma gating register,
1029 * as seen in Allwinner's SDK. This register is not documented
1030 * in the A23 user manual.
1031 */
1032 if (of_device_is_compatible(pdev->dev.of_node,
1033 "allwinner,sun8i-a23-dma"))
1034 writel(SUN8I_DMA_GATE_ENABLE, sdc->base + SUN8I_DMA_GATE);
1035
979 return 0; 1036 return 0;
980 1037
981err_dma_unregister: 1038err_dma_unregister:
@@ -1008,11 +1065,6 @@ static int sun6i_dma_remove(struct platform_device *pdev)
1008 return 0; 1065 return 0;
1009} 1066}
1010 1067
1011static struct of_device_id sun6i_dma_match[] = {
1012 { .compatible = "allwinner,sun6i-a31-dma" },
1013 { /* sentinel */ }
1014};
1015
1016static struct platform_driver sun6i_dma_driver = { 1068static struct platform_driver sun6i_dma_driver = {
1017 .probe = sun6i_dma_probe, 1069 .probe = sun6i_dma_probe,
1018 .remove = sun6i_dma_remove, 1070 .remove = sun6i_dma_remove,
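The sun6i rework is the standard of_device_id .data pattern: each compatible string carries a pointer to a per-SoC parameter block, and probe dereferences it instead of relying on compile-time maxima. A generic sketch with invented names:

        #include <linux/of_device.h>
        #include <linux/platform_device.h>

        struct foo_cfg {
                u32 nr_channels;
        };

        static const struct foo_cfg foo_a_cfg = { .nr_channels = 16 };
        static const struct foo_cfg foo_b_cfg = { .nr_channels = 8 };

        static const struct of_device_id foo_match[] = {
                { .compatible = "vendor,foo-a", .data = &foo_a_cfg },
                { .compatible = "vendor,foo-b", .data = &foo_b_cfg },
                { /* sentinel */ }
        };

        static int foo_probe(struct platform_device *pdev)
        {
                const struct of_device_id *match;
                const struct foo_cfg *cfg;

                match = of_match_device(foo_match, &pdev->dev);
                if (!match)
                        return -ENODEV;
                cfg = match->data;

                /* allocate cfg->nr_channels channels instead of a fixed maximum */
                return 0;
        }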
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 1c867d0303db..d8450c3f35f0 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -1597,7 +1597,6 @@ static const struct dev_pm_ops tegra_dma_dev_pm_ops = {
1597static struct platform_driver tegra_dmac_driver = { 1597static struct platform_driver tegra_dmac_driver = {
1598 .driver = { 1598 .driver = {
1599 .name = "tegra-apbdma", 1599 .name = "tegra-apbdma",
1600 .owner = THIS_MODULE,
1601 .pm = &tegra_dma_dev_pm_ops, 1600 .pm = &tegra_dma_dev_pm_ops,
1602 .of_match_table = tegra_dma_of_match, 1601 .of_match_table = tegra_dma_of_match,
1603 }, 1602 },
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c
index 4506a7b4f972..2407ccf1a64b 100644
--- a/drivers/dma/timb_dma.c
+++ b/drivers/dma/timb_dma.c
@@ -783,7 +783,6 @@ static int td_remove(struct platform_device *pdev)
783static struct platform_driver td_driver = { 783static struct platform_driver td_driver = {
784 .driver = { 784 .driver = {
785 .name = DRIVER_NAME, 785 .name = DRIVER_NAME,
786 .owner = THIS_MODULE,
787 }, 786 },
788 .probe = td_probe, 787 .probe = td_probe,
789 .remove = td_remove, 788 .remove = td_remove,
diff --git a/drivers/dma/txx9dmac.c b/drivers/dma/txx9dmac.c
index 17686caf64d5..0659ec9c4488 100644
--- a/drivers/dma/txx9dmac.c
+++ b/drivers/dma/txx9dmac.c
@@ -76,7 +76,7 @@ static void channel64_write_CHAR(const struct txx9dmac_chan *dc, dma_addr_t val)
76 76
77static void channel64_clear_CHAR(const struct txx9dmac_chan *dc) 77static void channel64_clear_CHAR(const struct txx9dmac_chan *dc)
78{ 78{
79#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR) 79#if defined(CONFIG_32BIT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
80 channel64_writel(dc, CHAR, 0); 80 channel64_writel(dc, CHAR, 0);
81 channel64_writel(dc, __pad_CHAR, 0); 81 channel64_writel(dc, __pad_CHAR, 0);
82#else 82#else
diff --git a/drivers/dma/txx9dmac.h b/drivers/dma/txx9dmac.h
index f5a760598882..f6517b928bab 100644
--- a/drivers/dma/txx9dmac.h
+++ b/drivers/dma/txx9dmac.h
@@ -67,7 +67,7 @@ static inline bool txx9_dma_have_SMPCHN(void)
67 67
68/* Hardware register definitions. */ 68/* Hardware register definitions. */
69struct txx9dmac_cregs { 69struct txx9dmac_cregs {
70#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR) 70#if defined(CONFIG_32BIT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
71 TXX9_DMA_REG32(CHAR); /* Chain Address Register */ 71 TXX9_DMA_REG32(CHAR); /* Chain Address Register */
72#else 72#else
73 u64 CHAR; /* Chain Address Register */ 73 u64 CHAR; /* Chain Address Register */
@@ -201,7 +201,7 @@ static inline bool is_dmac64(const struct txx9dmac_chan *dc)
201#ifdef TXX9_DMA_USE_SIMPLE_CHAIN 201#ifdef TXX9_DMA_USE_SIMPLE_CHAIN
202/* Hardware descriptor definition. (for simple-chain) */ 202/* Hardware descriptor definition. (for simple-chain) */
203struct txx9dmac_hwdesc { 203struct txx9dmac_hwdesc {
204#if defined(CONFIG_32BIT) && !defined(CONFIG_64BIT_PHYS_ADDR) 204#if defined(CONFIG_32BIT) && !defined(CONFIG_PHYS_ADDR_T_64BIT)
205 TXX9_DMA_REG32(CHAR); 205 TXX9_DMA_REG32(CHAR);
206#else 206#else
207 u64 CHAR; 207 u64 CHAR;
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index a6e64767186e..4a3a8f3137b3 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -942,6 +942,9 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
942 if (!xt->numf || !xt->sgl[0].size) 942 if (!xt->numf || !xt->sgl[0].size)
943 return NULL; 943 return NULL;
944 944
945 if (xt->frame_size != 1)
946 return NULL;
947
945 /* Allocate a transaction descriptor. */ 948 /* Allocate a transaction descriptor. */
946 desc = xilinx_vdma_alloc_tx_descriptor(chan); 949 desc = xilinx_vdma_alloc_tx_descriptor(chan);
947 if (!desc) 950 if (!desc)
@@ -960,7 +963,7 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
960 hw = &segment->hw; 963 hw = &segment->hw;
961 hw->vsize = xt->numf; 964 hw->vsize = xt->numf;
962 hw->hsize = xt->sgl[0].size; 965 hw->hsize = xt->sgl[0].size;
963 hw->stride = xt->sgl[0].icg << 966 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
964 XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT; 967 XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT;
965 hw->stride |= chan->config.frm_dly << 968 hw->stride |= chan->config.frm_dly <<
966 XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT; 969 XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
@@ -971,9 +974,11 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
971 hw->buf_addr = xt->src_start; 974 hw->buf_addr = xt->src_start;
972 975
973 /* Link the previous next descriptor to current */ 976 /* Link the previous next descriptor to current */
974 prev = list_last_entry(&desc->segments, 977 if (!list_empty(&desc->segments)) {
975 struct xilinx_vdma_tx_segment, node); 978 prev = list_last_entry(&desc->segments,
976 prev->hw.next_desc = segment->phys; 979 struct xilinx_vdma_tx_segment, node);
980 prev->hw.next_desc = segment->phys;
981 }
977 982
978 /* Insert the segment into the descriptor segments list. */ 983 /* Insert the segment into the descriptor segments list. */
979 list_add_tail(&segment->node, &desc->segments); 984 list_add_tail(&segment->node, &desc->segments);
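The stride change above deserves a worked example: dmaengine's icg field carries only the gap between chunks, while the VDMA stride register wants the full line pitch. A hedged helper showing the arithmetic (the template type is from <linux/dmaengine.h>; the helper name is invented):

        #include <linux/dmaengine.h>

        static inline size_t foo_line_pitch(const struct dma_interleaved_template *xt)
        {
                /* e.g. size = 640 bytes of data, icg = 384 bytes of padding:
                 * the hardware must advance 640 + 384 = 1024 bytes per line */
                return xt->sgl[0].size + xt->sgl[0].icg;
        }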
diff --git a/drivers/gpio/gpio-msm-v1.c b/drivers/gpio/gpio-msm-v1.c
index 73b73969d361..997e61ef173c 100644
--- a/drivers/gpio/gpio-msm-v1.c
+++ b/drivers/gpio/gpio-msm-v1.c
@@ -686,7 +686,7 @@ static int gpio_msm_v1_probe(struct platform_device *pdev)
686 irq_set_chained_handler(irq1, msm_gpio_irq_handler); 686 irq_set_chained_handler(irq1, msm_gpio_irq_handler);
687 irq_set_chained_handler(irq2, msm_gpio_irq_handler); 687 irq_set_chained_handler(irq2, msm_gpio_irq_handler);
688 irq_set_irq_wake(irq1, 1); 688 irq_set_irq_wake(irq1, 1);
689 irq_set_irq_wake(irq2, 2); 689 irq_set_irq_wake(irq2, 1);
690 return 0; 690 return 0;
691} 691}
692 692
diff --git a/drivers/gpio/gpio-spear-spics.c b/drivers/gpio/gpio-spear-spics.c
index 353263c85d26..506a2ea0eb4c 100644
--- a/drivers/gpio/gpio-spear-spics.c
+++ b/drivers/gpio/gpio-spear-spics.c
@@ -204,5 +204,5 @@ static int __init spics_gpio_init(void)
204subsys_initcall(spics_gpio_init); 204subsys_initcall(spics_gpio_init);
205 205
206MODULE_AUTHOR("Shiraz Hashim <shiraz.linux.kernel@gmail.com>"); 206MODULE_AUTHOR("Shiraz Hashim <shiraz.linux.kernel@gmail.com>");
207MODULE_DESCRIPTION("ST Microlectronics SPEAr SPI Chip Select Abstraction"); 207MODULE_DESCRIPTION("STMicroelectronics SPEAr SPI Chip Select Abstraction");
208MODULE_LICENSE("GPL"); 208MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
index 4a85bb644e24..b928c17bdeed 100644
--- a/drivers/gpu/drm/radeon/mkregtable.c
+++ b/drivers/gpu/drm/radeon/mkregtable.c
@@ -347,7 +347,7 @@ static inline void list_splice_tail_init(struct list_head *list,
347 * list_entry - get the struct for this entry 347 * list_entry - get the struct for this entry
348 * @ptr: the &struct list_head pointer. 348 * @ptr: the &struct list_head pointer.
349 * @type: the type of the struct this is embedded in. 349 * @type: the type of the struct this is embedded in.
350 * @member: the name of the list_struct within the struct. 350 * @member: the name of the list_head within the struct.
351 */ 351 */
352#define list_entry(ptr, type, member) \ 352#define list_entry(ptr, type, member) \
353 container_of(ptr, type, member) 353 container_of(ptr, type, member)
@@ -356,7 +356,7 @@ static inline void list_splice_tail_init(struct list_head *list,
356 * list_first_entry - get the first element from a list 356 * list_first_entry - get the first element from a list
357 * @ptr: the list head to take the element from. 357 * @ptr: the list head to take the element from.
358 * @type: the type of the struct this is embedded in. 358 * @type: the type of the struct this is embedded in.
359 * @member: the name of the list_struct within the struct. 359 * @member: the name of the list_head within the struct.
360 * 360 *
361 * Note, that list is expected to be not empty. 361 * Note, that list is expected to be not empty.
362 */ 362 */
@@ -406,7 +406,7 @@ static inline void list_splice_tail_init(struct list_head *list,
406 * list_for_each_entry - iterate over list of given type 406 * list_for_each_entry - iterate over list of given type
407 * @pos: the type * to use as a loop cursor. 407 * @pos: the type * to use as a loop cursor.
408 * @head: the head for your list. 408 * @head: the head for your list.
409 * @member: the name of the list_struct within the struct. 409 * @member: the name of the list_head within the struct.
410 */ 410 */
411#define list_for_each_entry(pos, head, member) \ 411#define list_for_each_entry(pos, head, member) \
412 for (pos = list_entry((head)->next, typeof(*pos), member); \ 412 for (pos = list_entry((head)->next, typeof(*pos), member); \
@@ -417,7 +417,7 @@ static inline void list_splice_tail_init(struct list_head *list,
417 * list_for_each_entry_reverse - iterate backwards over list of given type. 417 * list_for_each_entry_reverse - iterate backwards over list of given type.
418 * @pos: the type * to use as a loop cursor. 418 * @pos: the type * to use as a loop cursor.
419 * @head: the head for your list. 419 * @head: the head for your list.
420 * @member: the name of the list_struct within the struct. 420 * @member: the name of the list_head within the struct.
421 */ 421 */
422#define list_for_each_entry_reverse(pos, head, member) \ 422#define list_for_each_entry_reverse(pos, head, member) \
423 for (pos = list_entry((head)->prev, typeof(*pos), member); \ 423 for (pos = list_entry((head)->prev, typeof(*pos), member); \
@@ -428,7 +428,7 @@ static inline void list_splice_tail_init(struct list_head *list,
428 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue() 428 * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
429 * @pos: the type * to use as a start point 429 * @pos: the type * to use as a start point
430 * @head: the head of the list 430 * @head: the head of the list
431 * @member: the name of the list_struct within the struct. 431 * @member: the name of the list_head within the struct.
432 * 432 *
433 * Prepares a pos entry for use as a start point in list_for_each_entry_continue(). 433 * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
434 */ 434 */
@@ -439,7 +439,7 @@ static inline void list_splice_tail_init(struct list_head *list,
439 * list_for_each_entry_continue - continue iteration over list of given type 439 * list_for_each_entry_continue - continue iteration over list of given type
440 * @pos: the type * to use as a loop cursor. 440 * @pos: the type * to use as a loop cursor.
441 * @head: the head for your list. 441 * @head: the head for your list.
442 * @member: the name of the list_struct within the struct. 442 * @member: the name of the list_head within the struct.
443 * 443 *
444 * Continue to iterate over list of given type, continuing after 444 * Continue to iterate over list of given type, continuing after
445 * the current position. 445 * the current position.
@@ -453,7 +453,7 @@ static inline void list_splice_tail_init(struct list_head *list,
453 * list_for_each_entry_continue_reverse - iterate backwards from the given point 453 * list_for_each_entry_continue_reverse - iterate backwards from the given point
454 * @pos: the type * to use as a loop cursor. 454 * @pos: the type * to use as a loop cursor.
455 * @head: the head for your list. 455 * @head: the head for your list.
456 * @member: the name of the list_struct within the struct. 456 * @member: the name of the list_head within the struct.
457 * 457 *
458 * Start to iterate over list of given type backwards, continuing after 458 * Start to iterate over list of given type backwards, continuing after
459 * the current position. 459 * the current position.
@@ -467,7 +467,7 @@ static inline void list_splice_tail_init(struct list_head *list,
467 * list_for_each_entry_from - iterate over list of given type from the current point 467 * list_for_each_entry_from - iterate over list of given type from the current point
468 * @pos: the type * to use as a loop cursor. 468 * @pos: the type * to use as a loop cursor.
469 * @head: the head for your list. 469 * @head: the head for your list.
470 * @member: the name of the list_struct within the struct. 470 * @member: the name of the list_head within the struct.
471 * 471 *
472 * Iterate over list of given type, continuing from current position. 472 * Iterate over list of given type, continuing from current position.
473 */ 473 */
@@ -480,7 +480,7 @@ static inline void list_splice_tail_init(struct list_head *list,
480 * @pos: the type * to use as a loop cursor. 480 * @pos: the type * to use as a loop cursor.
481 * @n: another type * to use as temporary storage 481 * @n: another type * to use as temporary storage
482 * @head: the head for your list. 482 * @head: the head for your list.
483 * @member: the name of the list_struct within the struct. 483 * @member: the name of the list_head within the struct.
484 */ 484 */
485#define list_for_each_entry_safe(pos, n, head, member) \ 485#define list_for_each_entry_safe(pos, n, head, member) \
486 for (pos = list_entry((head)->next, typeof(*pos), member), \ 486 for (pos = list_entry((head)->next, typeof(*pos), member), \
@@ -493,7 +493,7 @@ static inline void list_splice_tail_init(struct list_head *list,
493 * @pos: the type * to use as a loop cursor. 493 * @pos: the type * to use as a loop cursor.
494 * @n: another type * to use as temporary storage 494 * @n: another type * to use as temporary storage
495 * @head: the head for your list. 495 * @head: the head for your list.
496 * @member: the name of the list_struct within the struct. 496 * @member: the name of the list_head within the struct.
497 * 497 *
498 * Iterate over list of given type, continuing after current point, 498 * Iterate over list of given type, continuing after current point,
499 * safe against removal of list entry. 499 * safe against removal of list entry.
@@ -509,7 +509,7 @@ static inline void list_splice_tail_init(struct list_head *list,
509 * @pos: the type * to use as a loop cursor. 509 * @pos: the type * to use as a loop cursor.
510 * @n: another type * to use as temporary storage 510 * @n: another type * to use as temporary storage
511 * @head: the head for your list. 511 * @head: the head for your list.
512 * @member: the name of the list_struct within the struct. 512 * @member: the name of the list_head within the struct.
513 * 513 *
514 * Iterate over list of given type from current point, safe against 514 * Iterate over list of given type from current point, safe against
515 * removal of list entry. 515 * removal of list entry.
@@ -524,7 +524,7 @@ static inline void list_splice_tail_init(struct list_head *list,
524 * @pos: the type * to use as a loop cursor. 524 * @pos: the type * to use as a loop cursor.
525 * @n: another type * to use as temporary storage 525 * @n: another type * to use as temporary storage
526 * @head: the head for your list. 526 * @head: the head for your list.
527 * @member: the name of the list_struct within the struct. 527 * @member: the name of the list_head within the struct.
528 * 528 *
529 * Iterate backwards over list of given type, safe against removal 529 * Iterate backwards over list of given type, safe against removal
530 * of list entry. 530 * of list entry.
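A sketch of the iteration pattern these kernel-doc comments describe, safe removal of entries mid-walk (the struct, list and function names here are hypothetical):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct item {
            int value;
            struct list_head node;          /* the @member argument */
    };

    static void drop_negative(struct list_head *head)
    {
            struct item *pos, *n;

            /* the _safe variant caches the next entry in @n before the
             * body runs, so @pos may be unlinked and freed safely */
            list_for_each_entry_safe(pos, n, head, node) {
                    if (pos->value < 0) {
                            list_del(&pos->node);
                            kfree(pos);
                    }
            }
    }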
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index f42df4dd58d2..230b6f887cd8 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -371,6 +371,7 @@ config HID_LOGITECH_DJ
371 tristate "Logitech Unifying receivers full support" 371 tristate "Logitech Unifying receivers full support"
372 depends on HIDRAW 372 depends on HIDRAW
373 depends on HID_LOGITECH 373 depends on HID_LOGITECH
374 select HID_LOGITECH_HIDPP
374 ---help--- 375 ---help---
375 Say Y if you want support for Logitech Unifying receivers and devices. 376 Say Y if you want support for Logitech Unifying receivers and devices.
376 Unifying receivers are capable of pairing up to 6 Logitech compliant 377 Unifying receivers are capable of pairing up to 6 Logitech compliant
@@ -378,6 +379,17 @@ config HID_LOGITECH_DJ
378 generic USB_HID driver and all incoming events will be multiplexed 379 generic USB_HID driver and all incoming events will be multiplexed
379 into a single mouse and a single keyboard device. 380 into a single mouse and a single keyboard device.
380 381
382config HID_LOGITECH_HIDPP
383 tristate "Logitech HID++ devices support"
384 depends on HID_LOGITECH
385 ---help---
 386	  Support for Logitech devices relying on the HID++ Logitech specification

387
388 Say Y if you want support for Logitech devices relying on the HID++
389 specification. Such devices are the various Logitech Touchpads (T650,
390 T651, TK820), some mice (Zone Touch mouse), or even keyboards (Solar
 391	  Keyboard).
392
381config LOGITECH_FF 393config LOGITECH_FF
382 bool "Logitech force feedback support" 394 bool "Logitech force feedback support"
383 depends on HID_LOGITECH 395 depends on HID_LOGITECH
@@ -613,6 +625,13 @@ config HID_PICOLCD_CIR
613 ---help--- 625 ---help---
614 Provide access to PicoLCD's CIR interface via remote control (LIRC). 626 Provide access to PicoLCD's CIR interface via remote control (LIRC).
615 627
628config HID_PLANTRONICS
629 tristate "Plantronics USB HID Driver"
630 default !EXPERT
631 depends on HID
632 ---help---
633 Provides HID support for Plantronics telephony devices.
634
616config HID_PRIMAX 635config HID_PRIMAX
617 tristate "Primax non-fully HID-compliant devices" 636 tristate "Primax non-fully HID-compliant devices"
618 depends on HID 637 depends on HID
@@ -629,7 +648,7 @@ config HID_ROCCAT
629 support for its special functionalities. 648 support for its special functionalities.
630 649
631config HID_SAITEK 650config HID_SAITEK
632 tristate "Saitek non-fully HID-compliant devices" 651 tristate "Saitek (Mad Catz) non-fully HID-compliant devices"
633 depends on HID 652 depends on HID
634 ---help--- 653 ---help---
635 Support for Saitek devices that are not fully compliant with the 654 Support for Saitek devices that are not fully compliant with the
@@ -637,6 +656,7 @@ config HID_SAITEK
637 656
638 Supported devices: 657 Supported devices:
639 - PS1000 Dual Analog Pad 658 - PS1000 Dual Analog Pad
659 - R.A.T.9 Gaming Mouse
640 - R.A.T.7 Gaming Mouse 660 - R.A.T.7 Gaming Mouse
641 - M.M.O.7 Gaming Mouse 661 - M.M.O.7 Gaming Mouse
642 662
diff --git a/drivers/hid/Makefile b/drivers/hid/Makefile
index e2850d8af9ca..debd15b44b59 100644
--- a/drivers/hid/Makefile
+++ b/drivers/hid/Makefile
@@ -63,6 +63,7 @@ obj-$(CONFIG_HID_LCPOWER) += hid-lcpower.o
63obj-$(CONFIG_HID_LENOVO) += hid-lenovo.o 63obj-$(CONFIG_HID_LENOVO) += hid-lenovo.o
64obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o 64obj-$(CONFIG_HID_LOGITECH) += hid-logitech.o
65obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o 65obj-$(CONFIG_HID_LOGITECH_DJ) += hid-logitech-dj.o
66obj-$(CONFIG_HID_LOGITECH_HIDPP) += hid-logitech-hidpp.o
66obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o 67obj-$(CONFIG_HID_MAGICMOUSE) += hid-magicmouse.o
67obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o 68obj-$(CONFIG_HID_MICROSOFT) += hid-microsoft.o
68obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o 69obj-$(CONFIG_HID_MONTEREY) += hid-monterey.o
@@ -94,6 +95,7 @@ ifdef CONFIG_DEBUG_FS
94hid-picolcd-y += hid-picolcd_debugfs.o 95hid-picolcd-y += hid-picolcd_debugfs.o
95endif 96endif
96 97
98obj-$(CONFIG_HID_PLANTRONICS) += hid-plantronics.o
97obj-$(CONFIG_HID_PRIMAX) += hid-primax.o 99obj-$(CONFIG_HID_PRIMAX) += hid-primax.o
98obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o hid-roccat-common.o \ 100obj-$(CONFIG_HID_ROCCAT) += hid-roccat.o hid-roccat-common.o \
99 hid-roccat-arvo.o hid-roccat-isku.o hid-roccat-kone.o \ 101 hid-roccat-arvo.o hid-roccat-isku.o hid-roccat-kone.o \
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 3402033fa52a..c3d0ac1a0988 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -702,6 +702,11 @@ static void hid_scan_collection(struct hid_parser *parser, unsigned type)
702 if (((parser->global.usage_page << 16) == HID_UP_SENSOR) && 702 if (((parser->global.usage_page << 16) == HID_UP_SENSOR) &&
703 type == HID_COLLECTION_PHYSICAL) 703 type == HID_COLLECTION_PHYSICAL)
704 hid->group = HID_GROUP_SENSOR_HUB; 704 hid->group = HID_GROUP_SENSOR_HUB;
705
706 if (hid->vendor == USB_VENDOR_ID_MICROSOFT &&
707 hid->product == USB_DEVICE_ID_MS_TYPE_COVER_3 &&
708 hid->group == HID_GROUP_MULTITOUCH)
709 hid->group = HID_GROUP_GENERIC;
705} 710}
706 711
707static int hid_scan_main(struct hid_parser *parser, struct hid_item *item) 712static int hid_scan_main(struct hid_parser *parser, struct hid_item *item)
@@ -780,22 +785,19 @@ static int hid_scan_report(struct hid_device *hid)
780 hid->group = HID_GROUP_MULTITOUCH_WIN_8; 785 hid->group = HID_GROUP_MULTITOUCH_WIN_8;
781 786
782 /* 787 /*
783 * Vendor specific handlings
784 */
785 if ((hid->vendor == USB_VENDOR_ID_SYNAPTICS) &&
786 (hid->group == HID_GROUP_GENERIC) &&
787 /* only bind to the mouse interface of composite USB devices */
788 (hid->bus != BUS_USB || hid->type == HID_TYPE_USBMOUSE))
789 /* hid-rmi should take care of them, not hid-generic */
790 hid->group = HID_GROUP_RMI;
791
792 /*
 793	 * Vendor specific handling 788	 * Vendor specific handling
794 */ 789 */
795 switch (hid->vendor) { 790 switch (hid->vendor) {
796 case USB_VENDOR_ID_WACOM: 791 case USB_VENDOR_ID_WACOM:
797 hid->group = HID_GROUP_WACOM; 792 hid->group = HID_GROUP_WACOM;
798 break; 793 break;
794 case USB_VENDOR_ID_SYNAPTICS:
795 if ((hid->group == HID_GROUP_GENERIC) &&
796 (hid->bus != BUS_USB || hid->type == HID_TYPE_USBMOUSE))
797 /* hid-rmi should only bind to the mouse interface of
798 * composite USB devices */
799 hid->group = HID_GROUP_RMI;
800 break;
799 } 801 }
800 802
801 vfree(parser); 803 vfree(parser);
@@ -1280,12 +1282,6 @@ void hid_output_report(struct hid_report *report, __u8 *data)
1280} 1282}
1281EXPORT_SYMBOL_GPL(hid_output_report); 1283EXPORT_SYMBOL_GPL(hid_output_report);
1282 1284
1283static int hid_report_len(struct hid_report *report)
1284{
1285 /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
1286 return ((report->size - 1) >> 3) + 1 + (report->id > 0);
1287}
1288
1289/* 1285/*
1290 * Allocator for buffer that is going to be passed to hid_output_report() 1286 * Allocator for buffer that is going to be passed to hid_output_report()
1291 */ 1287 */
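The helper removed here computes the same value hid-input.c switches to below, presumably because hid_report_len() moved to a shared header; a quick worked check of the expression, assuming a 12-bit report with a non-zero report ID:

    /* len = ((size_bits - 1) >> 3) + 1 + (id > 0)
     *     = ((12 - 1) >> 3) + 1 + 1
     *     = 1 + 1 + 1 = 3 bytes
     * i.e. DIV_ROUND_UP(12, 8) payload bytes plus one byte for the
     * report ID, matching the deleted comment. */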
@@ -1822,6 +1818,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1822 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) }, 1818 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_S510_RECEIVER_2) },
1823 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) }, 1819 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_RECEIVER) },
1824 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) }, 1820 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3) },
1821 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_T651) },
1825 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) }, 1822 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_DESKTOP) },
1826 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) }, 1823 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_EDGE) },
1827 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) }, 1824 { HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_DINOVO_MINI) },
@@ -1862,6 +1859,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1862 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) }, 1859 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_DIGITAL_MEDIA_3K) },
1863 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) }, 1860 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_WIRELESS_OPTICAL_DESKTOP_3_0) },
1864 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) }, 1861 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_OFFICE_KB) },
1862 { HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3) },
1865 { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) }, 1863 { HID_USB_DEVICE(USB_VENDOR_ID_MONTEREY, USB_DEVICE_ID_GENIUS_KB29E) },
1866 { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) }, 1864 { HID_USB_DEVICE(USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL) },
1867 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) }, 1865 { HID_USB_DEVICE(USB_VENDOR_ID_NTRIG, USB_DEVICE_ID_NTRIG_TOUCH_SCREEN) },
@@ -1887,6 +1885,7 @@ static const struct hid_device_id hid_have_special_driver[] = {
1887 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) }, 1885 { HID_USB_DEVICE(USB_VENDOR_ID_ORTEK, USB_DEVICE_ID_ORTEK_WKB2000) },
1888 { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) }, 1886 { HID_USB_DEVICE(USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_6000) },
1889 { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) }, 1887 { HID_USB_DEVICE(USB_VENDOR_ID_PETALYNX, USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE) },
1888 { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
1890 { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) }, 1889 { HID_USB_DEVICE(USB_VENDOR_ID_PRIMAX, USB_DEVICE_ID_PRIMAX_KEYBOARD) },
1891#if IS_ENABLED(CONFIG_HID_ROCCAT) 1890#if IS_ENABLED(CONFIG_HID_ROCCAT)
1892 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) }, 1891 { HID_USB_DEVICE(USB_VENDOR_ID_ROCCAT, USB_DEVICE_ID_ROCCAT_ARVO) },
@@ -1910,10 +1909,12 @@ static const struct hid_device_id hid_have_special_driver[] = {
1910 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) }, 1909 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_PS1000) },
1911 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) }, 1910 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7) },
1912 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) }, 1911 { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7) },
1912 { HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9) },
1913#endif 1913#endif
1914 { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) }, 1914 { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_IR_REMOTE) },
1915 { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) }, 1915 { HID_USB_DEVICE(USB_VENDOR_ID_SAMSUNG, USB_DEVICE_ID_SAMSUNG_WIRELESS_KBD_MOUSE) },
1916 { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) }, 1916 { HID_USB_DEVICE(USB_VENDOR_ID_SKYCABLE, USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER) },
1917 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE) },
1917 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) }, 1918 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_BUZZ_CONTROLLER) },
1918 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) }, 1919 { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_WIRELESS_BUZZ_CONTROLLER) },
1919 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_BDREMOTE) }, 1920 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_BDREMOTE) },
@@ -2539,7 +2540,8 @@ int hid_add_device(struct hid_device *hdev)
2539 * Scan generic devices for group information 2540 * Scan generic devices for group information
2540 */ 2541 */
2541 if (hid_ignore_special_drivers || 2542 if (hid_ignore_special_drivers ||
2542 !hid_match_id(hdev, hid_have_special_driver)) { 2543 (!hdev->group &&
2544 !hid_match_id(hdev, hid_have_special_driver))) {
2543 ret = hid_scan_report(hdev); 2545 ret = hid_scan_report(hdev);
2544 if (ret) 2546 if (ret)
2545 hid_warn(hdev, "bad device descriptor (%d)\n", ret); 2547 hid_warn(hdev, "bad device descriptor (%d)\n", ret);
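The added !hdev->group test lets an enumerating driver skip the report scan for its children by presetting the group; a minimal sketch of that usage (cf. the hid-logitech-dj.c hunks below, where logi_dj_recv_add_djhid_device() assigns HID_GROUP_LOGITECH_DJ_DEVICE before the child is added):

    /* hypothetical enumerator code: */
    child->group = HID_GROUP_LOGITECH_DJ_DEVICE;    /* non-zero group */
    ret = hid_add_device(child);                    /* report scan skipped */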
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 7c863738e419..7460f3402298 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -300,6 +300,7 @@
300#define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089 300#define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089
301#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B 0x009b 301#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B 0x009b
302#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103 0x0103 302#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103 0x0103
303#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_010c 0x010c
303#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F 0x016f 304#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F 0x016f
304 305
305#define USB_VENDOR_ID_ELECOM 0x056e 306#define USB_VENDOR_ID_ELECOM 0x056e
@@ -578,6 +579,7 @@
578 579
579#define USB_VENDOR_ID_LOGITECH 0x046d 580#define USB_VENDOR_ID_LOGITECH 0x046d
580#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e 581#define USB_DEVICE_ID_LOGITECH_AUDIOHUB 0x0a0e
582#define USB_DEVICE_ID_LOGITECH_T651 0xb00c
581#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101 583#define USB_DEVICE_ID_LOGITECH_RECEIVER 0xc101
582#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110 584#define USB_DEVICE_ID_LOGITECH_HARMONY_FIRST 0xc110
583#define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f 585#define USB_DEVICE_ID_LOGITECH_HARMONY_LAST 0xc14f
@@ -620,6 +622,7 @@
620 622
621#define USB_VENDOR_ID_MADCATZ 0x0738 623#define USB_VENDOR_ID_MADCATZ 0x0738
622#define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540 624#define USB_DEVICE_ID_MADCATZ_BEATPAD 0x4540
625#define USB_DEVICE_ID_MADCATZ_RAT9 0x1709
623 626
624#define USB_VENDOR_ID_MCC 0x09db 627#define USB_VENDOR_ID_MCC 0x09db
625#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076 628#define USB_DEVICE_ID_MCC_PMD1024LS 0x0076
@@ -649,6 +652,7 @@
649#define USB_DEVICE_ID_MS_SURFACE_PRO_2 0x0799 652#define USB_DEVICE_ID_MS_SURFACE_PRO_2 0x0799
650#define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7 653#define USB_DEVICE_ID_MS_TOUCH_COVER_2 0x07a7
651#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9 654#define USB_DEVICE_ID_MS_TYPE_COVER_2 0x07a9
655#define USB_DEVICE_ID_MS_TYPE_COVER_3 0x07dc
652 656
653#define USB_VENDOR_ID_MOJO 0x8282 657#define USB_VENDOR_ID_MOJO 0x8282
654#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201 658#define USB_DEVICE_ID_RETRO_ADAPTER 0x3201
@@ -716,6 +720,8 @@
716#define USB_DEVICE_ID_ORTEK_PKB1700 0x1700 720#define USB_DEVICE_ID_ORTEK_PKB1700 0x1700
717#define USB_DEVICE_ID_ORTEK_WKB2000 0x2000 721#define USB_DEVICE_ID_ORTEK_WKB2000 0x2000
718 722
723#define USB_VENDOR_ID_PLANTRONICS 0x047f
724
719#define USB_VENDOR_ID_PANASONIC 0x04da 725#define USB_VENDOR_ID_PANASONIC 0x04da
720#define USB_DEVICE_ID_PANABOARD_UBT780 0x1044 726#define USB_DEVICE_ID_PANABOARD_UBT780 0x1044
721#define USB_DEVICE_ID_PANABOARD_UBT880 0x104d 727#define USB_DEVICE_ID_PANABOARD_UBT880 0x104d
@@ -813,6 +819,9 @@
813#define USB_VENDOR_ID_SKYCABLE 0x1223 819#define USB_VENDOR_ID_SKYCABLE 0x1223
814#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07 820#define USB_DEVICE_ID_SKYCABLE_WIRELESS_PRESENTER 0x3F07
815 821
822#define USB_VENDOR_ID_SMK 0x0609
823#define USB_DEVICE_ID_SMK_PS3_BDREMOTE 0x0306
824
816#define USB_VENDOR_ID_SONY 0x054c 825#define USB_VENDOR_ID_SONY 0x054c
817#define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE 0x024b 826#define USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE 0x024b
818#define USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE 0x0374 827#define USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE 0x0374
@@ -931,6 +940,9 @@
931#define USB_DEVICE_ID_VERNIER_CYCLOPS 0x0004 940#define USB_DEVICE_ID_VERNIER_CYCLOPS 0x0004
932#define USB_DEVICE_ID_VERNIER_LCSPEC 0x0006 941#define USB_DEVICE_ID_VERNIER_LCSPEC 0x0006
933 942
943#define USB_VENDOR_ID_VTL 0x0306
944#define USB_DEVICE_ID_VTL_MULTITOUCH_FF3F 0xff3f
945
934#define USB_VENDOR_ID_WACOM 0x056a 946#define USB_VENDOR_ID_WACOM 0x056a
935#define USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH 0x81 947#define USB_DEVICE_ID_WACOM_GRAPHIRE_BLUETOOTH 0x81
936#define USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH 0x00BD 948#define USB_DEVICE_ID_WACOM_INTUOS4_BLUETOOTH 0x00BD
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 725f22ca47fc..e0a0f06ac5ef 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -872,7 +872,7 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
872 case 0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT); break; 872 case 0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT); break;
873 case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL); break; 873 case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL); break;
874 874
875 default: goto ignore; 875 default: map_key_clear(KEY_UNKNOWN);
876 } 876 }
877 break; 877 break;
878 878
@@ -1215,7 +1215,7 @@ static void hidinput_led_worker(struct work_struct *work)
1215 return hid->ll_driver->request(hid, report, HID_REQ_SET_REPORT); 1215 return hid->ll_driver->request(hid, report, HID_REQ_SET_REPORT);
1216 1216
1217 /* fall back to generic raw-output-report */ 1217 /* fall back to generic raw-output-report */
1218 len = ((report->size - 1) >> 3) + 1 + (report->id > 0); 1218 len = hid_report_len(report);
1219 buf = hid_alloc_report_buf(report, GFP_KERNEL); 1219 buf = hid_alloc_report_buf(report, GFP_KERNEL);
1220 if (!buf) 1220 if (!buf)
1221 return; 1221 return;
diff --git a/drivers/hid/hid-lenovo.c b/drivers/hid/hid-lenovo.c
index bf227f7679af..4c55f4d95798 100644
--- a/drivers/hid/hid-lenovo.c
+++ b/drivers/hid/hid-lenovo.c
@@ -62,7 +62,6 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev,
62 /* HID_UP_LNVENDOR = USB, HID_UP_MSVENDOR = BT */ 62 /* HID_UP_LNVENDOR = USB, HID_UP_MSVENDOR = BT */
63 if ((usage->hid & HID_USAGE_PAGE) == HID_UP_MSVENDOR || 63 if ((usage->hid & HID_USAGE_PAGE) == HID_UP_MSVENDOR ||
64 (usage->hid & HID_USAGE_PAGE) == HID_UP_LNVENDOR) { 64 (usage->hid & HID_USAGE_PAGE) == HID_UP_LNVENDOR) {
65 set_bit(EV_REP, hi->input->evbit);
66 switch (usage->hid & HID_USAGE) { 65 switch (usage->hid & HID_USAGE) {
67 case 0x00f1: /* Fn-F4: Mic mute */ 66 case 0x00f1: /* Fn-F4: Mic mute */
68 map_key_clear(KEY_MICMUTE); 67 map_key_clear(KEY_MICMUTE);
@@ -85,13 +84,13 @@ static int lenovo_input_mapping_cptkbd(struct hid_device *hdev,
85 case 0x00f8: /* Fn-F11: View open applications (3 boxes) */ 84 case 0x00f8: /* Fn-F11: View open applications (3 boxes) */
86 map_key_clear(KEY_SCALE); 85 map_key_clear(KEY_SCALE);
87 return 1; 86 return 1;
88 case 0x00fa: /* Fn-Esc: Fn-lock toggle */ 87 case 0x00f9: /* Fn-F12: Open My computer (6 boxes) USB-only */
89 map_key_clear(KEY_FN_ESC);
90 return 1;
91 case 0x00fb: /* Fn-F12: Open My computer (6 boxes) USB-only */
92 /* NB: This mapping is invented in raw_event below */ 88 /* NB: This mapping is invented in raw_event below */
93 map_key_clear(KEY_FILE); 89 map_key_clear(KEY_FILE);
94 return 1; 90 return 1;
91 case 0x00fa: /* Fn-Esc: Fn-lock toggle */
92 map_key_clear(KEY_FN_ESC);
93 return 1;
95 } 94 }
96 } 95 }
97 96
@@ -207,8 +206,8 @@ static int lenovo_raw_event(struct hid_device *hdev,
207 && data[0] == 0x15 206 && data[0] == 0x15
208 && data[1] == 0x94 207 && data[1] == 0x94
209 && data[2] == 0x01)) { 208 && data[2] == 0x01)) {
210 data[1] = 0x0; 209 data[1] = 0x00;
211 data[2] = 0x4; 210 data[2] = 0x01;
212 } 211 }
213 212
214 return 0; 213 return 0;
diff --git a/drivers/hid/hid-logitech-dj.c b/drivers/hid/hid-logitech-dj.c
index 71f569292cab..c917ab61aafa 100644
--- a/drivers/hid/hid-logitech-dj.c
+++ b/drivers/hid/hid-logitech-dj.c
@@ -26,9 +26,104 @@
26#include <linux/hid.h> 26#include <linux/hid.h>
27#include <linux/module.h> 27#include <linux/module.h>
28#include <linux/usb.h> 28#include <linux/usb.h>
29#include <linux/kfifo.h>
29#include <asm/unaligned.h> 30#include <asm/unaligned.h>
30#include "hid-ids.h" 31#include "hid-ids.h"
31#include "hid-logitech-dj.h" 32
33#define DJ_MAX_PAIRED_DEVICES 6
34#define DJ_MAX_NUMBER_NOTIFICATIONS 8
35#define DJ_RECEIVER_INDEX 0
36#define DJ_DEVICE_INDEX_MIN 1
37#define DJ_DEVICE_INDEX_MAX 6
38
39#define DJREPORT_SHORT_LENGTH 15
40#define DJREPORT_LONG_LENGTH 32
41
42#define REPORT_ID_DJ_SHORT 0x20
43#define REPORT_ID_DJ_LONG 0x21
44
45#define REPORT_ID_HIDPP_SHORT 0x10
46#define REPORT_ID_HIDPP_LONG 0x11
47
48#define HIDPP_REPORT_SHORT_LENGTH 7
49#define HIDPP_REPORT_LONG_LENGTH 20
50
51#define HIDPP_RECEIVER_INDEX 0xff
52
53#define REPORT_TYPE_RFREPORT_FIRST 0x01
54#define REPORT_TYPE_RFREPORT_LAST 0x1F
55
56/* Command Switch to DJ mode */
57#define REPORT_TYPE_CMD_SWITCH 0x80
58#define CMD_SWITCH_PARAM_DEVBITFIELD 0x00
59#define CMD_SWITCH_PARAM_TIMEOUT_SECONDS 0x01
60#define TIMEOUT_NO_KEEPALIVE 0x00
61
62/* Command to Get the list of Paired devices */
63#define REPORT_TYPE_CMD_GET_PAIRED_DEVICES 0x81
64
65/* Device Paired Notification */
66#define REPORT_TYPE_NOTIF_DEVICE_PAIRED 0x41
67#define SPFUNCTION_MORE_NOTIF_EXPECTED 0x01
68#define SPFUNCTION_DEVICE_LIST_EMPTY 0x02
69#define DEVICE_PAIRED_PARAM_SPFUNCTION 0x00
70#define DEVICE_PAIRED_PARAM_EQUAD_ID_LSB 0x01
71#define DEVICE_PAIRED_PARAM_EQUAD_ID_MSB 0x02
72#define DEVICE_PAIRED_RF_REPORT_TYPE 0x03
73
74/* Device Un-Paired Notification */
75#define REPORT_TYPE_NOTIF_DEVICE_UNPAIRED 0x40
76
77
78/* Connection Status Notification */
79#define REPORT_TYPE_NOTIF_CONNECTION_STATUS 0x42
80#define CONNECTION_STATUS_PARAM_STATUS 0x00
81#define STATUS_LINKLOSS 0x01
82
83/* Error Notification */
84#define REPORT_TYPE_NOTIF_ERROR 0x7F
85#define NOTIF_ERROR_PARAM_ETYPE 0x00
86#define ETYPE_KEEPALIVE_TIMEOUT 0x01
87
88/* supported DJ HID && RF report types */
89#define REPORT_TYPE_KEYBOARD 0x01
90#define REPORT_TYPE_MOUSE 0x02
91#define REPORT_TYPE_CONSUMER_CONTROL 0x03
92#define REPORT_TYPE_SYSTEM_CONTROL 0x04
93#define REPORT_TYPE_MEDIA_CENTER 0x08
94#define REPORT_TYPE_LEDS 0x0E
95
96/* RF Report types bitfield */
97#define STD_KEYBOARD 0x00000002
98#define STD_MOUSE 0x00000004
99#define MULTIMEDIA 0x00000008
100#define POWER_KEYS 0x00000010
101#define MEDIA_CENTER 0x00000100
102#define KBD_LEDS 0x00004000
103
104struct dj_report {
105 u8 report_id;
106 u8 device_index;
107 u8 report_type;
108 u8 report_params[DJREPORT_SHORT_LENGTH - 3];
109};
110
111struct dj_receiver_dev {
112 struct hid_device *hdev;
113 struct dj_device *paired_dj_devices[DJ_MAX_PAIRED_DEVICES +
114 DJ_DEVICE_INDEX_MIN];
115 struct work_struct work;
116 struct kfifo notif_fifo;
117 spinlock_t lock;
118 bool querying_devices;
119};
120
121struct dj_device {
122 struct hid_device *hdev;
123 struct dj_receiver_dev *dj_receiver_dev;
124 u32 reports_supported;
125 u8 device_index;
126};
32 127
33/* Keyboard descriptor (1) */ 128/* Keyboard descriptor (1) */
34static const char kbd_descriptor[] = { 129static const char kbd_descriptor[] = {
@@ -156,6 +251,57 @@ static const char media_descriptor[] = {
156 0xc0, /* EndCollection */ 251 0xc0, /* EndCollection */
157}; /* */ 252}; /* */
158 253
254/* HIDPP descriptor */
255static const char hidpp_descriptor[] = {
256 0x06, 0x00, 0xff, /* Usage Page (Vendor Defined Page 1) */
257 0x09, 0x01, /* Usage (Vendor Usage 1) */
258 0xa1, 0x01, /* Collection (Application) */
259 0x85, 0x10, /* Report ID (16) */
260 0x75, 0x08, /* Report Size (8) */
261 0x95, 0x06, /* Report Count (6) */
262 0x15, 0x00, /* Logical Minimum (0) */
263 0x26, 0xff, 0x00, /* Logical Maximum (255) */
264 0x09, 0x01, /* Usage (Vendor Usage 1) */
265 0x81, 0x00, /* Input (Data,Arr,Abs) */
266 0x09, 0x01, /* Usage (Vendor Usage 1) */
267 0x91, 0x00, /* Output (Data,Arr,Abs) */
268 0xc0, /* End Collection */
269 0x06, 0x00, 0xff, /* Usage Page (Vendor Defined Page 1) */
270 0x09, 0x02, /* Usage (Vendor Usage 2) */
271 0xa1, 0x01, /* Collection (Application) */
272 0x85, 0x11, /* Report ID (17) */
273 0x75, 0x08, /* Report Size (8) */
274 0x95, 0x13, /* Report Count (19) */
275 0x15, 0x00, /* Logical Minimum (0) */
276 0x26, 0xff, 0x00, /* Logical Maximum (255) */
277 0x09, 0x02, /* Usage (Vendor Usage 2) */
278 0x81, 0x00, /* Input (Data,Arr,Abs) */
279 0x09, 0x02, /* Usage (Vendor Usage 2) */
280 0x91, 0x00, /* Output (Data,Arr,Abs) */
281 0xc0, /* End Collection */
282 0x06, 0x00, 0xff, /* Usage Page (Vendor Defined Page 1) */
283 0x09, 0x04, /* Usage (Vendor Usage 0x04) */
284 0xa1, 0x01, /* Collection (Application) */
285 0x85, 0x20, /* Report ID (32) */
286 0x75, 0x08, /* Report Size (8) */
287 0x95, 0x0e, /* Report Count (14) */
288 0x15, 0x00, /* Logical Minimum (0) */
289 0x26, 0xff, 0x00, /* Logical Maximum (255) */
290 0x09, 0x41, /* Usage (Vendor Usage 0x41) */
291 0x81, 0x00, /* Input (Data,Arr,Abs) */
292 0x09, 0x41, /* Usage (Vendor Usage 0x41) */
293 0x91, 0x00, /* Output (Data,Arr,Abs) */
294 0x85, 0x21, /* Report ID (33) */
295 0x95, 0x1f, /* Report Count (31) */
296 0x15, 0x00, /* Logical Minimum (0) */
297 0x26, 0xff, 0x00, /* Logical Maximum (255) */
298 0x09, 0x42, /* Usage (Vendor Usage 0x42) */
299 0x81, 0x00, /* Input (Data,Arr,Abs) */
300 0x09, 0x42, /* Usage (Vendor Usage 0x42) */
301 0x91, 0x00, /* Output (Data,Arr,Abs) */
302 0xc0, /* End Collection */
303};
304
159/* Maximum size of all defined hid reports in bytes (including report id) */ 305/* Maximum size of all defined hid reports in bytes (including report id) */
160#define MAX_REPORT_SIZE 8 306#define MAX_REPORT_SIZE 8
161 307
@@ -165,7 +311,8 @@ static const char media_descriptor[] = {
165 sizeof(mse_descriptor) + \ 311 sizeof(mse_descriptor) + \
166 sizeof(consumer_descriptor) + \ 312 sizeof(consumer_descriptor) + \
167 sizeof(syscontrol_descriptor) + \ 313 sizeof(syscontrol_descriptor) + \
168 sizeof(media_descriptor)) 314 sizeof(media_descriptor) + \
315 sizeof(hidpp_descriptor))
169 316
170/* Number of possible hid report types that can be created by this driver. 317/* Number of possible hid report types that can be created by this driver.
171 * 318 *
@@ -256,11 +403,15 @@ static void logi_dj_recv_add_djhid_device(struct dj_receiver_dev *djrcv_dev,
256 dj_hiddev->dev.parent = &djrcv_hdev->dev; 403 dj_hiddev->dev.parent = &djrcv_hdev->dev;
257 dj_hiddev->bus = BUS_USB; 404 dj_hiddev->bus = BUS_USB;
258 dj_hiddev->vendor = le16_to_cpu(usbdev->descriptor.idVendor); 405 dj_hiddev->vendor = le16_to_cpu(usbdev->descriptor.idVendor);
259 dj_hiddev->product = le16_to_cpu(usbdev->descriptor.idProduct); 406 dj_hiddev->product =
407 (dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_MSB]
408 << 8) |
409 dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_LSB];
260 snprintf(dj_hiddev->name, sizeof(dj_hiddev->name), 410 snprintf(dj_hiddev->name, sizeof(dj_hiddev->name),
261 "Logitech Unifying Device. Wireless PID:%02x%02x", 411 "Logitech Unifying Device. Wireless PID:%04x",
262 dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_MSB], 412 dj_hiddev->product);
263 dj_report->report_params[DEVICE_PAIRED_PARAM_EQUAD_ID_LSB]); 413
414 dj_hiddev->group = HID_GROUP_LOGITECH_DJ_DEVICE;
264 415
265 usb_make_path(usbdev, dj_hiddev->phys, sizeof(dj_hiddev->phys)); 416 usb_make_path(usbdev, dj_hiddev->phys, sizeof(dj_hiddev->phys));
266 snprintf(tmpstr, sizeof(tmpstr), ":%d", dj_report->device_index); 417 snprintf(tmpstr, sizeof(tmpstr), ":%d", dj_report->device_index);
@@ -422,6 +573,13 @@ static void logi_dj_recv_forward_report(struct dj_receiver_dev *djrcv_dev,
422 } 573 }
423} 574}
424 575
576static void logi_dj_recv_forward_hidpp(struct dj_device *dj_dev, u8 *data,
577 int size)
578{
579 /* We are called from atomic context (tasklet && djrcv->lock held) */
580 if (hid_input_report(dj_dev->hdev, HID_INPUT_REPORT, data, size, 1))
581 dbg_hid("hid_input_report error\n");
582}
425 583
426static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev, 584static int logi_dj_recv_send_report(struct dj_receiver_dev *djrcv_dev,
427 struct dj_report *dj_report) 585 struct dj_report *dj_report)
@@ -472,7 +630,9 @@ static int logi_dj_recv_query_paired_devices(struct dj_receiver_dev *djrcv_dev)
472static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev, 630static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
473 unsigned timeout) 631 unsigned timeout)
474{ 632{
633 struct hid_device *hdev = djrcv_dev->hdev;
475 struct dj_report *dj_report; 634 struct dj_report *dj_report;
635 u8 *buf;
476 int retval; 636 int retval;
477 637
478 dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL); 638 dj_report = kzalloc(sizeof(struct dj_report), GFP_KERNEL);
@@ -484,7 +644,6 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
484 dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F; 644 dj_report->report_params[CMD_SWITCH_PARAM_DEVBITFIELD] = 0x3F;
485 dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout; 645 dj_report->report_params[CMD_SWITCH_PARAM_TIMEOUT_SECONDS] = (u8)timeout;
486 retval = logi_dj_recv_send_report(djrcv_dev, dj_report); 646 retval = logi_dj_recv_send_report(djrcv_dev, dj_report);
487 kfree(dj_report);
488 647
489 /* 648 /*
490 * Ugly sleep to work around a USB 3.0 bug when the receiver is still 649 * Ugly sleep to work around a USB 3.0 bug when the receiver is still
@@ -493,6 +652,30 @@ static int logi_dj_recv_switch_to_dj_mode(struct dj_receiver_dev *djrcv_dev,
493 */ 652 */
494 msleep(50); 653 msleep(50);
495 654
655 /*
656 * Magical bits to set up hidpp notifications when the dj devices
657 * are connected/disconnected.
658 *
659 * We can reuse dj_report because HIDPP_REPORT_SHORT_LENGTH is smaller
660 * than DJREPORT_SHORT_LENGTH.
661 */
662 buf = (u8 *)dj_report;
663
664 memset(buf, 0, HIDPP_REPORT_SHORT_LENGTH);
665
666 buf[0] = REPORT_ID_HIDPP_SHORT;
667 buf[1] = 0xFF;
668 buf[2] = 0x80;
669 buf[3] = 0x00;
670 buf[4] = 0x00;
671 buf[5] = 0x09;
672 buf[6] = 0x00;
673
674 hid_hw_raw_request(hdev, REPORT_ID_HIDPP_SHORT, buf,
675 HIDPP_REPORT_SHORT_LENGTH, HID_OUTPUT_REPORT,
676 HID_REQ_SET_REPORT);
677
678 kfree(dj_report);
496 return retval; 679 return retval;
497} 680}
498 681
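Annotating the "magical bits" against the HID++ constants introduced later in this series (the values are taken from the patch; the meaning of register 0x00 and of the flag byte are my assumptions, not stated in the patch):

    buf[0] = REPORT_ID_HIDPP_SHORT; /* 0x10, short HID++ report */
    buf[1] = 0xFF;                  /* HIDPP_RECEIVER_INDEX, address the receiver */
    buf[2] = 0x80;                  /* HIDPP_SET_REGISTER, a RAP register write */
    buf[3] = 0x00;                  /* register 0x00, assumed: notification control */
    buf[4] = 0x00;                  /* register value: reporting flags, assumed */
    buf[5] = 0x09;                  /* to enable device arrival notifications */
    buf[6] = 0x00;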
@@ -509,6 +692,9 @@ static void logi_dj_ll_close(struct hid_device *hid)
509 dbg_hid("%s:%s\n", __func__, hid->phys); 692 dbg_hid("%s:%s\n", __func__, hid->phys);
510} 693}
511 694
695static u8 unifying_name_query[] = {0x10, 0xff, 0x83, 0xb5, 0x40, 0x00, 0x00};
696static u8 unifying_name_answer[] = {0x11, 0xff, 0x83, 0xb5};
697
512static int logi_dj_ll_raw_request(struct hid_device *hid, 698static int logi_dj_ll_raw_request(struct hid_device *hid,
513 unsigned char reportnum, __u8 *buf, 699 unsigned char reportnum, __u8 *buf,
514 size_t count, unsigned char report_type, 700 size_t count, unsigned char report_type,
@@ -519,6 +705,22 @@ static int logi_dj_ll_raw_request(struct hid_device *hid,
519 u8 *out_buf; 705 u8 *out_buf;
520 int ret; 706 int ret;
521 707
708 if ((buf[0] == REPORT_ID_HIDPP_SHORT) ||
709 (buf[0] == REPORT_ID_HIDPP_LONG)) {
710 if (count < 2)
711 return -EINVAL;
712
713 /* special case where we should not overwrite
714 * the device_index */
715 if (count == 7 && !memcmp(buf, unifying_name_query,
716 sizeof(unifying_name_query)))
717 buf[4] |= djdev->device_index - 1;
718 else
719 buf[1] = djdev->device_index;
720 return hid_hw_raw_request(djrcv_dev->hdev, reportnum, buf,
721 count, report_type, reqtype);
722 }
723
522 if (buf[0] != REPORT_TYPE_LEDS) 724 if (buf[0] != REPORT_TYPE_LEDS)
523 return -EINVAL; 725 return -EINVAL;
524 726
@@ -597,6 +799,8 @@ static int logi_dj_ll_parse(struct hid_device *hid)
597 __func__, djdev->reports_supported); 799 __func__, djdev->reports_supported);
598 } 800 }
599 801
802 rdcat(rdesc, &rsize, hidpp_descriptor, sizeof(hidpp_descriptor));
803
600 retval = hid_parse_report(hid, rdesc, rsize); 804 retval = hid_parse_report(hid, rdesc, rsize);
601 kfree(rdesc); 805 kfree(rdesc);
602 806
@@ -624,8 +828,7 @@ static struct hid_ll_driver logi_dj_ll_driver = {
624 .raw_request = logi_dj_ll_raw_request, 828 .raw_request = logi_dj_ll_raw_request,
625}; 829};
626 830
627 831static int logi_dj_dj_event(struct hid_device *hdev,
628static int logi_dj_raw_event(struct hid_device *hdev,
629 struct hid_report *report, u8 *data, 832 struct hid_report *report, u8 *data,
630 int size) 833 int size)
631{ 834{
@@ -633,36 +836,24 @@ static int logi_dj_raw_event(struct hid_device *hdev,
633 struct dj_report *dj_report = (struct dj_report *) data; 836 struct dj_report *dj_report = (struct dj_report *) data;
634 unsigned long flags; 837 unsigned long flags;
635 838
636 dbg_hid("%s, size:%d\n", __func__, size); 839 /*
637 840 * Here we receive all data coming from iface 2, there are 3 cases:
638 /* Here we receive all data coming from iface 2, there are 4 cases:
639 *
640 * 1) Data should continue its normal processing i.e. data does not
641 * come from the DJ collection, in which case we do nothing and
642 * return 0, so hid-core can continue normal processing (will forward
643 * to associated hidraw device)
644 * 841 *
 645	 * 2) Data is from DJ collection, and is intended for this driver i. e. 842	 * 1) Data is intended for this driver, i.e. data contains arrival,
 646	 * data contains arrival, departure, etc notifications, in which case 843	 * departure, etc. notifications, in which case we queue them for delayed
647 * we queue them for delayed processing by the work queue. We return 1 844 * processing by the work queue. We return 1 to hid-core as no further
648 * to hid-core as no further processing is required from it. 845 * processing is required from it.
649 * 846 *
650 * 3) Data is from DJ collection, and informs a connection change, 847 * 2) Data informs a connection change, if the change means rf link
651 * if the change means rf link loss, then we must send a null report 848 * loss, then we must send a null report to the upper layer to discard
652 * to the upper layer to discard potentially pressed keys that may be 849 * potentially pressed keys that may be repeated forever by the input
653 * repeated forever by the input layer. Return 1 to hid-core as no 850 * layer. Return 1 to hid-core as no further processing is required.
654 * further processing is required.
655 * 851 *
656 * 4) Data is from DJ collection and is an actual input event from 852 * 3) Data is an actual input event from a paired DJ device in which
657 * a paired DJ device in which case we forward it to the correct hid 853 * case we forward it to the correct hid device (via hid_input_report()
 658	 * device (via hid_input_report() ) and return 1 so hid-core does not do 854	 * ) and return 1 so hid-core does not do anything else with it.
659 * anything else with it.
660 */ 855 */
661 856
662 /* case 1) */
663 if (data[0] != REPORT_ID_DJ_SHORT)
664 return false;
665
666 if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) || 857 if ((dj_report->device_index < DJ_DEVICE_INDEX_MIN) ||
667 (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) { 858 (dj_report->device_index > DJ_DEVICE_INDEX_MAX)) {
668 /* 859 /*
@@ -707,6 +898,80 @@ out:
707 return true; 898 return true;
708} 899}
709 900
901static int logi_dj_hidpp_event(struct hid_device *hdev,
902 struct hid_report *report, u8 *data,
903 int size)
904{
905 struct dj_receiver_dev *djrcv_dev = hid_get_drvdata(hdev);
906 struct dj_report *dj_report = (struct dj_report *) data;
907 unsigned long flags;
908 u8 device_index = dj_report->device_index;
909
910 if (device_index == HIDPP_RECEIVER_INDEX) {
 911	 /* special case where the device wants to know its unifying
912 * name */
913 if (size == HIDPP_REPORT_LONG_LENGTH &&
914 !memcmp(data, unifying_name_answer,
915 sizeof(unifying_name_answer)) &&
916 ((data[4] & 0xF0) == 0x40))
917 device_index = (data[4] & 0x0F) + 1;
918 else
919 return false;
920 }
921
922 /*
 923	 * Data is from the HID++ collection; in this case, we forward the
 924	 * data to the corresponding child dj device and return 0 to hid-core
 925	 * so the data also goes to the hidraw device of the receiver. This
926 * allows a user space application to implement the full HID++ routing
927 * via the receiver.
928 */
929
930 if ((device_index < DJ_DEVICE_INDEX_MIN) ||
931 (device_index > DJ_DEVICE_INDEX_MAX)) {
932 /*
933 * Device index is wrong, bail out.
934 * This driver can ignore safely the receiver notifications,
935 * so ignore those reports too.
936 */
937 dev_err(&hdev->dev, "%s: invalid device index:%d\n",
938 __func__, dj_report->device_index);
939 return false;
940 }
941
942 spin_lock_irqsave(&djrcv_dev->lock, flags);
943
944 if (!djrcv_dev->paired_dj_devices[device_index])
945 /* received an event for an unknown device, bail out */
946 goto out;
947
948 logi_dj_recv_forward_hidpp(djrcv_dev->paired_dj_devices[device_index],
949 data, size);
950
951out:
952 spin_unlock_irqrestore(&djrcv_dev->lock, flags);
953
954 return false;
955}
956
957static int logi_dj_raw_event(struct hid_device *hdev,
958 struct hid_report *report, u8 *data,
959 int size)
960{
961 dbg_hid("%s, size:%d\n", __func__, size);
962
963 switch (data[0]) {
964 case REPORT_ID_DJ_SHORT:
965 return logi_dj_dj_event(hdev, report, data, size);
966 case REPORT_ID_HIDPP_SHORT:
967 /* intentional fallthrough */
968 case REPORT_ID_HIDPP_LONG:
969 return logi_dj_hidpp_event(hdev, report, data, size);
970 }
971
972 return false;
973}
974
710static int logi_dj_probe(struct hid_device *hdev, 975static int logi_dj_probe(struct hid_device *hdev,
711 const struct hid_device_id *id) 976 const struct hid_device_id *id)
712{ 977{
@@ -714,9 +979,6 @@ static int logi_dj_probe(struct hid_device *hdev,
714 struct dj_receiver_dev *djrcv_dev; 979 struct dj_receiver_dev *djrcv_dev;
715 int retval; 980 int retval;
716 981
717 if (is_dj_device((struct dj_device *)hdev->driver_data))
718 return -ENODEV;
719
720 dbg_hid("%s called for ifnum %d\n", __func__, 982 dbg_hid("%s called for ifnum %d\n", __func__,
721 intf->cur_altsetting->desc.bInterfaceNumber); 983 intf->cur_altsetting->desc.bInterfaceNumber);
722 984
@@ -869,22 +1131,6 @@ static void logi_dj_remove(struct hid_device *hdev)
869 hid_set_drvdata(hdev, NULL); 1131 hid_set_drvdata(hdev, NULL);
870} 1132}
871 1133
872static int logi_djdevice_probe(struct hid_device *hdev,
873 const struct hid_device_id *id)
874{
875 int ret;
876 struct dj_device *dj_dev = hdev->driver_data;
877
878 if (!is_dj_device(dj_dev))
879 return -ENODEV;
880
881 ret = hid_parse(hdev);
882 if (!ret)
883 ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
884
885 return ret;
886}
887
888static const struct hid_device_id logi_dj_receivers[] = { 1134static const struct hid_device_id logi_dj_receivers[] = {
889 {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH, 1135 {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
890 USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER)}, 1136 USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER)},
@@ -906,51 +1152,8 @@ static struct hid_driver logi_djreceiver_driver = {
906#endif 1152#endif
907}; 1153};
908 1154
1155module_hid_driver(logi_djreceiver_driver);
909 1156
910static const struct hid_device_id logi_dj_devices[] = {
911 {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
912 USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER)},
913 {HID_USB_DEVICE(USB_VENDOR_ID_LOGITECH,
914 USB_DEVICE_ID_LOGITECH_UNIFYING_RECEIVER_2)},
915 {}
916};
917
918static struct hid_driver logi_djdevice_driver = {
919 .name = "logitech-djdevice",
920 .id_table = logi_dj_devices,
921 .probe = logi_djdevice_probe,
922};
923
924
925static int __init logi_dj_init(void)
926{
927 int retval;
928
929 dbg_hid("Logitech-DJ:%s\n", __func__);
930
931 retval = hid_register_driver(&logi_djreceiver_driver);
932 if (retval)
933 return retval;
934
935 retval = hid_register_driver(&logi_djdevice_driver);
936 if (retval)
937 hid_unregister_driver(&logi_djreceiver_driver);
938
939 return retval;
940
941}
942
943static void __exit logi_dj_exit(void)
944{
945 dbg_hid("Logitech-DJ:%s\n", __func__);
946
947 hid_unregister_driver(&logi_djdevice_driver);
948 hid_unregister_driver(&logi_djreceiver_driver);
949
950}
951
952module_init(logi_dj_init);
953module_exit(logi_dj_exit);
954MODULE_LICENSE("GPL"); 1157MODULE_LICENSE("GPL");
955MODULE_AUTHOR("Logitech"); 1158MODULE_AUTHOR("Logitech");
956MODULE_AUTHOR("Nestor Lopez Casado"); 1159MODULE_AUTHOR("Nestor Lopez Casado");
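For reference, the module_hid_driver() helper generates the same boilerplate this hunk deletes; roughly (a sketch of the expansion, not quoted from <linux/hid.h>):

    static int __init logi_djreceiver_driver_init(void)
    {
            return hid_register_driver(&logi_djreceiver_driver);
    }
    module_init(logi_djreceiver_driver_init);

    static void __exit logi_djreceiver_driver_exit(void)
    {
            hid_unregister_driver(&logi_djreceiver_driver);
    }
    module_exit(logi_djreceiver_driver_exit);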
diff --git a/drivers/hid/hid-logitech-dj.h b/drivers/hid/hid-logitech-dj.h
deleted file mode 100644
index daeb0aa4bee9..000000000000
--- a/drivers/hid/hid-logitech-dj.h
+++ /dev/null
@@ -1,125 +0,0 @@
1#ifndef __HID_LOGITECH_DJ_H
2#define __HID_LOGITECH_DJ_H
3
4/*
5 * HID driver for Logitech Unifying receivers
6 *
7 * Copyright (c) 2011 Logitech
8 */
9
10/*
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 *
24 */
25
26#include <linux/kfifo.h>
27
28#define DJ_MAX_PAIRED_DEVICES 6
29#define DJ_MAX_NUMBER_NOTIFICATIONS 8
30#define DJ_RECEIVER_INDEX 0
31#define DJ_DEVICE_INDEX_MIN 1
32#define DJ_DEVICE_INDEX_MAX 6
33
34#define DJREPORT_SHORT_LENGTH 15
35#define DJREPORT_LONG_LENGTH 32
36
37#define REPORT_ID_DJ_SHORT 0x20
38#define REPORT_ID_DJ_LONG 0x21
39
40#define REPORT_TYPE_RFREPORT_FIRST 0x01
41#define REPORT_TYPE_RFREPORT_LAST 0x1F
42
43/* Command Switch to DJ mode */
44#define REPORT_TYPE_CMD_SWITCH 0x80
45#define CMD_SWITCH_PARAM_DEVBITFIELD 0x00
46#define CMD_SWITCH_PARAM_TIMEOUT_SECONDS 0x01
47#define TIMEOUT_NO_KEEPALIVE 0x00
48
49/* Command to Get the list of Paired devices */
50#define REPORT_TYPE_CMD_GET_PAIRED_DEVICES 0x81
51
52/* Device Paired Notification */
53#define REPORT_TYPE_NOTIF_DEVICE_PAIRED 0x41
54#define SPFUNCTION_MORE_NOTIF_EXPECTED 0x01
55#define SPFUNCTION_DEVICE_LIST_EMPTY 0x02
56#define DEVICE_PAIRED_PARAM_SPFUNCTION 0x00
57#define DEVICE_PAIRED_PARAM_EQUAD_ID_LSB 0x01
58#define DEVICE_PAIRED_PARAM_EQUAD_ID_MSB 0x02
59#define DEVICE_PAIRED_RF_REPORT_TYPE 0x03
60
61/* Device Un-Paired Notification */
62#define REPORT_TYPE_NOTIF_DEVICE_UNPAIRED 0x40
63
64
65/* Connection Status Notification */
66#define REPORT_TYPE_NOTIF_CONNECTION_STATUS 0x42
67#define CONNECTION_STATUS_PARAM_STATUS 0x00
68#define STATUS_LINKLOSS 0x01
69
70/* Error Notification */
71#define REPORT_TYPE_NOTIF_ERROR 0x7F
72#define NOTIF_ERROR_PARAM_ETYPE 0x00
73#define ETYPE_KEEPALIVE_TIMEOUT 0x01
74
75/* supported DJ HID && RF report types */
76#define REPORT_TYPE_KEYBOARD 0x01
77#define REPORT_TYPE_MOUSE 0x02
78#define REPORT_TYPE_CONSUMER_CONTROL 0x03
79#define REPORT_TYPE_SYSTEM_CONTROL 0x04
80#define REPORT_TYPE_MEDIA_CENTER 0x08
81#define REPORT_TYPE_LEDS 0x0E
82
83/* RF Report types bitfield */
84#define STD_KEYBOARD 0x00000002
85#define STD_MOUSE 0x00000004
86#define MULTIMEDIA 0x00000008
87#define POWER_KEYS 0x00000010
88#define MEDIA_CENTER 0x00000100
89#define KBD_LEDS 0x00004000
90
91struct dj_report {
92 u8 report_id;
93 u8 device_index;
94 u8 report_type;
95 u8 report_params[DJREPORT_SHORT_LENGTH - 3];
96};
97
98struct dj_receiver_dev {
99 struct hid_device *hdev;
100 struct dj_device *paired_dj_devices[DJ_MAX_PAIRED_DEVICES +
101 DJ_DEVICE_INDEX_MIN];
102 struct work_struct work;
103 struct kfifo notif_fifo;
104 spinlock_t lock;
105 bool querying_devices;
106};
107
108struct dj_device {
109 struct hid_device *hdev;
110 struct dj_receiver_dev *dj_receiver_dev;
111 u32 reports_supported;
112 u8 device_index;
113};
114
115/**
116 * is_dj_device - know if the given dj_device is not the receiver.
117 * @dj_dev: the dj device to test
118 *
119 * This macro tests if a struct dj_device pointer is a device created
 120 * by the bus enumerator.
121 */
122#define is_dj_device(dj_dev) \
123 (&(dj_dev)->dj_receiver_dev->hdev->dev == (dj_dev)->hdev->dev.parent)
124
125#endif
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
new file mode 100644
index 000000000000..2f420c0b6609
--- /dev/null
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -0,0 +1,1241 @@
1/*
2 * HIDPP protocol for Logitech Unifying receivers
3 *
4 * Copyright (c) 2011 Logitech (c)
5 * Copyright (c) 2012-2013 Google (c)
6 * Copyright (c) 2013-2014 Red Hat Inc.
7 */
8
9/*
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the Free
12 * Software Foundation; version 2 of the License.
13 */
14
15#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
16
17#include <linux/device.h>
18#include <linux/hid.h>
19#include <linux/module.h>
20#include <linux/slab.h>
21#include <linux/sched.h>
22#include <linux/kfifo.h>
23#include <linux/input/mt.h>
24#include <asm/unaligned.h>
25#include "hid-ids.h"
26
27MODULE_LICENSE("GPL");
28MODULE_AUTHOR("Benjamin Tissoires <benjamin.tissoires@gmail.com>");
29MODULE_AUTHOR("Nestor Lopez Casado <nlopezcasad@logitech.com>");
30
31#define REPORT_ID_HIDPP_SHORT 0x10
32#define REPORT_ID_HIDPP_LONG 0x11
33
34#define HIDPP_REPORT_SHORT_LENGTH 7
35#define HIDPP_REPORT_LONG_LENGTH 20
36
37#define HIDPP_QUIRK_CLASS_WTP BIT(0)
38
39/* bits 1..20 are reserved for classes */
40#define HIDPP_QUIRK_DELAYED_INIT BIT(21)
41#define HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS BIT(22)
42#define HIDPP_QUIRK_MULTI_INPUT BIT(23)
43
44/*
 45 * There are two hidpp protocols in use: the first version, hidpp10, is known
 46 * as the register access protocol or RAP; the second version, hidpp20, is
 47 * known as the feature access protocol or FAP.
 48 *
 49 * Most older devices (including the Unifying USB receiver) use the RAP protocol
 50 * whereas most newer devices use the FAP protocol. Both protocols are
 51 * compatible with the underlying transport, which could be USB, Unifying, or
 52 * Bluetooth. The message lengths are defined by the HID vendor specific report
 53 * descriptor for the HIDPP_SHORT report type (total message length 7 bytes) and
54 * the HIDPP_LONG report type (total message length 20 bytes)
55 *
56 * The RAP protocol uses both report types, whereas the FAP only uses HIDPP_LONG
57 * messages. The Unifying receiver itself responds to RAP messages (device index
58 * is 0xFF for the receiver), and all messages (short or long) with a device
59 * index between 1 and 6 are passed untouched to the corresponding paired
60 * Unifying device.
61 *
 62 * The paired device can be RAP or FAP; it will receive the message untouched
 63 * from the Unifying receiver.
64 */
65
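As a concrete example of this framing, the 7-byte RAP read that hid-logitech-dj.c issues as unifying_name_query decodes as follows (constants per the definitions later in this file):

    /* { 0x10, 0xff, 0x83, 0xb5, 0x40, 0x00, 0x00 }
     * [0] 0x10  REPORT_ID_HIDPP_SHORT
     * [1] 0xff  device index, the receiver itself
     * [2] 0x83  HIDPP_GET_LONG_REGISTER, a RAP sub id
     * [3] 0xb5  HIDPP_REG_PAIRING_INFORMATION
     * [4] 0x40  DEVICE_NAME parameter
     * [5..6]    padding to HIDPP_REPORT_SHORT_LENGTH == 7 */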
66struct fap {
67 u8 feature_index;
68 u8 funcindex_clientid;
69 u8 params[HIDPP_REPORT_LONG_LENGTH - 4U];
70};
71
72struct rap {
73 u8 sub_id;
74 u8 reg_address;
75 u8 params[HIDPP_REPORT_LONG_LENGTH - 4U];
76};
77
78struct hidpp_report {
79 u8 report_id;
80 u8 device_index;
81 union {
82 struct fap fap;
83 struct rap rap;
84 u8 rawbytes[sizeof(struct fap)];
85 };
86} __packed;
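A quick size check of this layout (the assertions are mine, not in the patch): struct fap and struct rap each hold two one-byte fields plus HIDPP_REPORT_LONG_LENGTH - 4 = 16 parameter bytes, 18 bytes total; with report_id and device_index the packed report is 20 bytes, exactly one long report:

    /* BUILD_BUG_ON() would be the in-kernel idiom for these */
    _Static_assert(sizeof(struct fap) == HIDPP_REPORT_LONG_LENGTH - 2, "fap");
    _Static_assert(sizeof(struct hidpp_report) == HIDPP_REPORT_LONG_LENGTH,
                   "hidpp_report must be exactly one long report");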
87
88struct hidpp_device {
89 struct hid_device *hid_dev;
90 struct mutex send_mutex;
91 void *send_receive_buf;
92 wait_queue_head_t wait;
93 bool answer_available;
94 u8 protocol_major;
95 u8 protocol_minor;
96
97 void *private_data;
98
99 struct work_struct work;
100 struct kfifo delayed_work_fifo;
101 atomic_t connected;
102 struct input_dev *delayed_input;
103
104 unsigned long quirks;
105};
106
107
108#define HIDPP_ERROR 0x8f
109#define HIDPP_ERROR_SUCCESS 0x00
110#define HIDPP_ERROR_INVALID_SUBID 0x01
111#define HIDPP_ERROR_INVALID_ADRESS 0x02
112#define HIDPP_ERROR_INVALID_VALUE 0x03
113#define HIDPP_ERROR_CONNECT_FAIL 0x04
114#define HIDPP_ERROR_TOO_MANY_DEVICES 0x05
115#define HIDPP_ERROR_ALREADY_EXISTS 0x06
116#define HIDPP_ERROR_BUSY 0x07
117#define HIDPP_ERROR_UNKNOWN_DEVICE 0x08
118#define HIDPP_ERROR_RESOURCE_ERROR 0x09
119#define HIDPP_ERROR_REQUEST_UNAVAILABLE 0x0a
120#define HIDPP_ERROR_INVALID_PARAM_VALUE 0x0b
121#define HIDPP_ERROR_WRONG_PIN_CODE 0x0c
122
123static void hidpp_connect_event(struct hidpp_device *hidpp_dev);
124
125static int __hidpp_send_report(struct hid_device *hdev,
126 struct hidpp_report *hidpp_report)
127{
128 int fields_count, ret;
129
130 switch (hidpp_report->report_id) {
131 case REPORT_ID_HIDPP_SHORT:
132 fields_count = HIDPP_REPORT_SHORT_LENGTH;
133 break;
134 case REPORT_ID_HIDPP_LONG:
135 fields_count = HIDPP_REPORT_LONG_LENGTH;
136 break;
137 default:
138 return -ENODEV;
139 }
140
141 /*
 142	 * set the device_index to the receiver; it will be overwritten by
 143	 * hid_hw_raw_request if needed
144 */
145 hidpp_report->device_index = 0xff;
146
147 ret = hid_hw_raw_request(hdev, hidpp_report->report_id,
148 (u8 *)hidpp_report, fields_count, HID_OUTPUT_REPORT,
149 HID_REQ_SET_REPORT);
150
151 return ret == fields_count ? 0 : -1;
152}
153
154/**
155 * hidpp_send_message_sync() returns 0 in case of success, and something else
156 * in case of a failure.
 157 * - If 'something else' is positive, that means that an error has been raised
 158 * by the protocol itself.
 159 * - If 'something else' is negative, that means that we had a classic error
160 * (-ENOMEM, -EPIPE, etc...)
161 */
162static int hidpp_send_message_sync(struct hidpp_device *hidpp,
163 struct hidpp_report *message,
164 struct hidpp_report *response)
165{
166 int ret;
167
168 mutex_lock(&hidpp->send_mutex);
169
170 hidpp->send_receive_buf = response;
171 hidpp->answer_available = false;
172
173 /*
174 * So that we can later validate the answer when it arrives
175 * in hidpp_raw_event
176 */
177 *response = *message;
178
179 ret = __hidpp_send_report(hidpp->hid_dev, message);
180
181 if (ret) {
182 dbg_hid("__hidpp_send_report returned err: %d\n", ret);
183 memset(response, 0, sizeof(struct hidpp_report));
184 goto exit;
185 }
186
187 if (!wait_event_timeout(hidpp->wait, hidpp->answer_available,
188 5*HZ)) {
189 dbg_hid("%s:timeout waiting for response\n", __func__);
190 memset(response, 0, sizeof(struct hidpp_report));
191 ret = -ETIMEDOUT;
192 }
193
194 if (response->report_id == REPORT_ID_HIDPP_SHORT &&
195 response->fap.feature_index == HIDPP_ERROR) {
196 ret = response->fap.params[1];
197 dbg_hid("__hidpp_send_report got hidpp error %02X\n", ret);
198 goto exit;
199 }
200
201exit:
202 mutex_unlock(&hidpp->send_mutex);
203 return ret;
204
205}
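A minimal caller following the convention documented above (a hypothetical sketch; the error mapping is mine):

    ret = hidpp_send_message_sync(hidpp, message, &response);
    if (ret > 0)            /* protocol-level error, e.g. HIDPP_ERROR_BUSY */
            return -EPROTO; /* caller maps it to an errno */
    if (ret < 0)            /* transport error, e.g. -ETIMEDOUT, -ENOMEM */
            return ret;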
206
207static int hidpp_send_fap_command_sync(struct hidpp_device *hidpp,
208 u8 feat_index, u8 funcindex_clientid, u8 *params, int param_count,
209 struct hidpp_report *response)
210{
211 struct hidpp_report *message;
212 int ret;
213
214 if (param_count > sizeof(message->fap.params))
215 return -EINVAL;
216
217 message = kzalloc(sizeof(struct hidpp_report), GFP_KERNEL);
218 if (!message)
219 return -ENOMEM;
220 message->report_id = REPORT_ID_HIDPP_LONG;
221 message->fap.feature_index = feat_index;
222 message->fap.funcindex_clientid = funcindex_clientid;
223 memcpy(&message->fap.params, params, param_count);
224
225 ret = hidpp_send_message_sync(hidpp, message, response);
226 kfree(message);
227 return ret;
228}
229
230static int hidpp_send_rap_command_sync(struct hidpp_device *hidpp_dev,
231 u8 report_id, u8 sub_id, u8 reg_address, u8 *params, int param_count,
232 struct hidpp_report *response)
233{
234 struct hidpp_report *message;
235 int ret;
236
237 if ((report_id != REPORT_ID_HIDPP_SHORT) &&
238 (report_id != REPORT_ID_HIDPP_LONG))
239 return -EINVAL;
240
241 if (param_count > sizeof(message->rap.params))
242 return -EINVAL;
243
244 message = kzalloc(sizeof(struct hidpp_report), GFP_KERNEL);
245 if (!message)
246 return -ENOMEM;
247 message->report_id = report_id;
248 message->rap.sub_id = sub_id;
249 message->rap.reg_address = reg_address;
250 memcpy(&message->rap.params, params, param_count);
251
252 ret = hidpp_send_message_sync(hidpp_dev, message, response);
253 kfree(message);
254 return ret;
255}
256
257static void delayed_work_cb(struct work_struct *work)
258{
259 struct hidpp_device *hidpp = container_of(work, struct hidpp_device,
260 work);
261 hidpp_connect_event(hidpp);
262}
263
264static inline bool hidpp_match_answer(struct hidpp_report *question,
265 struct hidpp_report *answer)
266{
267 return (answer->fap.feature_index == question->fap.feature_index) &&
268 (answer->fap.funcindex_clientid == question->fap.funcindex_clientid);
269}
270
271static inline bool hidpp_match_error(struct hidpp_report *question,
272 struct hidpp_report *answer)
273{
274 return (answer->fap.feature_index == HIDPP_ERROR) &&
275 (answer->fap.funcindex_clientid == question->fap.feature_index) &&
276 (answer->fap.params[0] == question->fap.funcindex_clientid);
277}
278
279static inline bool hidpp_report_is_connect_event(struct hidpp_report *report)
280{
281 return (report->report_id == REPORT_ID_HIDPP_SHORT) &&
282 (report->rap.sub_id == 0x41);
283}
284
285/* -------------------------------------------------------------------------- */
 286/* HID++ 1.0 commands */
287/* -------------------------------------------------------------------------- */
288
289#define HIDPP_SET_REGISTER 0x80
290#define HIDPP_GET_REGISTER 0x81
291#define HIDPP_SET_LONG_REGISTER 0x82
292#define HIDPP_GET_LONG_REGISTER 0x83
293
294#define HIDPP_REG_PAIRING_INFORMATION 0xB5
295#define DEVICE_NAME 0x40
296
297static char *hidpp_get_unifying_name(struct hidpp_device *hidpp_dev)
298{
299 struct hidpp_report response;
300 int ret;
301 /* hid-logitech-dj is in charge of setting the right device index */
302 u8 params[1] = { DEVICE_NAME };
303 char *name;
304 int len;
305
306 ret = hidpp_send_rap_command_sync(hidpp_dev,
307 REPORT_ID_HIDPP_SHORT,
308 HIDPP_GET_LONG_REGISTER,
309 HIDPP_REG_PAIRING_INFORMATION,
310 params, 1, &response);
311 if (ret)
312 return NULL;
313
314 len = response.rap.params[1];
315
316 if (2 + len > sizeof(response.rap.params))
317 return NULL;
318
319 name = kzalloc(len + 1, GFP_KERNEL);
320 if (!name)
321 return NULL;
322
323 memcpy(name, &response.rap.params[2], len);
324 return name;
325}
326
327/* -------------------------------------------------------------------------- */
328/* 0x0000: Root */
329/* -------------------------------------------------------------------------- */
330
331#define HIDPP_PAGE_ROOT 0x0000
332#define HIDPP_PAGE_ROOT_IDX 0x00
333
334#define CMD_ROOT_GET_FEATURE 0x01
335#define CMD_ROOT_GET_PROTOCOL_VERSION 0x11
336
337static int hidpp_root_get_feature(struct hidpp_device *hidpp, u16 feature,
338 u8 *feature_index, u8 *feature_type)
339{
340 struct hidpp_report response;
341 int ret;
342 u8 params[2] = { feature >> 8, feature & 0x00FF };
343
344 ret = hidpp_send_fap_command_sync(hidpp,
345 HIDPP_PAGE_ROOT_IDX,
346 CMD_ROOT_GET_FEATURE,
347 params, 2, &response);
348 if (ret)
349 return ret;
350
351 *feature_index = response.fap.params[0];
352 *feature_type = response.fap.params[1];
353
354 return ret;
355}
356
357static int hidpp_root_get_protocol_version(struct hidpp_device *hidpp)
358{
359 struct hidpp_report response;
360 int ret;
361
362 ret = hidpp_send_fap_command_sync(hidpp,
363 HIDPP_PAGE_ROOT_IDX,
364 CMD_ROOT_GET_PROTOCOL_VERSION,
365 NULL, 0, &response);
366
367 if (ret == HIDPP_ERROR_INVALID_SUBID) {
368 hidpp->protocol_major = 1;
369 hidpp->protocol_minor = 0;
370 return 0;
371 }
372
373 /* the device might not be connected */
374 if (ret == HIDPP_ERROR_RESOURCE_ERROR)
375 return -EIO;
376
377 if (ret > 0) {
378 hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
379 __func__, ret);
380 return -EPROTO;
381 }
382 if (ret)
383 return ret;
384
385 hidpp->protocol_major = response.fap.params[0];
386 hidpp->protocol_minor = response.fap.params[1];
387
388 return ret;
389}
390
391static bool hidpp_is_connected(struct hidpp_device *hidpp)
392{
393 int ret;
394
395 ret = hidpp_root_get_protocol_version(hidpp);
396 if (!ret)
397 hid_dbg(hidpp->hid_dev, "HID++ %u.%u device connected.\n",
398 hidpp->protocol_major, hidpp->protocol_minor);
399 return ret == 0;
400}
401
402/* -------------------------------------------------------------------------- */
403/* 0x0005: GetDeviceNameType */
404/* -------------------------------------------------------------------------- */
405
406#define HIDPP_PAGE_GET_DEVICE_NAME_TYPE 0x0005
407
408#define CMD_GET_DEVICE_NAME_TYPE_GET_COUNT 0x01
409#define CMD_GET_DEVICE_NAME_TYPE_GET_DEVICE_NAME 0x11
410#define CMD_GET_DEVICE_NAME_TYPE_GET_TYPE 0x21
411
412static int hidpp_devicenametype_get_count(struct hidpp_device *hidpp,
413 u8 feature_index, u8 *nameLength)
414{
415 struct hidpp_report response;
416 int ret;
417
418 ret = hidpp_send_fap_command_sync(hidpp, feature_index,
419 CMD_GET_DEVICE_NAME_TYPE_GET_COUNT, NULL, 0, &response);
420
421 if (ret > 0) {
422 hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
423 __func__, ret);
424 return -EPROTO;
425 }
426 if (ret)
427 return ret;
428
429 *nameLength = response.fap.params[0];
430
431 return ret;
432}
433
434static int hidpp_devicenametype_get_device_name(struct hidpp_device *hidpp,
435 u8 feature_index, u8 char_index, char *device_name, int len_buf)
436{
437 struct hidpp_report response;
438 int ret, i;
439 int count;
440
441 ret = hidpp_send_fap_command_sync(hidpp, feature_index,
442 CMD_GET_DEVICE_NAME_TYPE_GET_DEVICE_NAME, &char_index, 1,
443 &response);
444
445 if (ret > 0) {
446 hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
447 __func__, ret);
448 return -EPROTO;
449 }
450 if (ret)
451 return ret;
452
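	/*
	 * The 4-byte HID++ header (report id, device index, feature index,
	 * function/swid) is not part of the name payload, so the 7- and
	 * 20-byte short/long reports carry 3 and 16 name bytes respectively.
	 */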
453 if (response.report_id == REPORT_ID_HIDPP_LONG)
454 count = HIDPP_REPORT_LONG_LENGTH - 4;
455 else
456 count = HIDPP_REPORT_SHORT_LENGTH - 4;
457
458 if (len_buf < count)
459 count = len_buf;
460
461 for (i = 0; i < count; i++)
462 device_name[i] = response.fap.params[i];
463
464 return count;
465}
466
467static char *hidpp_get_device_name(struct hidpp_device *hidpp)
468{
469 u8 feature_type;
470 u8 feature_index;
471 u8 __name_length;
472 char *name;
473 unsigned index = 0;
474 int ret;
475
476 ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_GET_DEVICE_NAME_TYPE,
477 &feature_index, &feature_type);
478 if (ret)
479 return NULL;
480
481 ret = hidpp_devicenametype_get_count(hidpp, feature_index,
482 &__name_length);
483 if (ret)
484 return NULL;
485
486 name = kzalloc(__name_length + 1, GFP_KERNEL);
487 if (!name)
488 return NULL;
489
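	/* the name arrives in report-sized chunks; index is the read offset */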
490 while (index < __name_length) {
491 ret = hidpp_devicenametype_get_device_name(hidpp,
492 feature_index, index, name + index,
493 __name_length - index);
494 if (ret <= 0) {
495 kfree(name);
496 return NULL;
497 }
498 index += ret;
499 }
500
501 return name;
502}
503
504/* -------------------------------------------------------------------------- */
505/* 0x6100: TouchPadRawXY */
506/* -------------------------------------------------------------------------- */
507
508#define HIDPP_PAGE_TOUCHPAD_RAW_XY 0x6100
509
510#define CMD_TOUCHPAD_GET_RAW_INFO 0x01
511#define CMD_TOUCHPAD_SET_RAW_REPORT_STATE 0x21
512
513#define EVENT_TOUCHPAD_RAW_XY 0x00
514
515#define TOUCHPAD_RAW_XY_ORIGIN_LOWER_LEFT 0x01
516#define TOUCHPAD_RAW_XY_ORIGIN_UPPER_LEFT 0x03
517
518struct hidpp_touchpad_raw_info {
519 u16 x_size;
520 u16 y_size;
521 u8 z_range;
522 u8 area_range;
523 u8 timestamp_unit;
524 u8 maxcontacts;
525 u8 origin;
526 u16 res;
527};
528
529struct hidpp_touchpad_raw_xy_finger {
530 u8 contact_type;
531 u8 contact_status;
532 u16 x;
533 u16 y;
534 u8 z;
535 u8 area;
536 u8 finger_id;
537};
538
539struct hidpp_touchpad_raw_xy {
540 u16 timestamp;
541 struct hidpp_touchpad_raw_xy_finger fingers[2];
542 u8 spurious_flag;
543 u8 end_of_frame;
544 u8 finger_count;
545 u8 button;
546};
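/*
 * A single raw XY report carries at most two contacts; frames with more
 * contacts span several reports, the last one carrying end_of_frame
 * (see wtp_send_raw_xy_event()).
 */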
547
548static int hidpp_touchpad_get_raw_info(struct hidpp_device *hidpp,
549 u8 feature_index, struct hidpp_touchpad_raw_info *raw_info)
550{
551 struct hidpp_report response;
552 int ret;
553 u8 *params = (u8 *)response.fap.params;
554
555 ret = hidpp_send_fap_command_sync(hidpp, feature_index,
556 CMD_TOUCHPAD_GET_RAW_INFO, NULL, 0, &response);
557
558 if (ret > 0) {
559 hid_err(hidpp->hid_dev, "%s: received protocol error 0x%02x\n",
560 __func__, ret);
561 return -EPROTO;
562 }
563 if (ret)
564 return ret;
565
566 raw_info->x_size = get_unaligned_be16(&params[0]);
567 raw_info->y_size = get_unaligned_be16(&params[2]);
568 raw_info->z_range = params[4];
569 raw_info->area_range = params[5];
570 raw_info->maxcontacts = params[7];
571 raw_info->origin = params[8];
572 /* res is given in units per inch; convert to units per mm (2/51 ~= 1/25.4) */
573 raw_info->res = get_unaligned_be16(&params[13]) * 2 / 51;
574
575 return ret;
576}
577
578static int hidpp_touchpad_set_raw_report_state(struct hidpp_device *hidpp_dev,
579 u8 feature_index, bool send_raw_reports,
580 bool sensor_enhanced_settings)
581{
582 struct hidpp_report response;
583
584 /*
585 * Params:
586 * bit 0 - enable raw
587 * bit 1 - 16bit Z, no area
588 * bit 2 - enhanced sensitivity
589 * bit 3 - width, height (4 bits each) instead of area
590 * bit 4 - send raw + gestures (degrades smoothness)
591 * remaining bits - reserved
592 */
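	/* e.g. raw reports plus enhanced sensitivity: 0x01 | (1 << 2) = 0x05 */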
593 u8 params = send_raw_reports | (sensor_enhanced_settings << 2);
594
595 return hidpp_send_fap_command_sync(hidpp_dev, feature_index,
596 CMD_TOUCHPAD_SET_RAW_REPORT_STATE, &params, 1, &response);
597}
598
599static void hidpp_touchpad_touch_event(u8 *data,
600 struct hidpp_touchpad_raw_xy_finger *finger)
601{
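	/*
	 * X and Y are 14-bit values: data[0]/data[2] hold bits 13..8 in
	 * their low six bits (the top two bits carry the contact type and
	 * status), data[1]/data[3] hold bits 7..0.  The u8 "<< 2" drops the
	 * two status bits; after integer promotion the "<< 6" re-aligns the
	 * rest, i.e. x = ((data[0] & 0x3f) << 8) | data[1].
	 */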
602 u8 x_m = data[0] << 2;
603 u8 y_m = data[2] << 2;
604
605 finger->x = x_m << 6 | data[1];
606 finger->y = y_m << 6 | data[3];
607
608 finger->contact_type = data[0] >> 6;
609 finger->contact_status = data[2] >> 6;
610
611 finger->z = data[4];
612 finger->area = data[5];
613 finger->finger_id = data[6] >> 4;
614}
615
616static void hidpp_touchpad_raw_xy_event(struct hidpp_device *hidpp_dev,
617 u8 *data, struct hidpp_touchpad_raw_xy *raw_xy)
618{
619 memset(raw_xy, 0, sizeof(struct hidpp_touchpad_raw_xy));
620 raw_xy->end_of_frame = data[8] & 0x01;
621 raw_xy->spurious_flag = (data[8] >> 1) & 0x01;
622 raw_xy->finger_count = data[15] & 0x0f;
623 raw_xy->button = (data[8] >> 2) & 0x01;
624
625 if (raw_xy->finger_count) {
626 hidpp_touchpad_touch_event(&data[2], &raw_xy->fingers[0]);
627 hidpp_touchpad_touch_event(&data[9], &raw_xy->fingers[1]);
628 }
629}
630
631/* ************************************************************************** */
632/* */
633/* Device Support */
634/* */
635/* ************************************************************************** */
636
637/* -------------------------------------------------------------------------- */
638/* Touchpad HID++ devices */
639/* -------------------------------------------------------------------------- */
640
641#define WTP_MANUAL_RESOLUTION 39
642
643struct wtp_data {
644 struct input_dev *input;
645 u16 x_size, y_size;
646 u8 finger_count;
647 u8 mt_feature_index;
648 u8 button_feature_index;
649 u8 maxcontacts;
650 bool flip_y;
651 unsigned int resolution;
652};
653
654static int wtp_input_mapping(struct hid_device *hdev, struct hid_input *hi,
655 struct hid_field *field, struct hid_usage *usage,
656 unsigned long **bit, int *max)
657{
658 struct hidpp_device *hidpp = hid_get_drvdata(hdev);
659
660 if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) &&
661 (field->application == HID_GD_KEYBOARD))
662 return 0;
663
664 return -1;
665}
666
667static void wtp_populate_input(struct hidpp_device *hidpp,
668 struct input_dev *input_dev, bool origin_is_hid_core)
669{
670 struct wtp_data *wd = hidpp->private_data;
671
672 if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) && origin_is_hid_core)
673 /* this is the generic hid-input call */
674 return;
675
676 __set_bit(EV_ABS, input_dev->evbit);
677 __set_bit(EV_KEY, input_dev->evbit);
678 __clear_bit(EV_REL, input_dev->evbit);
679 __clear_bit(EV_LED, input_dev->evbit);
680
681 input_set_abs_params(input_dev, ABS_MT_POSITION_X, 0, wd->x_size, 0, 0);
682 input_abs_set_res(input_dev, ABS_MT_POSITION_X, wd->resolution);
683 input_set_abs_params(input_dev, ABS_MT_POSITION_Y, 0, wd->y_size, 0, 0);
684 input_abs_set_res(input_dev, ABS_MT_POSITION_Y, wd->resolution);
685
686 /* Max pressure is not given by the devices, pick one */
687 input_set_abs_params(input_dev, ABS_MT_PRESSURE, 0, 50, 0, 0);
688
689 input_set_capability(input_dev, EV_KEY, BTN_LEFT);
690
691 if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS)
692 input_set_capability(input_dev, EV_KEY, BTN_RIGHT);
693 else
694 __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit);
695
696 input_mt_init_slots(input_dev, wd->maxcontacts, INPUT_MT_POINTER |
697 INPUT_MT_DROP_UNUSED);
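	/*
	 * INPUT_MT_DROP_UNUSED makes input_mt_sync_frame() release any slot
	 * that was not updated in the current frame, matching the device's
	 * "only active fingers are reported" behaviour.
	 */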
698
699 wd->input = input_dev;
700}
701
702static void wtp_touch_event(struct wtp_data *wd,
703 struct hidpp_touchpad_raw_xy_finger *touch_report)
704{
705 int slot;
706
707 if (!touch_report->finger_id || touch_report->contact_type)
708 /* no actual data */
709 return;
710
711 slot = input_mt_get_slot_by_key(wd->input, touch_report->finger_id);
712
713 input_mt_slot(wd->input, slot);
714 input_mt_report_slot_state(wd->input, MT_TOOL_FINGER,
715 touch_report->contact_status);
716 if (touch_report->contact_status) {
717 input_event(wd->input, EV_ABS, ABS_MT_POSITION_X,
718 touch_report->x);
719 input_event(wd->input, EV_ABS, ABS_MT_POSITION_Y,
720 wd->flip_y ? wd->y_size - touch_report->y :
721 touch_report->y);
722 input_event(wd->input, EV_ABS, ABS_MT_PRESSURE,
723 touch_report->area);
724 }
725}
726
727static void wtp_send_raw_xy_event(struct hidpp_device *hidpp,
728 struct hidpp_touchpad_raw_xy *raw)
729{
730 struct wtp_data *wd = hidpp->private_data;
731 int i;
732
733 for (i = 0; i < 2; i++)
734 wtp_touch_event(wd, &(raw->fingers[i]));
735
736 if (raw->end_of_frame &&
737 !(hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS))
738 input_event(wd->input, EV_KEY, BTN_LEFT, raw->button);
739
740 if (raw->end_of_frame || raw->finger_count <= 2) {
741 input_mt_sync_frame(wd->input);
742 input_sync(wd->input);
743 }
744}
745
746static int wtp_mouse_raw_xy_event(struct hidpp_device *hidpp, u8 *data)
747{
748 struct wtp_data *wd = hidpp->private_data;
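	/*
	 * data[7]/data[13] pack each contact's width and height as two
	 * 4-bit nibbles; (w^2 + h^2) / 2 approximates a contact area.
	 */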
749 u8 c1_area = ((data[7] & 0xf) * (data[7] & 0xf) +
750 (data[7] >> 4) * (data[7] >> 4)) / 2;
751 u8 c2_area = ((data[13] & 0xf) * (data[13] & 0xf) +
752 (data[13] >> 4) * (data[13] >> 4)) / 2;
753 struct hidpp_touchpad_raw_xy raw = {
754 .timestamp = data[1],
755 .fingers = {
756 {
757 .contact_type = 0,
758 .contact_status = !!data[7],
759 .x = get_unaligned_le16(&data[3]),
760 .y = get_unaligned_le16(&data[5]),
761 .z = c1_area,
762 .area = c1_area,
763 .finger_id = data[2],
764 }, {
765 .contact_type = 0,
766 .contact_status = !!data[13],
767 .x = get_unaligned_le16(&data[9]),
768 .y = get_unaligned_le16(&data[11]),
769 .z = c2_area,
770 .area = c2_area,
771 .finger_id = data[8],
772 }
773 },
774 .finger_count = wd->maxcontacts,
775 .spurious_flag = 0,
776 .end_of_frame = (data[0] >> 7) == 0,
777 .button = data[0] & 0x01,
778 };
779
780 wtp_send_raw_xy_event(hidpp, &raw);
781
782 return 1;
783}
784
785static int wtp_raw_event(struct hid_device *hdev, u8 *data, int size)
786{
787 struct hidpp_device *hidpp = hid_get_drvdata(hdev);
788 struct wtp_data *wd = hidpp->private_data;
789 struct hidpp_report *report = (struct hidpp_report *)data;
790 struct hidpp_touchpad_raw_xy raw;
791
792 if (!wd || !wd->input)
793 return 1;
794
795 switch (data[0]) {
796 case 0x02:
797 if (hidpp->quirks & HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS) {
798 input_event(wd->input, EV_KEY, BTN_LEFT,
799 !!(data[1] & 0x01));
800 input_event(wd->input, EV_KEY, BTN_RIGHT,
801 !!(data[1] & 0x02));
802 input_sync(wd->input);
			/* the 0x02 report is fully handled; don't fall through */
			return 0;
803 } else {
804 if (size < 21)
805 return 1;
806 return wtp_mouse_raw_xy_event(hidpp, &data[7]);
807 }
808 case REPORT_ID_HIDPP_LONG:
809 if ((report->fap.feature_index != wd->mt_feature_index) ||
810 (report->fap.funcindex_clientid != EVENT_TOUCHPAD_RAW_XY))
811 return 1;
812 hidpp_touchpad_raw_xy_event(hidpp, data + 4, &raw);
813
814 wtp_send_raw_xy_event(hidpp, &raw);
815 return 0;
816 }
817
818 return 0;
819}
820
821static int wtp_get_config(struct hidpp_device *hidpp)
822{
823 struct wtp_data *wd = hidpp->private_data;
824 struct hidpp_touchpad_raw_info raw_info = {0};
825 u8 feature_type;
826 int ret;
827
828 ret = hidpp_root_get_feature(hidpp, HIDPP_PAGE_TOUCHPAD_RAW_XY,
829 &wd->mt_feature_index, &feature_type);
830 if (ret)
831 /* means that the device is not powered up */
832 return ret;
833
834 ret = hidpp_touchpad_get_raw_info(hidpp, wd->mt_feature_index,
835 &raw_info);
836 if (ret)
837 return ret;
838
839 wd->x_size = raw_info.x_size;
840 wd->y_size = raw_info.y_size;
841 wd->maxcontacts = raw_info.maxcontacts;
842 wd->flip_y = raw_info.origin == TOUCHPAD_RAW_XY_ORIGIN_LOWER_LEFT;
843 wd->resolution = raw_info.res;
844 if (!wd->resolution)
845 wd->resolution = WTP_MANUAL_RESOLUTION;
846
847 return 0;
848}
849
850static int wtp_allocate(struct hid_device *hdev, const struct hid_device_id *id)
851{
852 struct hidpp_device *hidpp = hid_get_drvdata(hdev);
853 struct wtp_data *wd;
854
855 wd = devm_kzalloc(&hdev->dev, sizeof(struct wtp_data),
856 GFP_KERNEL);
857 if (!wd)
858 return -ENOMEM;
859
860 hidpp->private_data = wd;
861
862 return 0;
863}
864
865static void wtp_connect(struct hid_device *hdev, bool connected)
866{
867 struct hidpp_device *hidpp = hid_get_drvdata(hdev);
868 struct wtp_data *wd = hidpp->private_data;
869 int ret;
870
871 if (!connected)
872 return;
873
874 if (!wd->x_size) {
875 ret = wtp_get_config(hidpp);
876 if (ret) {
877 hid_err(hdev, "Cannot get wtp config: %d\n", ret);
878 return;
879 }
880 }
881
882 hidpp_touchpad_set_raw_report_state(hidpp, wd->mt_feature_index,
883 true, true);
884}
885
886/* -------------------------------------------------------------------------- */
887/* Generic HID++ devices */
888/* -------------------------------------------------------------------------- */
889
890static int hidpp_input_mapping(struct hid_device *hdev, struct hid_input *hi,
891 struct hid_field *field, struct hid_usage *usage,
892 unsigned long **bit, int *max)
893{
894 struct hidpp_device *hidpp = hid_get_drvdata(hdev);
895
896 if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)
897 return wtp_input_mapping(hdev, hi, field, usage, bit, max);
898
899 return 0;
900}
901
902static void hidpp_populate_input(struct hidpp_device *hidpp,
903 struct input_dev *input, bool origin_is_hid_core)
904{
905 if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)
906 wtp_populate_input(hidpp, input, origin_is_hid_core);
907}
908
909static void hidpp_input_configured(struct hid_device *hdev,
910 struct hid_input *hidinput)
911{
912 struct hidpp_device *hidpp = hid_get_drvdata(hdev);
913 struct input_dev *input = hidinput->input;
914
915 hidpp_populate_input(hidpp, input, true);
916}
917
918static int hidpp_raw_hidpp_event(struct hidpp_device *hidpp, u8 *data,
919 int size)
920{
921 struct hidpp_report *question = hidpp->send_receive_buf;
922 struct hidpp_report *answer = hidpp->send_receive_buf;
923 struct hidpp_report *report = (struct hidpp_report *)data;
924
925 /*
926 * If the mutex is locked then we have a pending answer from a
927 * previously sent command
928 */
929 if (unlikely(mutex_is_locked(&hidpp->send_mutex))) {
930 /*
931 * Check for a correct hidpp20 answer or the corresponding
932 * error
933 */
934 if (hidpp_match_answer(question, report) ||
935 hidpp_match_error(question, report)) {
936 *answer = *report;
937 hidpp->answer_available = true;
938 wake_up(&hidpp->wait);
939 /*
940 * This was an answer to a command that this driver sent.
941 * We return 1 to hid-core to avoid forwarding the
942 * report upstream, as it has been handled by the driver.
943 */
944
945 return 1;
946 }
947 }
948
949 if (unlikely(hidpp_report_is_connect_event(report))) {
950 atomic_set(&hidpp->connected,
951 !(report->rap.params[0] & (1 << 6)));
952 if ((hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT) &&
953 (schedule_work(&hidpp->work) == 0))
954 dbg_hid("%s: connect event already queued\n", __func__);
955 return 1;
956 }
957
958 if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)
959 return wtp_raw_event(hidpp->hid_dev, data, size);
960
961 return 0;
962}
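/*
 * Note that question and answer above may alias the same
 * send_receive_buf: the waiter in hidpp_send_*_command_sync() holds
 * send_mutex until the copied answer wakes it up, so overwriting the
 * question with the answer is safe.
 */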
963
964static int hidpp_raw_event(struct hid_device *hdev, struct hid_report *report,
965 u8 *data, int size)
966{
967 struct hidpp_device *hidpp = hid_get_drvdata(hdev);
968
969 switch (data[0]) {
970 case REPORT_ID_HIDPP_LONG:
971 if (size != HIDPP_REPORT_LONG_LENGTH) {
972 hid_err(hdev, "received hid++ report of bad size (%d)\n",
973 size);
974 return 1;
975 }
976 return hidpp_raw_hidpp_event(hidpp, data, size);
977 case REPORT_ID_HIDPP_SHORT:
978 if (size != HIDPP_REPORT_SHORT_LENGTH) {
979 hid_err(hdev, "received hid++ report of bad size (%d)\n",
980 size);
981 return 1;
982 }
983 return hidpp_raw_hidpp_event(hidpp, data, size);
984 }
985
986 if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)
987 return wtp_raw_event(hdev, data, size);
988
989 return 0;
990}
991
992static void hidpp_overwrite_name(struct hid_device *hdev, bool use_unifying)
993{
994 struct hidpp_device *hidpp = hid_get_drvdata(hdev);
995 char *name;
996
997 if (use_unifying)
998 /*
999 * the device is connected through a Unifying receiver, and
1000 * might not be already connected.
1001 * Ask the receiver for its name.
1002 */
1003 name = hidpp_get_unifying_name(hidpp);
1004 else
1005 name = hidpp_get_device_name(hidpp);
1006
1007 if (!name)
1008 hid_err(hdev, "unable to retrieve the name of the device\n");
1009 else
1010 snprintf(hdev->name, sizeof(hdev->name), "%s", name);
1011
1012 kfree(name);
1013}
1014
1015static int hidpp_input_open(struct input_dev *dev)
1016{
1017 struct hid_device *hid = input_get_drvdata(dev);
1018
1019 return hid_hw_open(hid);
1020}
1021
1022static void hidpp_input_close(struct input_dev *dev)
1023{
1024 struct hid_device *hid = input_get_drvdata(dev);
1025
1026 hid_hw_close(hid);
1027}
1028
1029static struct input_dev *hidpp_allocate_input(struct hid_device *hdev)
1030{
1031 struct input_dev *input_dev = devm_input_allocate_device(&hdev->dev);
1032
1033 if (!input_dev)
1034 return NULL;
1035
1036 input_set_drvdata(input_dev, hdev);
1037 input_dev->open = hidpp_input_open;
1038 input_dev->close = hidpp_input_close;
1039
1040 input_dev->name = hdev->name;
1041 input_dev->phys = hdev->phys;
1042 input_dev->uniq = hdev->uniq;
1043 input_dev->id.bustype = hdev->bus;
1044 input_dev->id.vendor = hdev->vendor;
1045 input_dev->id.product = hdev->product;
1046 input_dev->id.version = hdev->version;
1047 input_dev->dev.parent = &hdev->dev;
1048
1049 return input_dev;
1050}
1051
1052static void hidpp_connect_event(struct hidpp_device *hidpp)
1053{
1054 struct hid_device *hdev = hidpp->hid_dev;
1055 int ret = 0;
1056 bool connected = atomic_read(&hidpp->connected);
1057 struct input_dev *input;
1058 char *name, *devm_name;
1059
1060 if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)
1061 wtp_connect(hdev, connected);
1062
1063 if (!connected || hidpp->delayed_input)
1064 return;
1065
1066 if (!hidpp->protocol_major) {
1067 ret = !hidpp_is_connected(hidpp);
1068 if (ret) {
1069 hid_err(hdev, "Cannot get the protocol version.\n");
1070 return;
1071 }
1072 }
1073
1074 /* the device is connected, so we can now ask for its name and
1075 * protocol version */
1076 hid_info(hdev, "HID++ %u.%u device connected.\n",
1077 hidpp->protocol_major, hidpp->protocol_minor);
1078
1079 input = hidpp_allocate_input(hdev);
1080 if (!input) {
1081 hid_err(hdev, "cannot allocate new input device\n");
1082 return;
1083 }
1084
1085 name = hidpp_get_device_name(hidpp);
1086 if (!name) {
1087 hid_err(hdev, "unable to retrieve the name of the device\n");
1088 } else {
1089 devm_name = devm_kasprintf(&hdev->dev, GFP_KERNEL, "%s", name);
1090 if (devm_name)
1091 input->name = devm_name;
1092 kfree(name);
1093 }
1094
1095 hidpp_populate_input(hidpp, input, false);
1096
1097 ret = input_register_device(input);
1098 if (ret) {
1099 input_free_device(input);
		/* don't keep a pointer to a device we just freed */
		return;
	}
1100
1101 hidpp->delayed_input = input;
1102}
1103
1104static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
1105{
1106 struct hidpp_device *hidpp;
1107 int ret;
1108 bool connected;
1109 unsigned int connect_mask = HID_CONNECT_DEFAULT;
1110
1111 hidpp = devm_kzalloc(&hdev->dev, sizeof(struct hidpp_device),
1112 GFP_KERNEL);
1113 if (!hidpp)
1114 return -ENOMEM;
1115
1116 hidpp->hid_dev = hdev;
1117 hid_set_drvdata(hdev, hidpp);
1118
1119 hidpp->quirks = id->driver_data;
1120
1121 if (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP) {
1122 ret = wtp_allocate(hdev, id);
1123 if (ret)
1124 goto wtp_allocate_fail;
1125 }
1126
1127 INIT_WORK(&hidpp->work, delayed_work_cb);
1128 mutex_init(&hidpp->send_mutex);
1129 init_waitqueue_head(&hidpp->wait);
1130
1131 ret = hid_parse(hdev);
1132 if (ret) {
1133 hid_err(hdev, "%s: parse failed\n", __func__);
1134 goto hid_parse_fail;
1135 }
1136
1137 /* Allow incoming packets */
1138 hid_device_io_start(hdev);
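	/*
	 * Starting I/O before hid_hw_start() is deliberate: it lets
	 * hidpp_raw_event() run during probe so the synchronous HID++
	 * queries below can receive their answers.
	 */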
1139
1140 connected = hidpp_is_connected(hidpp);
1141 if (id->group != HID_GROUP_LOGITECH_DJ_DEVICE) {
1142 if (!connected) {
1143 hid_err(hdev, "Device not connected\n");
1144 hid_device_io_stop(hdev);
1145 goto hid_parse_fail;
1146 }
1147
1148 hid_info(hdev, "HID++ %u.%u device connected.\n",
1149 hidpp->protocol_major, hidpp->protocol_minor);
1150 }
1151
1152 hidpp_overwrite_name(hdev, id->group == HID_GROUP_LOGITECH_DJ_DEVICE);
1153 atomic_set(&hidpp->connected, connected);
1154
1155 if (connected && (hidpp->quirks & HIDPP_QUIRK_CLASS_WTP)) {
1156 ret = wtp_get_config(hidpp);
1157 if (ret)
1158 goto hid_parse_fail;
1159 }
1160
1161 /* Block incoming packets */
1162 hid_device_io_stop(hdev);
1163
1164 if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT)
1165 connect_mask &= ~HID_CONNECT_HIDINPUT;
1166
1167 /* Re-enable hidinput for multi-input devices */
1168 if (hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT)
1169 connect_mask |= HID_CONNECT_HIDINPUT;
1170
1171 ret = hid_hw_start(hdev, connect_mask);
1172 if (ret) {
1173 hid_err(hdev, "%s: hid_hw_start returned error\n", __func__);
1174 goto hid_hw_start_fail;
1175 }
1176
1177 if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT) {
1178 /* Allow incoming packets */
1179 hid_device_io_start(hdev);
1180
1181 hidpp_connect_event(hidpp);
1182 }
1183
1184 return ret;
1185
1186hid_hw_start_fail:
1187hid_parse_fail:
1188 cancel_work_sync(&hidpp->work);
1189 mutex_destroy(&hidpp->send_mutex);
1190wtp_allocate_fail:
1191 hid_set_drvdata(hdev, NULL);
1192 return ret;
1193}
1194
1195static void hidpp_remove(struct hid_device *hdev)
1196{
1197 struct hidpp_device *hidpp = hid_get_drvdata(hdev);
1198
1199 cancel_work_sync(&hidpp->work);
1200 mutex_destroy(&hidpp->send_mutex);
1201 hid_hw_stop(hdev);
1202}
1203
1204static const struct hid_device_id hidpp_devices[] = {
1205 { /* wireless touchpad */
1206 HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
1207 USB_VENDOR_ID_LOGITECH, 0x4011),
1208 .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT |
1209 HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS },
1210 { /* wireless touchpad T650 */
1211 HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
1212 USB_VENDOR_ID_LOGITECH, 0x4101),
1213 .driver_data = HIDPP_QUIRK_CLASS_WTP | HIDPP_QUIRK_DELAYED_INIT },
1214 { /* wireless touchpad T651 */
1215 HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
1216 USB_DEVICE_ID_LOGITECH_T651),
1217 .driver_data = HIDPP_QUIRK_CLASS_WTP },
1218 { /* Keyboard TK820 */
1219 HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
1220 USB_VENDOR_ID_LOGITECH, 0x4102),
1221 .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_MULTI_INPUT |
1222 HIDPP_QUIRK_CLASS_WTP },
1223
1224 { HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
1225 USB_VENDOR_ID_LOGITECH, HID_ANY_ID)},
1226 {}
1227};
1228
1229MODULE_DEVICE_TABLE(hid, hidpp_devices);
1230
1231static struct hid_driver hidpp_driver = {
1232 .name = "logitech-hidpp-device",
1233 .id_table = hidpp_devices,
1234 .probe = hidpp_probe,
1235 .remove = hidpp_remove,
1236 .raw_event = hidpp_raw_event,
1237 .input_configured = hidpp_input_configured,
1238 .input_mapping = hidpp_input_mapping,
1239};
1240
1241module_hid_driver(hidpp_driver);
diff --git a/drivers/hid/hid-microsoft.c b/drivers/hid/hid-microsoft.c
index 8ba17a946f2a..cacda43f6a6f 100644
--- a/drivers/hid/hid-microsoft.c
+++ b/drivers/hid/hid-microsoft.c
@@ -274,6 +274,8 @@ static const struct hid_device_id ms_devices[] = {
 		.driver_data = MS_NOGET },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_COMFORT_MOUSE_4500),
 		.driver_data = MS_DUPLICATE_USAGES },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3),
+		.driver_data = MS_HIDINPUT },
 
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_PRESENTER_8K_BT),
 		.driver_data = MS_PRESENTER },
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 51e25b9407f2..f65e78b46999 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -67,6 +67,7 @@ MODULE_LICENSE("GPL");
 #define MT_QUIRK_IGNORE_DUPLICATES	(1 << 10)
 #define MT_QUIRK_HOVERING		(1 << 11)
 #define MT_QUIRK_CONTACT_CNT_ACCURATE	(1 << 12)
+#define MT_QUIRK_FORCE_GET_FEATURE	(1 << 13)
 
 #define MT_INPUTMODE_TOUCHSCREEN	0x02
 #define MT_INPUTMODE_TOUCHPAD	0x03
@@ -150,6 +151,7 @@ static void mt_post_parse(struct mt_device *td);
 #define MT_CLS_FLATFROG				0x0107
 #define MT_CLS_GENERALTOUCH_TWOFINGERS		0x0108
 #define MT_CLS_GENERALTOUCH_PWT_TENFINGERS	0x0109
+#define MT_CLS_VTL				0x0110
 
 #define MT_DEFAULT_MAXCONTACT	10
 #define MT_MAX_MAXCONTACT	250
@@ -255,6 +257,11 @@ static struct mt_class mt_classes[] = {
 		.sn_move = 2048,
 		.maxcontacts = 40,
 	},
+	{ .name = MT_CLS_VTL,
+		.quirks = MT_QUIRK_ALWAYS_VALID |
+			MT_QUIRK_CONTACT_CNT_ACCURATE |
+			MT_QUIRK_FORCE_GET_FEATURE,
+	},
 	{ }
 };
 
@@ -809,6 +816,9 @@ static void mt_set_input_mode(struct hid_device *hdev)
 	struct mt_device *td = hid_get_drvdata(hdev);
 	struct hid_report *r;
 	struct hid_report_enum *re;
+	struct mt_class *cls = &td->mtclass;
+	char *buf;
+	int report_len;
 
 	if (td->inputmode < 0)
 		return;
@@ -816,6 +826,18 @@ static void mt_set_input_mode(struct hid_device *hdev)
 	re = &(hdev->report_enum[HID_FEATURE_REPORT]);
 	r = re->report_id_hash[td->inputmode];
 	if (r) {
+		if (cls->quirks & MT_QUIRK_FORCE_GET_FEATURE) {
+			report_len = hid_report_len(r);
+			buf = hid_alloc_report_buf(r, GFP_KERNEL);
+			if (!buf) {
+				hid_err(hdev, "failed to allocate buffer for report\n");
+				return;
+			}
+			hid_hw_raw_request(hdev, r->id, buf, report_len,
+					HID_FEATURE_REPORT,
+					HID_REQ_GET_REPORT);
+			kfree(buf);
+		}
 		r->field[0]->value[td->inputmode_index] = td->inputmode_value;
 		hid_hw_request(hdev, r, HID_REQ_SET_REPORT);
 	}
@@ -1281,6 +1303,11 @@ static const struct hid_device_id mt_devices[] = {
 		MT_USB_DEVICE(USB_VENDOR_ID_UNITEC,
 			USB_DEVICE_ID_UNITEC_USB_TOUCH_0A19) },
 
+	/* VTL panels */
+	{ .driver_data = MT_CLS_VTL,
+		MT_USB_DEVICE(USB_VENDOR_ID_VTL,
+			USB_DEVICE_ID_VTL_MULTITOUCH_FF3F) },
+
 	/* Wistron panels */
 	{ .driver_data = MT_CLS_NSMU,
 		MT_USB_DEVICE(USB_VENDOR_ID_WISTRON,
diff --git a/drivers/hid/hid-plantronics.c b/drivers/hid/hid-plantronics.c
new file mode 100644
index 000000000000..2180e0789b76
--- /dev/null
+++ b/drivers/hid/hid-plantronics.c
@@ -0,0 +1,55 @@
1/*
2 * Plantronics USB HID Driver
3 *
4 * Copyright (c) 2014 JD Cole <jd.cole@plantronics.com>
5 * Copyright (c) 2014 Terry Junge <terry.junge@plantronics.com>
6 */
7
8/*
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the Free
11 * Software Foundation; either version 2 of the License, or (at your option)
12 * any later version.
13 */
14
15#include "hid-ids.h"
16
17#include <linux/hid.h>
18#include <linux/module.h>
19
20static int plantronics_input_mapping(struct hid_device *hdev,
21 struct hid_input *hi,
22 struct hid_field *field,
23 struct hid_usage *usage,
24 unsigned long **bit, int *max)
25{
26 if (field->application == HID_CP_CONSUMERCONTROL
27 && (usage->hid & HID_USAGE_PAGE) == HID_UP_CONSUMER) {
28 hid_dbg(hdev, "usage: %08x (appl: %08x) - defaulted\n",
29 usage->hid, field->application);
30 return 0;
31 }
32
33 hid_dbg(hdev, "usage: %08x (appl: %08x) - ignored\n",
34 usage->hid, field->application);
35
36 return -1;
37}
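/*
 * Returning 0 keeps the default hid-input mapping for consumer-control
 * usages; returning -1 tells hid-core to ignore every other usage.
 */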
38
39static const struct hid_device_id plantronics_devices[] = {
40 { HID_USB_DEVICE(USB_VENDOR_ID_PLANTRONICS, HID_ANY_ID) },
41 { }
42};
43MODULE_DEVICE_TABLE(hid, plantronics_devices);
44
45static struct hid_driver plantronics_driver = {
46 .name = "plantronics",
47 .id_table = plantronics_devices,
48 .input_mapping = plantronics_input_mapping,
49};
50module_hid_driver(plantronics_driver);
51
52MODULE_AUTHOR("JD Cole <jd.cole@plantronics.com>");
53MODULE_AUTHOR("Terry Junge <terry.junge@plantronics.com>");
54MODULE_DESCRIPTION("Plantronics USB HID Driver");
55MODULE_LICENSE("GPL");
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 3cccff73b9b9..b51200fe2f33 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -584,11 +584,15 @@ static int rmi_populate_f11(struct hid_device *hdev)
 	bool has_query10 = false;
 	bool has_query11;
 	bool has_query12;
+	bool has_query27;
+	bool has_query28;
+	bool has_query36 = false;
 	bool has_physical_props;
 	bool has_gestures;
 	bool has_rel;
+	bool has_data40 = false;
 	unsigned x_size, y_size;
-	u16 query12_offset;
+	u16 query_offset;
 
 	if (!data->f11.query_base_addr) {
 		hid_err(hdev, "No 2D sensor found, giving up.\n");
@@ -604,6 +608,8 @@ static int rmi_populate_f11(struct hid_device *hdev)
 	has_query9 = !!(buf[0] & BIT(3));
 	has_query11 = !!(buf[0] & BIT(4));
 	has_query12 = !!(buf[0] & BIT(5));
+	has_query27 = !!(buf[0] & BIT(6));
+	has_query28 = !!(buf[0] & BIT(7));
 
 	/* query 1 to get the max number of fingers */
 	ret = rmi_read(hdev, data->f11.query_base_addr + 1, buf);
@@ -626,43 +632,43 @@ static int rmi_populate_f11(struct hid_device *hdev)
 	has_rel = !!(buf[0] & BIT(3));
 	has_gestures = !!(buf[0] & BIT(5));
 
+	/*
+	 * At least 4 queries are guaranteed to be present in F11
+	 * +1 for query 5 which is present since absolute events are
+	 * reported and +1 for query 12.
+	 */
+	query_offset = 6;
+
+	if (has_rel)
+		++query_offset; /* query 6 is present */
+
 	if (has_gestures) {
 		/* query 8 to find out if query 10 exists */
-		ret = rmi_read(hdev, data->f11.query_base_addr + 8, buf);
+		ret = rmi_read(hdev,
+			data->f11.query_base_addr + query_offset + 1, buf);
 		if (ret) {
 			hid_err(hdev, "can not read gesture information: %d.\n",
 				ret);
 			return ret;
 		}
 		has_query10 = !!(buf[0] & BIT(2));
-	}
 
-	/*
-	 * At least 4 queries are guaranteed to be present in F11
-	 * +1 for query 5 which is present since absolute events are
-	 * reported and +1 for query 12.
-	 */
-	query12_offset = 6;
-
-	if (has_rel)
-		++query12_offset; /* query 6 is present */
-
-	if (has_gestures)
-		query12_offset += 2; /* query 7 and 8 are present */
+		query_offset += 2; /* query 7 and 8 are present */
+	}
 
 	if (has_query9)
-		++query12_offset;
+		++query_offset;
 
 	if (has_query10)
-		++query12_offset;
+		++query_offset;
 
 	if (has_query11)
-		++query12_offset;
+		++query_offset;
 
 	/* query 12 to know if the physical properties are reported */
 	if (has_query12) {
 		ret = rmi_read(hdev, data->f11.query_base_addr
-				+ query12_offset, buf);
+				+ query_offset, buf);
 		if (ret) {
 			hid_err(hdev, "can not get query 12: %d.\n", ret);
 			return ret;
@@ -670,9 +676,10 @@ static int rmi_populate_f11(struct hid_device *hdev)
 		has_physical_props = !!(buf[0] & BIT(5));
 
 		if (has_physical_props) {
+			query_offset += 1;
 			ret = rmi_read_block(hdev,
 					data->f11.query_base_addr
-					+ query12_offset + 1, buf, 4);
+					+ query_offset, buf, 4);
 			if (ret) {
 				hid_err(hdev, "can not read query 15-18: %d.\n",
 					ret);
@@ -687,9 +694,45 @@ static int rmi_populate_f11(struct hid_device *hdev)
 
 			hid_info(hdev, "%s: size in mm: %d x %d\n",
 				__func__, data->x_size_mm, data->y_size_mm);
+
+			/*
+			 * query 15 - 18 contain the size of the sensor
+			 * and query 19 - 26 contain bezel dimensions
+			 */
+			query_offset += 12;
+		}
+	}
+
+	if (has_query27)
+		++query_offset;
+
+	if (has_query28) {
+		ret = rmi_read(hdev, data->f11.query_base_addr
+				+ query_offset, buf);
+		if (ret) {
+			hid_err(hdev, "can not get query 28: %d.\n", ret);
+			return ret;
+		}
+
+		has_query36 = !!(buf[0] & BIT(6));
+	}
+
+	if (has_query36) {
+		query_offset += 2;
+		ret = rmi_read(hdev, data->f11.query_base_addr
+				+ query_offset, buf);
+		if (ret) {
+			hid_err(hdev, "can not get query 36: %d.\n", ret);
+			return ret;
 		}
+
+		has_data40 = !!(buf[0] & BIT(5));
 	}
 
+
+	if (has_data40)
+		data->f11.report_size += data->max_fingers * 2;
+
 	/*
 	 * retrieve the ctrl registers
 	 * the ctrl register has a size of 20 but a fw bug split it into 16 + 4,
diff --git a/drivers/hid/hid-roccat-kone.c b/drivers/hid/hid-roccat-kone.c
index 6101816a7ddd..c29265055ac1 100644
--- a/drivers/hid/hid-roccat-kone.c
+++ b/drivers/hid/hid-roccat-kone.c
@@ -46,6 +46,7 @@ static void kone_profile_activated(struct kone_device *kone, uint new_profile)
 static void kone_profile_report(struct kone_device *kone, uint new_profile)
 {
 	struct kone_roccat_report roccat_report;
+
 	roccat_report.event = kone_mouse_event_switch_profile;
 	roccat_report.value = new_profile;
 	roccat_report.key = 0;
@@ -163,6 +164,7 @@ static int kone_set_settings(struct usb_device *usb_dev,
 		struct kone_settings const *settings)
 {
 	int retval;
+
 	retval = kone_send(usb_dev, kone_command_settings,
 			settings, sizeof(struct kone_settings));
 	if (retval)
@@ -387,7 +389,7 @@ static struct bin_attribute bin_attr_profile##number = { \
 	.read = kone_sysfs_read_profilex, \
 	.write = kone_sysfs_write_profilex, \
 	.private = &profile_numbers[number-1], \
-};
+}
 PROFILE_ATTR(1);
 PROFILE_ATTR(2);
 PROFILE_ATTR(3);
@@ -456,6 +458,7 @@ static ssize_t kone_sysfs_show_tcu(struct device *dev,
 static int kone_tcu_command(struct usb_device *usb_dev, int number)
 {
 	unsigned char value;
+
 	value = number;
 	return kone_send(usb_dev, kone_command_calibrate, &value, 1);
 }
@@ -697,10 +700,8 @@ static int kone_init_specials(struct hid_device *hdev)
 			== USB_INTERFACE_PROTOCOL_MOUSE) {
 
 		kone = kzalloc(sizeof(*kone), GFP_KERNEL);
-		if (!kone) {
-			hid_err(hdev, "can't alloc device descriptor\n");
+		if (!kone)
 			return -ENOMEM;
-		}
 		hid_set_drvdata(hdev, kone);
 
 		retval = kone_init_kone_device_struct(usb_dev, kone);
diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
index 69cca1476a0c..5632c54eadf0 100644
--- a/drivers/hid/hid-saitek.c
+++ b/drivers/hid/hid-saitek.c
@@ -7,7 +7,7 @@
  * (This module is based on "hid-ortek".)
  * Copyright (c) 2012 Andreas Hübner
  *
- * R.A.T.7, M.M.O.7 (USB gaming mice):
+ * R.A.T.7, R.A.T.9, M.M.O.7 (USB gaming mice):
  * Fixes the mode button which cycles through three constantly pressed
  * buttons. All three press events are mapped to one button and the
  * missing release event is generated immediately.
@@ -179,6 +179,8 @@ static const struct hid_device_id saitek_devices[] = {
 		.driver_data = SAITEK_FIX_PS1000 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
 		.driver_data = SAITEK_RELEASE_MODE_RAT7 },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
+		.driver_data = SAITEK_RELEASE_MODE_RAT7 },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_MMO7),
 		.driver_data = SAITEK_RELEASE_MODE_MMO7 },
 	{ }
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index bc4269e559f1..31e9d2561106 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -798,6 +798,12 @@ union sixaxis_output_report_01 {
 	__u8 buf[36];
 };
 
+#define DS4_REPORT_0x02_SIZE 37
+#define DS4_REPORT_0x05_SIZE 32
+#define DS4_REPORT_0x11_SIZE 78
+#define DS4_REPORT_0x81_SIZE 7
+#define SIXAXIS_REPORT_0xF2_SIZE 18
+
 static spinlock_t sony_dev_list_lock;
 static LIST_HEAD(sony_device_list);
 static DEFINE_IDA(sony_device_id_allocator);
@@ -811,6 +817,7 @@ struct sony_sc {
 	struct work_struct state_worker;
 	struct power_supply battery;
 	int device_id;
+	__u8 *output_report_dmabuf;
 
 #ifdef CONFIG_SONY_FF
 	__u8 left;
@@ -1142,9 +1149,20 @@ static int sixaxis_set_operational_usb(struct hid_device *hdev)
 
 static int sixaxis_set_operational_bt(struct hid_device *hdev)
 {
-	unsigned char buf[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 };
-	return hid_hw_raw_request(hdev, buf[0], buf, sizeof(buf),
+	static const __u8 report[] = { 0xf4, 0x42, 0x03, 0x00, 0x00 };
+	__u8 *buf;
+	int ret;
+
+	buf = kmemdup(report, sizeof(report), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	ret = hid_hw_raw_request(hdev, buf[0], buf, sizeof(report),
 			HID_FEATURE_REPORT, HID_REQ_SET_REPORT);
+
+	kfree(buf);
+
+	return ret;
 }
 
 /*
@@ -1153,10 +1171,19 @@ static int sixaxis_set_operational_bt(struct hid_device *hdev)
  */
 static int dualshock4_set_operational_bt(struct hid_device *hdev)
 {
-	__u8 buf[37] = { 0 };
+	__u8 *buf;
+	int ret;
+
+	buf = kmalloc(DS4_REPORT_0x02_SIZE, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
 
-	return hid_hw_raw_request(hdev, 0x02, buf, sizeof(buf),
+	ret = hid_hw_raw_request(hdev, 0x02, buf, DS4_REPORT_0x02_SIZE,
 			HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+
+	kfree(buf);
+
+	return ret;
 }
 
 static void sixaxis_set_leds_from_id(int id, __u8 values[MAX_LEDS])
@@ -1471,9 +1498,7 @@ error_leds:
 
 static void sixaxis_state_worker(struct work_struct *work)
 {
-	struct sony_sc *sc = container_of(work, struct sony_sc, state_worker);
-	int n;
-	union sixaxis_output_report_01 report = {
+	static const union sixaxis_output_report_01 default_report = {
 		.buf = {
 			0x01,
 			0x00, 0xff, 0x00, 0xff, 0x00,
@@ -1485,20 +1510,27 @@ static void sixaxis_state_worker(struct work_struct *work)
 			0x00, 0x00, 0x00, 0x00, 0x00
 		}
 	};
+	struct sony_sc *sc = container_of(work, struct sony_sc, state_worker);
+	struct sixaxis_output_report *report =
+		(struct sixaxis_output_report *)sc->output_report_dmabuf;
+	int n;
+
+	/* Initialize the report with default values */
+	memcpy(report, &default_report, sizeof(struct sixaxis_output_report));
 
 #ifdef CONFIG_SONY_FF
-	report.data.rumble.right_motor_on = sc->right ? 1 : 0;
-	report.data.rumble.left_motor_force = sc->left;
+	report->rumble.right_motor_on = sc->right ? 1 : 0;
+	report->rumble.left_motor_force = sc->left;
 #endif
 
-	report.data.leds_bitmap |= sc->led_state[0] << 1;
-	report.data.leds_bitmap |= sc->led_state[1] << 2;
-	report.data.leds_bitmap |= sc->led_state[2] << 3;
-	report.data.leds_bitmap |= sc->led_state[3] << 4;
+	report->leds_bitmap |= sc->led_state[0] << 1;
+	report->leds_bitmap |= sc->led_state[1] << 2;
+	report->leds_bitmap |= sc->led_state[2] << 3;
+	report->leds_bitmap |= sc->led_state[3] << 4;
 
 	/* Set flag for all leds off, required for 3rd party INTEC controller */
-	if ((report.data.leds_bitmap & 0x1E) == 0)
-		report.data.leds_bitmap |= 0x20;
+	if ((report->leds_bitmap & 0x1E) == 0)
+		report->leds_bitmap |= 0x20;
 
 	/*
 	 * The LEDs in the report are indexed in reverse order to their
@@ -1511,28 +1543,30 @@ static void sixaxis_state_worker(struct work_struct *work)
 	 */
 	for (n = 0; n < 4; n++) {
 		if (sc->led_delay_on[n] || sc->led_delay_off[n]) {
-			report.data.led[3 - n].duty_off = sc->led_delay_off[n];
-			report.data.led[3 - n].duty_on = sc->led_delay_on[n];
+			report->led[3 - n].duty_off = sc->led_delay_off[n];
+			report->led[3 - n].duty_on = sc->led_delay_on[n];
 		}
 	}
 
-	hid_hw_raw_request(sc->hdev, report.data.report_id, report.buf,
-			sizeof(report), HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
+	hid_hw_raw_request(sc->hdev, report->report_id, (__u8 *)report,
+			sizeof(struct sixaxis_output_report),
+			HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
 }
 
 static void dualshock4_state_worker(struct work_struct *work)
 {
 	struct sony_sc *sc = container_of(work, struct sony_sc, state_worker);
 	struct hid_device *hdev = sc->hdev;
+	__u8 *buf = sc->output_report_dmabuf;
 	int offset;
 
-	__u8 buf[78] = { 0 };
-
 	if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
+		memset(buf, 0, DS4_REPORT_0x05_SIZE);
 		buf[0] = 0x05;
 		buf[1] = 0xFF;
 		offset = 4;
 	} else {
+		memset(buf, 0, DS4_REPORT_0x11_SIZE);
 		buf[0] = 0x11;
 		buf[1] = 0xB0;
 		buf[3] = 0x0F;
@@ -1560,12 +1594,33 @@ static void dualshock4_state_worker(struct work_struct *work)
 	buf[offset++] = sc->led_delay_off[3];
 
 	if (sc->quirks & DUALSHOCK4_CONTROLLER_USB)
-		hid_hw_output_report(hdev, buf, 32);
+		hid_hw_output_report(hdev, buf, DS4_REPORT_0x05_SIZE);
 	else
-		hid_hw_raw_request(hdev, 0x11, buf, 78,
+		hid_hw_raw_request(hdev, 0x11, buf, DS4_REPORT_0x11_SIZE,
 				HID_OUTPUT_REPORT, HID_REQ_SET_REPORT);
 }
 
+static int sony_allocate_output_report(struct sony_sc *sc)
+{
+	if (sc->quirks & SIXAXIS_CONTROLLER)
+		sc->output_report_dmabuf =
+			kmalloc(sizeof(union sixaxis_output_report_01),
+				GFP_KERNEL);
+	else if (sc->quirks & DUALSHOCK4_CONTROLLER_BT)
+		sc->output_report_dmabuf = kmalloc(DS4_REPORT_0x11_SIZE,
+						GFP_KERNEL);
+	else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB)
+		sc->output_report_dmabuf = kmalloc(DS4_REPORT_0x05_SIZE,
+						GFP_KERNEL);
+	else
+		return 0;
+
+	if (!sc->output_report_dmabuf)
+		return -ENOMEM;
+
+	return 0;
+}
+
 #ifdef CONFIG_SONY_FF
 static int sony_play_effect(struct input_dev *dev, void *data,
 		struct ff_effect *effect)
@@ -1754,6 +1809,7 @@ static int sony_get_bt_devaddr(struct sony_sc *sc)
 
 static int sony_check_add(struct sony_sc *sc)
 {
+	__u8 *buf = NULL;
 	int n, ret;
 
 	if ((sc->quirks & DUALSHOCK4_CONTROLLER_BT) ||
@@ -1769,36 +1825,44 @@ static int sony_check_add(struct sony_sc *sc)
 			return 0;
 		}
 	} else if (sc->quirks & DUALSHOCK4_CONTROLLER_USB) {
-		__u8 buf[7];
+		buf = kmalloc(DS4_REPORT_0x81_SIZE, GFP_KERNEL);
+		if (!buf)
+			return -ENOMEM;
 
 		/*
 		 * The MAC address of a DS4 controller connected via USB can be
 		 * retrieved with feature report 0x81. The address begins at
 		 * offset 1.
 		 */
-		ret = hid_hw_raw_request(sc->hdev, 0x81, buf, sizeof(buf),
-				HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+		ret = hid_hw_raw_request(sc->hdev, 0x81, buf,
+				DS4_REPORT_0x81_SIZE, HID_FEATURE_REPORT,
+				HID_REQ_GET_REPORT);
 
-		if (ret != 7) {
+		if (ret != DS4_REPORT_0x81_SIZE) {
 			hid_err(sc->hdev, "failed to retrieve feature report 0x81 with the DualShock 4 MAC address\n");
-			return ret < 0 ? ret : -EINVAL;
+			ret = ret < 0 ? ret : -EINVAL;
+			goto out_free;
 		}
 
 		memcpy(sc->mac_address, &buf[1], sizeof(sc->mac_address));
 	} else if (sc->quirks & SIXAXIS_CONTROLLER_USB) {
-		__u8 buf[18];
+		buf = kmalloc(SIXAXIS_REPORT_0xF2_SIZE, GFP_KERNEL);
+		if (!buf)
+			return -ENOMEM;
 
 		/*
 		 * The MAC address of a Sixaxis controller connected via USB can
 		 * be retrieved with feature report 0xf2. The address begins at
 		 * offset 4.
 		 */
-		ret = hid_hw_raw_request(sc->hdev, 0xf2, buf, sizeof(buf),
-				HID_FEATURE_REPORT, HID_REQ_GET_REPORT);
+		ret = hid_hw_raw_request(sc->hdev, 0xf2, buf,
+				SIXAXIS_REPORT_0xF2_SIZE, HID_FEATURE_REPORT,
+				HID_REQ_GET_REPORT);
 
-		if (ret != 18) {
+		if (ret != SIXAXIS_REPORT_0xF2_SIZE) {
 			hid_err(sc->hdev, "failed to retrieve feature report 0xf2 with the Sixaxis MAC address\n");
-			return ret < 0 ? ret : -EINVAL;
+			ret = ret < 0 ? ret : -EINVAL;
+			goto out_free;
 		}
 
 		/*
@@ -1811,7 +1875,13 @@ static int sony_check_add(struct sony_sc *sc)
 		return 0;
 	}
 
-	return sony_check_add_dev_list(sc);
+	ret = sony_check_add_dev_list(sc);
+
+out_free:
+
+	kfree(buf);
+
+	return ret;
 }
 
 static int sony_set_device_id(struct sony_sc *sc)
@@ -1895,6 +1965,12 @@ static int sony_probe(struct hid_device *hdev, const struct hid_device_id *id)
 		return ret;
 	}
 
+	ret = sony_allocate_output_report(sc);
+	if (ret < 0) {
+		hid_err(hdev, "failed to allocate the output report buffer\n");
+		goto err_stop;
+	}
+
 	ret = sony_set_device_id(sc);
 	if (ret < 0) {
 		hid_err(hdev, "failed to allocate the device id\n");
@@ -1984,6 +2060,7 @@ err_stop:
 	if (sc->quirks & SONY_BATTERY_SUPPORT)
 		sony_battery_remove(sc);
 	sony_cancel_work_sync(sc);
+	kfree(sc->output_report_dmabuf);
 	sony_remove_dev_list(sc);
 	sony_release_device_id(sc);
 	hid_hw_stop(hdev);
@@ -2004,6 +2081,8 @@ static void sony_remove(struct hid_device *hdev)
 
 	sony_cancel_work_sync(sc);
 
+	kfree(sc->output_report_dmabuf);
+
 	sony_remove_dev_list(sc);
 
 	sony_release_device_id(sc);
@@ -2034,6 +2113,9 @@ static const struct hid_device_id sony_devices[] = {
 	/* Logitech Harmony Adapter for PS3 */
 	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_HARMONY_PS3),
 		.driver_data = PS3REMOTE },
+	/* SMK-Link PS3 BD Remote Control */
+	{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SMK, USB_DEVICE_ID_SMK_PS3_BDREMOTE),
+		.driver_data = PS3REMOTE },
 	/* Sony Dualshock 4 controllers for PS4 */
 	{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
 		.driver_data = DUALSHOCK4_CONTROLLER_USB },
diff --git a/drivers/hid/i2c-hid/i2c-hid.c b/drivers/hid/i2c-hid/i2c-hid.c
index f09e70cafaf1..d32037cbf9db 100644
--- a/drivers/hid/i2c-hid/i2c-hid.c
+++ b/drivers/hid/i2c-hid/i2c-hid.c
@@ -137,6 +137,7 @@ struct i2c_hid {
 			   * descriptor. */
 	unsigned int bufsize;	/* i2c buffer size */
 	char *inbuf;		/* Input buffer */
+	char *rawbuf;		/* Raw Input buffer */
 	char *cmdbuf;		/* Command buffer */
 	char *argsbuf;		/* Command arguments buffer */
 
@@ -369,7 +370,7 @@ static int i2c_hid_hwreset(struct i2c_client *client)
 static void i2c_hid_get_input(struct i2c_hid *ihid)
 {
 	int ret, ret_size;
-	int size = le16_to_cpu(ihid->hdesc.wMaxInputLength);
+	int size = ihid->bufsize;
 
 	ret = i2c_master_recv(ihid->client, ihid->inbuf, size);
 	if (ret != size) {
@@ -437,7 +438,7 @@ static void i2c_hid_init_report(struct hid_report *report, u8 *buffer,
 			report->id, buffer, size))
 		return;
 
-	i2c_hid_dbg(ihid, "report (len=%d): %*ph\n", size, size, ihid->inbuf);
+	i2c_hid_dbg(ihid, "report (len=%d): %*ph\n", size, size, buffer);
 
 	ret_size = buffer[0] | (buffer[1] << 8);
 
@@ -504,9 +505,11 @@ static void i2c_hid_find_max_report(struct hid_device *hid, unsigned int type,
 static void i2c_hid_free_buffers(struct i2c_hid *ihid)
 {
 	kfree(ihid->inbuf);
+	kfree(ihid->rawbuf);
 	kfree(ihid->argsbuf);
 	kfree(ihid->cmdbuf);
 	ihid->inbuf = NULL;
+	ihid->rawbuf = NULL;
 	ihid->cmdbuf = NULL;
 	ihid->argsbuf = NULL;
 	ihid->bufsize = 0;
@@ -522,10 +525,11 @@ static int i2c_hid_alloc_buffers(struct i2c_hid *ihid, size_t report_size)
 		       report_size; /* report */
 
 	ihid->inbuf = kzalloc(report_size, GFP_KERNEL);
+	ihid->rawbuf = kzalloc(report_size, GFP_KERNEL);
 	ihid->argsbuf = kzalloc(args_len, GFP_KERNEL);
 	ihid->cmdbuf = kzalloc(sizeof(union command) + args_len, GFP_KERNEL);
 
-	if (!ihid->inbuf || !ihid->argsbuf || !ihid->cmdbuf) {
+	if (!ihid->inbuf || !ihid->rawbuf || !ihid->argsbuf || !ihid->cmdbuf) {
 		i2c_hid_free_buffers(ihid);
 		return -ENOMEM;
 	}
@@ -552,12 +556,12 @@ static int i2c_hid_get_raw_report(struct hid_device *hid,
552 556
553 ret = i2c_hid_get_report(client, 557 ret = i2c_hid_get_report(client,
554 report_type == HID_FEATURE_REPORT ? 0x03 : 0x01, 558 report_type == HID_FEATURE_REPORT ? 0x03 : 0x01,
555 report_number, ihid->inbuf, ask_count); 559 report_number, ihid->rawbuf, ask_count);
556 560
557 if (ret < 0) 561 if (ret < 0)
558 return ret; 562 return ret;
559 563
560 ret_count = ihid->inbuf[0] | (ihid->inbuf[1] << 8); 564 ret_count = ihid->rawbuf[0] | (ihid->rawbuf[1] << 8);
561 565
562 if (ret_count <= 2) 566 if (ret_count <= 2)
563 return 0; 567 return 0;
@@ -566,7 +570,7 @@ static int i2c_hid_get_raw_report(struct hid_device *hid,
566 570
567 /* The query buffer contains the size, dropping it in the reply */ 571 /* The query buffer contains the size, dropping it in the reply */
568 count = min(count, ret_count - 2); 572 count = min(count, ret_count - 2);
569 memcpy(buf, ihid->inbuf + 2, count); 573 memcpy(buf, ihid->rawbuf + 2, count);
570 574
571 return count; 575 return count;
572} 576}
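
(Editor's note: the change above moves synchronous GET_REPORT replies off inbuf, which the interrupt path owns, onto the new rawbuf, so a late IRQ can no longer overwrite a reply while it is being parsed. A standalone sketch of the reply framing being parsed here, the two-byte little-endian length prefix that the memcpy skips; the sample bytes are made up:)

#include <stdio.h>
#include <string.h>

/* Parse an i2c-hid GET_REPORT reply: bytes 0-1 are a little-endian
 * total length (which includes the length field itself); the report
 * payload follows. Returns payload bytes copied, 0 if empty, -1 on error. */
static int parse_reply(const unsigned char *raw, size_t raw_len,
		       unsigned char *out, size_t out_len)
{
	size_t ret_count;

	if (raw_len < 2)
		return -1;
	ret_count = raw[0] | (raw[1] << 8);
	if (ret_count <= 2)		/* nothing beyond the length field */
		return 0;
	if (ret_count > raw_len)	/* device claims more than was read */
		ret_count = raw_len;
	ret_count -= 2;			/* drop the length prefix */
	if (ret_count > out_len)
		ret_count = out_len;
	memcpy(out, raw + 2, ret_count);
	return (int)ret_count;
}

int main(void)
{
	unsigned char raw[] = { 0x05, 0x00, 0xaa, 0xbb, 0xcc }; /* fake reply */
	unsigned char buf[8];
	int n = parse_reply(raw, sizeof(raw), buf, sizeof(buf));

	printf("payload bytes: %d (first=0x%02x)\n", n, n > 0 ? buf[0] : 0);
	return 0;
}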
diff --git a/drivers/hid/usbhid/hid-core.c b/drivers/hid/usbhid/hid-core.c
index ca6849a0121e..bfbe1bedda7f 100644
--- a/drivers/hid/usbhid/hid-core.c
+++ b/drivers/hid/usbhid/hid-core.c
@@ -278,18 +278,20 @@ static void hid_irq_in(struct urb *urb)
 		usbhid->retry_delay = 0;
 		if ((hid->quirks & HID_QUIRK_ALWAYS_POLL) && !hid->open)
 			break;
-		hid_input_report(urb->context, HID_INPUT_REPORT,
-				 urb->transfer_buffer,
-				 urb->actual_length, 1);
-		/*
-		 * autosuspend refused while keys are pressed
-		 * because most keyboards don't wake up when
-		 * a key is released
-		 */
-		if (hid_check_keys_pressed(hid))
-			set_bit(HID_KEYS_PRESSED, &usbhid->iofl);
-		else
-			clear_bit(HID_KEYS_PRESSED, &usbhid->iofl);
+		if (!test_bit(HID_RESUME_RUNNING, &usbhid->iofl)) {
+			hid_input_report(urb->context, HID_INPUT_REPORT,
+					 urb->transfer_buffer,
+					 urb->actual_length, 1);
+			/*
+			 * autosuspend refused while keys are pressed
+			 * because most keyboards don't wake up when
+			 * a key is released
+			 */
+			if (hid_check_keys_pressed(hid))
+				set_bit(HID_KEYS_PRESSED, &usbhid->iofl);
+			else
+				clear_bit(HID_KEYS_PRESSED, &usbhid->iofl);
+		}
 		break;
 	case -EPIPE:		/* stall */
 		usbhid_mark_busy(usbhid);
@@ -338,8 +340,7 @@ static int hid_submit_out(struct hid_device *hid)
 	report = usbhid->out[usbhid->outtail].report;
 	raw_report = usbhid->out[usbhid->outtail].raw_report;

-	usbhid->urbout->transfer_buffer_length = ((report->size - 1) >> 3) +
-						 1 + (report->id > 0);
+	usbhid->urbout->transfer_buffer_length = hid_report_len(report);
 	usbhid->urbout->dev = hid_to_usb_dev(hid);
 	if (raw_report) {
 		memcpy(usbhid->outbuf, raw_report,
@@ -688,6 +689,7 @@ int usbhid_open(struct hid_device *hid)
 			goto done;
 		}
 		usbhid->intf->needs_remote_wakeup = 1;
+		set_bit(HID_RESUME_RUNNING, &usbhid->iofl);
 		res = hid_start_in(hid);
 		if (res) {
 			if (res != -ENOSPC) {
@@ -701,6 +703,15 @@ int usbhid_open(struct hid_device *hid)
 			}
 		}
 		usb_autopm_put_interface(usbhid->intf);
+
+		/*
+		 * In case events are generated while nobody was listening,
+		 * some are released when the device is re-opened.
+		 * Wait 50 msec for the queue to empty before allowing events
+		 * to go through hid.
+		 */
+		msleep(50);
+		clear_bit(HID_RESUME_RUNNING, &usbhid->iofl);
 	}
done:
 	mutex_unlock(&hid_open_mut);
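
(Editor's note: hid_submit_out now calls hid_report_len() instead of open-coding the length. The arithmetic it replaces is shown in the removed lines above: round the report's bit size up to whole bytes, then add one byte when a report ID is in use. A worked standalone version of that same formula:)

#include <stdio.h>

/* Same arithmetic as the open-coded expression this hunk removes:
 * DIV_ROUND_UP(size_in_bits, 8), plus one byte if a report ID is used. */
static unsigned int report_len(unsigned int size_bits, unsigned int id)
{
	return ((size_bits - 1) >> 3) + 1 + (id > 0);
}

int main(void)
{
	/* 63 bits, no ID -> 8 bytes; 64 bits, ID 1 -> 9 bytes */
	printf("%u %u\n", report_len(63, 0), report_len(64, 1));
	return 0;
}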
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 552671ee7c5d..dc89be90b35e 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -73,11 +73,13 @@ static const struct hid_blacklist {
73 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL }, 73 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL },
74 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL }, 74 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL },
75 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103, HID_QUIRK_ALWAYS_POLL }, 75 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103, HID_QUIRK_ALWAYS_POLL },
76 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_010c, HID_QUIRK_ALWAYS_POLL },
76 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL }, 77 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL },
77 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, 78 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
78 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, 79 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
79 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, 80 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
80 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, 81 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
82 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
81 { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS }, 83 { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GT683R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
82 { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS }, 84 { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
83 { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, 85 { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
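
(Editor's note: quirk handling is a straight vendor/product scan over this table, with the matched entry's flags OR-ed into the device's behaviour. A minimal sketch under illustrative flag values and hypothetical IDs, not the kernel's real constants:)

#include <stdint.h>
#include <stdio.h>

#define QUIRK_ALWAYS_POLL	0x00000400u	/* illustrative values */
#define QUIRK_NO_INIT_REPORTS	0x20000000u

struct blacklist_entry {
	uint16_t vendor, product;
	uint32_t quirks;
};

static const struct blacklist_entry blacklist[] = {
	{ 0x04f3, 0x010c, QUIRK_ALWAYS_POLL },		/* hypothetical IDs */
	{ 0x045e, 0x07de, QUIRK_NO_INIT_REPORTS },	/* hypothetical IDs */
	{ 0, 0, 0 }
};

/* Return the quirk mask for a device, or 0 if it is not listed. */
static uint32_t lookup_quirks(uint16_t v, uint16_t p)
{
	const struct blacklist_entry *e;

	for (e = blacklist; e->vendor; e++)
		if (e->vendor == v && e->product == p)
			return e->quirks;
	return 0;
}

int main(void)
{
	printf("quirks=0x%08x\n", lookup_quirks(0x04f3, 0x010c));
	return 0;
}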
diff --git a/drivers/hid/usbhid/usbhid.h b/drivers/hid/usbhid/usbhid.h
index f633c24ce28b..807922b49aa4 100644
--- a/drivers/hid/usbhid/usbhid.h
+++ b/drivers/hid/usbhid/usbhid.h
@@ -52,6 +52,7 @@ struct usb_interface *usbhid_find_interface(int minor);
52#define HID_STARTED 8 52#define HID_STARTED 8
53#define HID_KEYS_PRESSED 10 53#define HID_KEYS_PRESSED 10
54#define HID_NO_BANDWIDTH 11 54#define HID_NO_BANDWIDTH 11
55#define HID_RESUME_RUNNING 12
55 56
56/* 57/*
57 * USB-specific HID struct, to be pointed to 58 * USB-specific HID struct, to be pointed to
diff --git a/drivers/hid/wacom.h b/drivers/hid/wacom.h
index 0cc53440543a..7db432809e9e 100644
--- a/drivers/hid/wacom.h
+++ b/drivers/hid/wacom.h
@@ -140,7 +140,7 @@ extern const struct hid_device_id wacom_ids[];

 void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len);
 void wacom_setup_device_quirks(struct wacom_features *features);
-int wacom_setup_input_capabilities(struct input_dev *input_dev,
+int wacom_setup_pentouch_input_capabilities(struct input_dev *input_dev,
 				   struct wacom_wac *wacom_wac);
 int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
 				       struct wacom_wac *wacom_wac);
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 8593047bb726..654202941d30 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -13,6 +13,7 @@

 #include "wacom_wac.h"
 #include "wacom.h"
+#include <linux/input/mt.h>

 #define WAC_MSG_RETRIES		5

@@ -70,22 +71,15 @@ static int wacom_raw_event(struct hid_device *hdev, struct hid_report *report,
 static int wacom_open(struct input_dev *dev)
 {
 	struct wacom *wacom = input_get_drvdata(dev);
-	int retval;
-
-	mutex_lock(&wacom->lock);
-	retval = hid_hw_open(wacom->hdev);
-	mutex_unlock(&wacom->lock);

-	return retval;
+	return hid_hw_open(wacom->hdev);
 }

 static void wacom_close(struct input_dev *dev)
 {
 	struct wacom *wacom = input_get_drvdata(dev);

-	mutex_lock(&wacom->lock);
 	hid_hw_close(wacom->hdev);
-	mutex_unlock(&wacom->lock);
 }

 /*
@@ -192,9 +186,15 @@ static void wacom_usage_mapping(struct hid_device *hdev,
 	if (!pen && !finger)
 		return;

-	if (finger && !features->touch_max)
-		/* touch device at least supports one touch point */
-		features->touch_max = 1;
+	/*
+	 * Bamboo models do not support HID_DG_CONTACTMAX.
+	 * And, Bamboo Pen only descriptor contains touch.
+	 */
+	if (features->type != BAMBOO_PT) {
+		/* ISDv4 touch devices at least supports one touch point */
+		if (finger && !features->touch_max)
+			features->touch_max = 1;
+	}

 	switch (usage->hid) {
 	case HID_GD_X:
@@ -230,6 +230,21 @@ static void wacom_usage_mapping(struct hid_device *hdev,
 	wacom_wac_usage_mapping(hdev, field, usage);
 }

+static void wacom_post_parse_hid(struct hid_device *hdev,
+				 struct wacom_features *features)
+{
+	struct wacom *wacom = hid_get_drvdata(hdev);
+	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+
+	if (features->type == HID_GENERIC) {
+		/* Any last-minute generic device setup */
+		if (features->touch_max > 1) {
+			input_mt_init_slots(wacom_wac->input, wacom_wac->features.touch_max,
+				    INPUT_MT_DIRECT);
+		}
+	}
+}
+
 static void wacom_parse_hid(struct hid_device *hdev,
 			    struct wacom_features *features)
 {
@@ -264,6 +279,8 @@ static void wacom_parse_hid(struct hid_device *hdev,
 			wacom_usage_mapping(hdev, hreport->field[i],
 					hreport->field[i]->usage + j);
 	}
+
+	wacom_post_parse_hid(hdev, features);
 }

 static int wacom_hid_set_device_mode(struct hid_device *hdev)
@@ -1129,7 +1146,7 @@ static void wacom_clean_inputs(struct wacom *wacom)
 			input_free_device(wacom->wacom_wac.input);
 	}
 	if (wacom->wacom_wac.pad_input) {
-		if (wacom->wacom_wac.input_registered)
+		if (wacom->wacom_wac.pad_registered)
 			input_unregister_device(wacom->wacom_wac.pad_input);
 		else
 			input_free_device(wacom->wacom_wac.pad_input);
@@ -1151,13 +1168,13 @@ static int wacom_register_inputs(struct wacom *wacom)
 	if (!input_dev || !pad_input_dev)
 		return -EINVAL;

-	error = wacom_setup_input_capabilities(input_dev, wacom_wac);
-	if (error)
-		return error;
-
-	error = input_register_device(input_dev);
-	if (error)
-		return error;
+	error = wacom_setup_pentouch_input_capabilities(input_dev, wacom_wac);
+	if (!error) {
+		error = input_register_device(input_dev);
+		if (error)
+			return error;
+		wacom_wac->input_registered = true;
+	}

 	error = wacom_setup_pad_input_capabilities(pad_input_dev, wacom_wac);
 	if (error) {
@@ -1169,22 +1186,23 @@ static int wacom_register_inputs(struct wacom *wacom)
 		error = input_register_device(pad_input_dev);
 		if (error)
 			goto fail_register_pad_input;
+		wacom_wac->pad_registered = true;

 		error = wacom_initialize_leds(wacom);
 		if (error)
 			goto fail_leds;
 	}

-	wacom_wac->input_registered = true;
-
 	return 0;

fail_leds:
 	input_unregister_device(pad_input_dev);
 	pad_input_dev = NULL;
+	wacom_wac->pad_registered = false;
fail_register_pad_input:
 	input_unregister_device(input_dev);
 	wacom_wac->input = NULL;
+	wacom_wac->input_registered = false;
 	return error;
 }

@@ -1321,12 +1339,6 @@ static void wacom_calculate_res(struct wacom_features *features)
 					features->unitExpo);
 }

-static int wacom_hid_report_len(struct hid_report *report)
-{
-	/* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
-	return ((report->size - 1) >> 3) + 1 + (report->id > 0);
-}
-
 static size_t wacom_compute_pktlen(struct hid_device *hdev)
 {
 	struct hid_report_enum *report_enum;
@@ -1336,7 +1348,7 @@ static size_t wacom_compute_pktlen(struct hid_device *hdev)
 	report_enum = hdev->report_enum + HID_INPUT_REPORT;

 	list_for_each_entry(report, &report_enum->report_list, list) {
-		size_t report_size = wacom_hid_report_len(report);
+		size_t report_size = hid_report_len(report);
 		if (report_size > size)
 			size = report_size;
 	}
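
(Editor's note: wacom_compute_pktlen keeps the largest hid_report_len() over all input reports, since the device's packet buffer must fit its biggest report. A standalone sketch of that max-scan, with the driver's structures simplified to a flat array and made-up report sizes:)

#include <stdio.h>

struct report {
	unsigned int id;	/* report ID, 0 if unused */
	unsigned int size;	/* payload size in bits */
};

static unsigned int report_len(const struct report *r)
{
	return ((r->size - 1) >> 3) + 1 + (r->id > 0);
}

/* Packet length is the largest input report the device can send. */
static unsigned int compute_pktlen(const struct report *reports, int n)
{
	unsigned int max = 0;
	int i;

	for (i = 0; i < n; i++) {
		unsigned int len = report_len(&reports[i]);

		if (len > max)
			max = len;
	}
	return max;
}

int main(void)
{
	struct report reports[] = { { 2, 64 }, { 16, 504 }, { 0, 72 } };

	printf("pktlen=%u\n", compute_pktlen(reports, 3));
	return 0;
}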
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 586b2405b0d4..ac7447c7b82e 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -25,6 +25,10 @@
 #define WACOM_INTUOS_RES	100
 #define WACOM_INTUOS3_RES	200

+/* Newer Cintiq and DTU have an offset between tablet and screen areas */
+#define WACOM_DTU_OFFSET	200
+#define WACOM_CINTIQ_OFFSET	400
+
 /*
  * Scale factor relating reported contact size to logical contact area.
  * 2^14/pi is a good approximation on Intuos5 and 3rd-gen Bamboo
@@ -600,8 +604,8 @@ static void wacom_intuos_general(struct wacom_wac *wacom)
 		}
 		input_report_abs(input, ABS_PRESSURE, t);
 		input_report_abs(input, ABS_TILT_X,
-				((data[7] << 1) & 0x7e) | (data[8] >> 7));
-		input_report_abs(input, ABS_TILT_Y, data[8] & 0x7f);
+				(((data[7] << 1) & 0x7e) | (data[8] >> 7)) - 64);
+		input_report_abs(input, ABS_TILT_Y, (data[8] & 0x7f) - 64);
 		input_report_key(input, BTN_STYLUS, data[1] & 2);
 		input_report_key(input, BTN_STYLUS2, data[1] & 4);
 		input_report_key(input, BTN_TOUCH, t > 10);
@@ -612,8 +616,8 @@ static void wacom_intuos_general(struct wacom_wac *wacom)
 		input_report_abs(input, ABS_WHEEL,
 				(data[6] << 2) | ((data[7] >> 6) & 3));
 		input_report_abs(input, ABS_TILT_X,
-				((data[7] << 1) & 0x7e) | (data[8] >> 7));
-		input_report_abs(input, ABS_TILT_Y, data[8] & 0x7f);
+				(((data[7] << 1) & 0x7e) | (data[8] >> 7)) - 64);
+		input_report_abs(input, ABS_TILT_Y, (data[8] & 0x7f) - 64);
 	}
 }

@@ -915,8 +919,8 @@ static int wacom_intuos_irq(struct wacom_wac *wacom)
 			input_report_key(input, BTN_EXTRA, data[6] & 0x10);

 			input_report_abs(input, ABS_TILT_X,
-					((data[7] << 1) & 0x7e) | (data[8] >> 7));
-			input_report_abs(input, ABS_TILT_Y, data[8] & 0x7f);
+					(((data[7] << 1) & 0x7e) | (data[8] >> 7)) - 64);
+			input_report_abs(input, ABS_TILT_Y, (data[8] & 0x7f) - 64);
 		} else {
 			/* 2D mouse packet */
 			input_report_key(input, BTN_LEFT, data[8] & 0x04);
@@ -1377,11 +1381,12 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
 {
 	struct wacom *wacom = hid_get_drvdata(hdev);
 	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
-	struct input_dev *input = wacom_wac->input;
+	struct wacom_features *features = &wacom_wac->features;
 	unsigned touch_max = wacom_wac->features.touch_max;

 	switch (usage->hid) {
 	case HID_GD_X:
+		features->last_slot_field = usage->hid;
 		if (touch_max == 1)
 			wacom_map_usage(wacom, usage, field, EV_ABS, ABS_X, 4);
 		else
@@ -1389,6 +1394,7 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
 					ABS_MT_POSITION_X, 4);
 		break;
 	case HID_GD_Y:
+		features->last_slot_field = usage->hid;
 		if (touch_max == 1)
 			wacom_map_usage(wacom, usage, field, EV_ABS, ABS_Y, 4);
 		else
@@ -1396,19 +1402,48 @@ static void wacom_wac_finger_usage_mapping(struct hid_device *hdev,
 					ABS_MT_POSITION_Y, 4);
 		break;
 	case HID_DG_CONTACTID:
-		input_mt_init_slots(input, wacom_wac->features.touch_max,
-				INPUT_MT_DIRECT);
+		features->last_slot_field = usage->hid;
 		break;
 	case HID_DG_INRANGE:
+		features->last_slot_field = usage->hid;
 		break;
 	case HID_DG_INVERT:
+		features->last_slot_field = usage->hid;
 		break;
 	case HID_DG_TIPSWITCH:
+		features->last_slot_field = usage->hid;
 		wacom_map_usage(wacom, usage, field, EV_KEY, BTN_TOUCH, 0);
 		break;
 	}
 }

+static void wacom_wac_finger_slot(struct wacom_wac *wacom_wac,
+		struct input_dev *input)
+{
+	struct hid_data *hid_data = &wacom_wac->hid_data;
+	bool mt = wacom_wac->features.touch_max > 1;
+	bool prox = hid_data->tipswitch &&
+		    !wacom_wac->shared->stylus_in_proximity;
+
+	if (mt) {
+		int slot;
+
+		slot = input_mt_get_slot_by_key(input, hid_data->id);
+		input_mt_slot(input, slot);
+		input_mt_report_slot_state(input, MT_TOOL_FINGER, prox);
+	}
+	else {
+		input_report_key(input, BTN_TOUCH, prox);
+	}
+
+	if (prox) {
+		input_report_abs(input, mt ? ABS_MT_POSITION_X : ABS_X,
+				hid_data->x);
+		input_report_abs(input, mt ? ABS_MT_POSITION_Y : ABS_Y,
+				hid_data->y);
+	}
+}
+
 static int wacom_wac_finger_event(struct hid_device *hdev,
 		struct hid_field *field, struct hid_usage *usage, __s32 value)
 {
@@ -1431,36 +1466,35 @@ static int wacom_wac_finger_event(struct hid_device *hdev,
 	}


+	if (usage->usage_index + 1 == field->report_count) {
+		if (usage->hid == wacom_wac->features.last_slot_field)
+			wacom_wac_finger_slot(wacom_wac, wacom_wac->input);
+	}
+
 	return 0;
 }

-static void wacom_wac_finger_mt_report(struct wacom_wac *wacom_wac,
-		struct input_dev *input, bool touch)
+static int wacom_wac_finger_count_touches(struct hid_device *hdev)
 {
-	int slot;
-	struct hid_data *hid_data = &wacom_wac->hid_data;
+	struct wacom *wacom = hid_get_drvdata(hdev);
+	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
+	struct input_dev *input = wacom_wac->input;
+	unsigned touch_max = wacom_wac->features.touch_max;
+	int count = 0;
+	int i;

-	slot = input_mt_get_slot_by_key(input, hid_data->id);
+	if (touch_max == 1)
+		return wacom_wac->hid_data.tipswitch &&
+		       !wacom_wac->shared->stylus_in_proximity;

-	input_mt_slot(input, slot);
-	input_mt_report_slot_state(input, MT_TOOL_FINGER, touch);
-	if (touch) {
-		input_report_abs(input, ABS_MT_POSITION_X, hid_data->x);
-		input_report_abs(input, ABS_MT_POSITION_Y, hid_data->y);
-	}
-	input_mt_sync_frame(input);
-}
+	for (i = 0; i < input->mt->num_slots; i++) {
+		struct input_mt_slot *ps = &input->mt->slots[i];
+		int id = input_mt_get_value(ps, ABS_MT_TRACKING_ID);
+		if (id >= 0)
+			count++;
+	}

-static void wacom_wac_finger_single_touch_report(struct wacom_wac *wacom_wac,
-		struct input_dev *input, bool touch)
-{
-	struct hid_data *hid_data = &wacom_wac->hid_data;
-
-	if (touch) {
-		input_report_abs(input, ABS_X, hid_data->x);
-		input_report_abs(input, ABS_Y, hid_data->y);
-	}
-	input_report_key(input, BTN_TOUCH, touch);
-}
+	return count;
+}

 static void wacom_wac_finger_report(struct hid_device *hdev,
@@ -1469,24 +1503,23 @@ static void wacom_wac_finger_report(struct hid_device *hdev,
 	struct wacom *wacom = hid_get_drvdata(hdev);
 	struct wacom_wac *wacom_wac = &wacom->wacom_wac;
 	struct input_dev *input = wacom_wac->input;
-	bool touch = wacom_wac->hid_data.tipswitch &&
-		     !wacom_wac->shared->stylus_in_proximity;
 	unsigned touch_max = wacom_wac->features.touch_max;

 	if (touch_max > 1)
-		wacom_wac_finger_mt_report(wacom_wac, input, touch);
-	else
-		wacom_wac_finger_single_touch_report(wacom_wac, input, touch);
+		input_mt_sync_frame(input);
+
 	input_sync(input);

 	/* keep touch state for pen event */
-	wacom_wac->shared->touch_down = touch;
+	wacom_wac->shared->touch_down = wacom_wac_finger_count_touches(hdev);
 }

 #define WACOM_PEN_FIELD(f)	(((f)->logical == HID_DG_STYLUS) || \
-				 ((f)->physical == HID_DG_STYLUS))
+				 ((f)->physical == HID_DG_STYLUS) || \
+				 ((f)->application == HID_DG_PEN))
 #define WACOM_FINGER_FIELD(f)	(((f)->logical == HID_DG_FINGER) || \
-				 ((f)->physical == HID_DG_FINGER))
+				 ((f)->physical == HID_DG_FINGER) || \
+				 ((f)->application == HID_DG_TOUCHSCREEN))

 void wacom_wac_usage_mapping(struct hid_device *hdev,
 		struct hid_field *field, struct hid_usage *usage)
@@ -1681,7 +1714,9 @@ static int wacom_bpt_pen(struct wacom_wac *wacom)
 		return 0;

 	if (data[0] == WACOM_REPORT_USB) {
-		if (features->type == INTUOSHT && features->touch_max) {
+		if (features->type == INTUOSHT &&
+		    wacom->shared->touch_input &&
+		    features->touch_max) {
 			input_report_switch(wacom->shared->touch_input,
 					SW_MUTE_DEVICE, data[8] & 0x40);
 			input_sync(wacom->shared->touch_input);
@@ -1774,7 +1809,8 @@ static int wacom_wireless_irq(struct wacom_wac *wacom, size_t len)
 		int pid, battery, ps_connected;

 		if ((wacom->shared->type == INTUOSHT) &&
-		    wacom->shared->touch_max) {
+		    wacom->shared->touch_input &&
+		    wacom->shared->touch_max) {
 			input_report_switch(wacom->shared->touch_input,
 					SW_MUTE_DEVICE, data[5] & 0x40);
 			input_sync(wacom->shared->touch_input);
@@ -1838,6 +1874,7 @@ void wacom_wac_irq(struct wacom_wac *wacom_wac, size_t len)
 		break;

 	case DTUS:
+	case DTUSX:
 		sync = wacom_dtus_irq(wacom_wac);
 		break;

@@ -1926,8 +1963,10 @@ static void wacom_setup_cintiq(struct wacom_wac *wacom_wac)
 	input_set_abs_params(input_dev, ABS_DISTANCE,
			     0, wacom_wac->features.distance_max, 0, 0);
 	input_set_abs_params(input_dev, ABS_WHEEL, 0, 1023, 0, 0);
-	input_set_abs_params(input_dev, ABS_TILT_X, 0, 127, 0, 0);
-	input_set_abs_params(input_dev, ABS_TILT_Y, 0, 127, 0, 0);
+	input_set_abs_params(input_dev, ABS_TILT_X, -64, 63, 0, 0);
+	input_abs_set_res(input_dev, ABS_TILT_X, 57);
+	input_set_abs_params(input_dev, ABS_TILT_Y, -64, 63, 0, 0);
+	input_abs_set_res(input_dev, ABS_TILT_Y, 57);
 }

 static void wacom_setup_intuos(struct wacom_wac *wacom_wac)
@@ -1947,6 +1986,7 @@ static void wacom_setup_intuos(struct wacom_wac *wacom_wac)
 	__set_bit(BTN_TOOL_LENS, input_dev->keybit);

 	input_set_abs_params(input_dev, ABS_RZ, -900, 899, 0, 0);
+	input_abs_set_res(input_dev, ABS_RZ, 287);
 	input_set_abs_params(input_dev, ABS_THROTTLE, -1023, 1023, 0, 0);
 }

@@ -2029,7 +2069,7 @@ static void wacom_abs_set_axis(struct input_dev *input_dev,
 	}
 }

-int wacom_setup_input_capabilities(struct input_dev *input_dev,
+int wacom_setup_pentouch_input_capabilities(struct input_dev *input_dev,
 				   struct wacom_wac *wacom_wac)
 {
 	struct wacom_features *features = &wacom_wac->features;
@@ -2047,9 +2087,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,

 	switch (features->type) {
 	case WACOM_MO:
-		input_set_abs_params(input_dev, ABS_WHEEL, 0, 71, 0, 0);
-		/* fall through */
-
 	case WACOM_G4:
 		/* fall through */

@@ -2092,6 +2129,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,

 	case WACOM_24HD:
 		input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+		input_abs_set_res(input_dev, ABS_Z, 287);
 		input_set_abs_params(input_dev, ABS_THROTTLE, 0, 71, 0, 0);
 		/* fall through */

@@ -2106,6 +2144,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
 	case WACOM_BEE:
 	case CINTIQ:
 		input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+		input_abs_set_res(input_dev, ABS_Z, 287);

 		__set_bit(INPUT_PROP_DIRECT, input_dev->propbit);

@@ -2114,6 +2153,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,

 	case WACOM_13HD:
 		input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+		input_abs_set_res(input_dev, ABS_Z, 287);
 		__set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
 		wacom_setup_cintiq(wacom_wac);
 		break;
@@ -2122,6 +2162,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
 	case INTUOS3L:
 	case INTUOS3S:
 		input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+		input_abs_set_res(input_dev, ABS_Z, 287);
 		/* fall through */

 	case INTUOS:
@@ -2144,6 +2185,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
 					      0, 0);

 			input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+			input_abs_set_res(input_dev, ABS_Z, 287);

 			wacom_setup_intuos(wacom_wac);
 		} else if (features->device_type == BTN_TOOL_FINGER) {
@@ -2162,6 +2204,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
 	case INTUOS4L:
 	case INTUOS4S:
 		input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+		input_abs_set_res(input_dev, ABS_Z, 287);
 		wacom_setup_intuos(wacom_wac);

 		__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
@@ -2196,6 +2239,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
 		/* fall through */

 	case DTUS:
+	case DTUSX:
 	case PL:
 	case DTU:
 		__set_bit(BTN_TOOL_PEN, input_dev->keybit);
@@ -2246,6 +2290,9 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
 			__clear_bit(ABS_X, input_dev->absbit);
 			__clear_bit(ABS_Y, input_dev->absbit);
 			__clear_bit(BTN_TOUCH, input_dev->keybit);
+
+			/* PAD is setup by wacom_setup_pad_input_capabilities later */
+			return 1;
 		}
 	} else if (features->device_type == BTN_TOOL_PEN) {
 		__set_bit(INPUT_PROP_POINTER, input_dev->propbit);
@@ -2261,6 +2308,7 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,

 	case CINTIQ_HYBRID:
 		input_set_abs_params(input_dev, ABS_Z, -900, 899, 0, 0);
+		input_abs_set_res(input_dev, ABS_Z, 287);
 		__set_bit(INPUT_PROP_DIRECT, input_dev->propbit);

 		wacom_setup_cintiq(wacom_wac);
@@ -2303,9 +2351,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,

 	case WACOM_G4:
 		__set_bit(BTN_BACK, input_dev->keybit);
-		__set_bit(BTN_LEFT, input_dev->keybit);
 		__set_bit(BTN_FORWARD, input_dev->keybit);
-		__set_bit(BTN_RIGHT, input_dev->keybit);
 		input_set_capability(input_dev, EV_REL, REL_WHEEL);
 		break;

@@ -2402,7 +2448,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
 	case INTUOSPS:
 		/* touch interface does not have the pad device */
 		if (features->device_type != BTN_TOOL_PEN)
-			return 1;
+			return -ENODEV;

 		for (i = 0; i < 7; i++)
 			__set_bit(BTN_0 + i, input_dev->keybit);
@@ -2446,8 +2492,10 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,
 	case INTUOSHT:
 	case BAMBOO_PT:
 		/* pad device is on the touch interface */
-		if (features->device_type != BTN_TOOL_FINGER)
-			return 1;
+		if ((features->device_type != BTN_TOOL_FINGER) ||
+		    /* Bamboo Pen only tablet does not have pad */
+		    ((features->type == BAMBOO_PT) && !features->touch_max))
+			return -ENODEV;

 		__clear_bit(ABS_MISC, input_dev->absbit);

@@ -2460,7 +2508,7 @@ int wacom_setup_pad_input_capabilities(struct input_dev *input_dev,

 	default:
 		/* no pad supported */
-		return 1;
+		return -ENODEV;
 	}
 	return 0;
 }
@@ -2664,11 +2712,13 @@ static const struct wacom_features wacom_features_0x317 =
 	  INTUOSPL, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, .touch_max = 16,
 	  .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
 static const struct wacom_features wacom_features_0xF4 =
-	{ "Wacom Cintiq 24HD", 104280, 65400, 2047, 63,
-	  WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
+	{ "Wacom Cintiq 24HD", 104080, 65200, 2047, 63,
+	  WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
 static const struct wacom_features wacom_features_0xF8 =
-	{ "Wacom Cintiq 24HD touch", 104280, 65400, 2047, 63, /* Pen */
-	  WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200,
+	{ "Wacom Cintiq 24HD touch", 104080, 65200, 2047, 63, /* Pen */
+	  WACOM_24HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0xf6 };
 static const struct wacom_features wacom_features_0xF6 =
 	{ "Wacom Cintiq 24HD touch", .type = WACOM_24HDT, /* Touch */
@@ -2684,8 +2734,9 @@ static const struct wacom_features wacom_features_0xC6 =
 	{ "Wacom Cintiq 12WX", 53020, 33440, 1023, 63,
 	  WACOM_BEE, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES };
 static const struct wacom_features wacom_features_0x304 =
-	{ "Wacom Cintiq 13HD", 59352, 33648, 1023, 63,
-	  WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
+	{ "Wacom Cintiq 13HD", 59152, 33448, 1023, 63,
+	  WACOM_13HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
 static const struct wacom_features wacom_features_0xC7 =
 	{ "Wacom DTU1931", 37832, 30305, 511, 0,
 	  PL, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
@@ -2697,28 +2748,38 @@ static const struct wacom_features wacom_features_0xF0 =
 	{ "Wacom DTU1631", 34623, 19553, 511, 0,
 	  DTU, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
 static const struct wacom_features wacom_features_0xFB =
-	{ "Wacom DTU1031", 22096, 13960, 511, 0,
-	  DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
+	{ "Wacom DTU1031", 21896, 13760, 511, 0,
+	  DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+	  WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
+static const struct wacom_features wacom_features_0x32F =
+	{ "Wacom DTU1031X", 22472, 12728, 511, 0,
+	  DTUSX, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+	  WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
 static const struct wacom_features wacom_features_0x57 =
 	{ "Wacom DTK2241", 95640, 54060, 2047, 63,
-	  DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
+	  DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
 static const struct wacom_features wacom_features_0x59 = /* Pen */
 	{ "Wacom DTH2242", 95640, 54060, 2047, 63,
-	  DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200,
+	  DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5D };
 static const struct wacom_features wacom_features_0x5D = /* Touch */
 	{ "Wacom DTH2242", .type = WACOM_24HDT,
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x59, .touch_max = 10,
 	  .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
 static const struct wacom_features wacom_features_0xCC =
-	{ "Wacom Cintiq 21UX2", 87000, 65400, 2047, 63,
-	  WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
+	{ "Wacom Cintiq 21UX2", 86800, 65200, 2047, 63,
+	  WACOM_21UX2, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
 static const struct wacom_features wacom_features_0xFA =
-	{ "Wacom Cintiq 22HD", 95640, 54060, 2047, 63,
-	  WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200 };
+	{ "Wacom Cintiq 22HD", 95440, 53860, 2047, 63,
+	  WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET };
 static const struct wacom_features wacom_features_0x5B =
-	{ "Wacom Cintiq 22HDT", 95640, 54060, 2047, 63,
-	  WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200,
+	{ "Wacom Cintiq 22HDT", 95440, 53860, 2047, 63,
+	  WACOM_22HD, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x5e };
 static const struct wacom_features wacom_features_0x5E =
 	{ "Wacom Cintiq 22HDT", .type = WACOM_24HDT,
@@ -2863,21 +2924,27 @@ static const struct wacom_features wacom_features_0x6004 =
 	{ "ISD-V4", 12800, 8000, 255, 0,
 	  TABLETPC, WACOM_INTUOS_RES, WACOM_INTUOS_RES };
 static const struct wacom_features wacom_features_0x307 =
-	{ "Wacom ISDv5 307", 59352, 33648, 2047, 63,
-	  CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200,
+	{ "Wacom ISDv5 307", 59152, 33448, 2047, 63,
+	  CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x309 };
 static const struct wacom_features wacom_features_0x309 =
 	{ "Wacom ISDv5 309", .type = WACOM_24HDT, /* Touch */
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x0307, .touch_max = 10,
 	  .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
 static const struct wacom_features wacom_features_0x30A =
-	{ "Wacom ISDv5 30A", 59352, 33648, 2047, 63,
-	  CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 200, 200,
+	{ "Wacom ISDv5 30A", 59152, 33448, 2047, 63,
+	  CINTIQ_HYBRID, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES,
+	  WACOM_CINTIQ_OFFSET, WACOM_CINTIQ_OFFSET,
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x30C };
 static const struct wacom_features wacom_features_0x30C =
 	{ "Wacom ISDv5 30C", .type = WACOM_24HDT, /* Touch */
 	  .oVid = USB_VENDOR_ID_WACOM, .oPid = 0x30A, .touch_max = 10,
 	  .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };
+static const struct wacom_features wacom_features_0x323 =
+	{ "Wacom Intuos P M", 21600, 13500, 1023, 31,
+	  INTUOSHT, WACOM_INTUOS_RES, WACOM_INTUOS_RES,
+	  .check_for_hid_type = true, .hid_type = HID_TYPE_USBNONE };

 static const struct wacom_features wacom_features_HID_ANY_ID =
 	{ "Wacom HID", .type = HID_GENERIC };
@@ -3022,10 +3089,13 @@ const struct hid_device_id wacom_ids[] = {
 	{ USB_DEVICE_WACOM(0x314) },
 	{ USB_DEVICE_WACOM(0x315) },
 	{ USB_DEVICE_WACOM(0x317) },
+	{ USB_DEVICE_WACOM(0x323) },
+	{ USB_DEVICE_WACOM(0x32F) },
 	{ USB_DEVICE_WACOM(0x4001) },
 	{ USB_DEVICE_WACOM(0x4004) },
 	{ USB_DEVICE_WACOM(0x5000) },
 	{ USB_DEVICE_WACOM(0x5002) },
+	{ USB_DEVICE_LENOVO(0x6004) },

 	{ USB_DEVICE_WACOM(HID_ANY_ID) },
 	{ }
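
(Editor's note: the tilt hunks earlier in this file recentre the raw 7-bit tilt value so 0 means vertical, matching the new -64..63 axis range, and report a resolution of 57. That value follows from the input ABS convention of expressing tilt resolution in units per radian: the hardware reports roughly one unit per degree, and there are about 57.3 degrees in a radian. A standalone check of that arithmetic; the sample raw byte is made up. Build with -lm:)

#include <math.h>
#include <stdio.h>

/* Raw Intuos tilt is a 7-bit unsigned value; recentre it so that 0
 * means vertical, giving the new -64..63 range from the diff. */
static int recentre_tilt(unsigned int raw7)
{
	return (int)(raw7 & 0x7f) - 64;
}

int main(void)
{
	unsigned int raw = 0x50;		/* made-up sample */
	int units = recentre_tilt(raw);
	/* ~1 unit per degree, so units per radian ~= 57, the reported res */
	double radians = units / (180.0 / M_PI);

	printf("tilt %d units ~= %.3f rad (res %.0f units/rad)\n",
	       units, radians, 180.0 / M_PI);
	return 0;
}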
diff --git a/drivers/hid/wacom_wac.h b/drivers/hid/wacom_wac.h
index 0f0b85ec1322..bfad815cda8a 100644
--- a/drivers/hid/wacom_wac.h
+++ b/drivers/hid/wacom_wac.h
@@ -80,6 +80,7 @@ enum {
 	PL,
 	DTU,
 	DTUS,
+	DTUSX,
 	INTUOS,
 	INTUOS3S,
 	INTUOS3,
@@ -144,6 +145,7 @@ struct wacom_features {
 	int pktlen;
 	bool check_for_hid_type;
 	int hid_type;
+	int last_slot_field;
 };

 struct wacom_shared {
@@ -183,6 +185,7 @@ struct wacom_wac {
 	struct input_dev *input;
 	struct input_dev *pad_input;
 	bool input_registered;
+	bool pad_registered;
 	int pid;
 	int battery_capacity;
 	int num_contacts_left;
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index 6dbfbc209491..30f0e61341c5 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -144,13 +144,26 @@ config OMAP_IOMMU
 	select IOMMU_API

 config OMAP_IOMMU_DEBUG
-	tristate "Export OMAP IOMMU internals in DebugFS"
+	bool "Export OMAP IOMMU internals in DebugFS"
 	depends on OMAP_IOMMU && DEBUG_FS
-	help
+	---help---
 	  Select this to see extensive information about
 	  the internal state of OMAP IOMMU in debugfs.

 	  Say N unless you know you need this.

+config ROCKCHIP_IOMMU
+	bool "Rockchip IOMMU Support"
+	depends on ARM
+	depends on ARCH_ROCKCHIP || COMPILE_TEST
+	select IOMMU_API
+	select ARM_DMA_USE_IOMMU
+	help
+	  Support for IOMMUs found on Rockchip rk32xx SOCs.
+	  These IOMMUs allow virtualization of the address space used by most
+	  cores within the multimedia subsystem.
+	  Say Y here if you are using a Rockchip SoC that includes an IOMMU
+	  device.
+
 config TEGRA_IOMMU_GART
 	bool "Tegra GART IOMMU Support"
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 16edef74b8ee..7b976f294a69 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -11,8 +11,8 @@ obj-$(CONFIG_INTEL_IOMMU) += iova.o intel-iommu.o
11obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o 11obj-$(CONFIG_IPMMU_VMSA) += ipmmu-vmsa.o
12obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o 12obj-$(CONFIG_IRQ_REMAP) += intel_irq_remapping.o irq_remapping.o
13obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o 13obj-$(CONFIG_OMAP_IOMMU) += omap-iommu.o
14obj-$(CONFIG_OMAP_IOMMU) += omap-iommu2.o
15obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o 14obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
15obj-$(CONFIG_ROCKCHIP_IOMMU) += rockchip-iommu.o
16obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o 16obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
17obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o 17obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
18obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o 18obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 2d84c9edf3b8..b205f76d7129 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -3411,6 +3411,8 @@ static bool amd_iommu_capable(enum iommu_cap cap)
 		return true;
 	case IOMMU_CAP_INTR_REMAP:
 		return (irq_remapping_enabled == 1);
+	case IOMMU_CAP_NOEXEC:
+		return false;
 	}

 	return false;
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 90d734bbf467..a2d87a60c27f 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -279,10 +279,8 @@ static void free_pasid_state(struct pasid_state *pasid_state)

 static void put_pasid_state(struct pasid_state *pasid_state)
 {
-	if (atomic_dec_and_test(&pasid_state->count)) {
-		put_device_state(pasid_state->device_state);
+	if (atomic_dec_and_test(&pasid_state->count))
 		wake_up(&pasid_state->wq);
-	}
 }

 static void put_pasid_state_wait(struct pasid_state *pasid_state)
@@ -291,9 +289,7 @@ static void put_pasid_state_wait(struct pasid_state *pasid_state)

 	prepare_to_wait(&pasid_state->wq, &wait, TASK_UNINTERRUPTIBLE);

-	if (atomic_dec_and_test(&pasid_state->count))
-		put_device_state(pasid_state->device_state);
-	else
+	if (!atomic_dec_and_test(&pasid_state->count))
 		schedule();

 	finish_wait(&pasid_state->wq, &wait);
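
(Editor's note: the fix above stops the pasid refcount helpers from also dropping the device_state reference; what remains is the classic "last put wakes the waiter" pattern built on atomic_dec_and_test(). A standalone sketch of that contract using C11 atomics, with the wake-up reduced to a printf:)

#include <stdatomic.h>
#include <stdio.h>

/* atomic_dec_and_test() semantics: decrement, and report whether the
 * counter reached zero, i.e. whether this caller held the last reference. */
static int dec_and_test(atomic_int *count)
{
	return atomic_fetch_sub(count, 1) == 1;
}

int main(void)
{
	atomic_int count = 2;

	/* first put: not the last reference, so no wake-up */
	printf("last=%d\n", dec_and_test(&count));
	/* second put: last reference, this is where wake_up() would run */
	printf("last=%d\n", dec_and_test(&count));
	return 0;
}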
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index e393ae01b5d2..b8aac1389a96 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -404,9 +404,16 @@ struct arm_smmu_cfg {
404#define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx) 404#define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx)
405#define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1) 405#define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1)
406 406
407enum arm_smmu_domain_stage {
408 ARM_SMMU_DOMAIN_S1 = 0,
409 ARM_SMMU_DOMAIN_S2,
410 ARM_SMMU_DOMAIN_NESTED,
411};
412
407struct arm_smmu_domain { 413struct arm_smmu_domain {
408 struct arm_smmu_device *smmu; 414 struct arm_smmu_device *smmu;
409 struct arm_smmu_cfg cfg; 415 struct arm_smmu_cfg cfg;
416 enum arm_smmu_domain_stage stage;
410 spinlock_t lock; 417 spinlock_t lock;
411}; 418};
412 419
@@ -906,19 +913,46 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
 	if (smmu_domain->smmu)
 		goto out_unlock;

-	if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) {
+	/*
+	 * Mapping the requested stage onto what we support is surprisingly
+	 * complicated, mainly because the spec allows S1+S2 SMMUs without
+	 * support for nested translation. That means we end up with the
+	 * following table:
+	 *
+	 * Requested        Supported        Actual
+	 *     S1               N              S1
+	 *     S1             S1+S2            S1
+	 *     S1               S2             S2
+	 *     S1               S1             S1
+	 *     N                N              N
+	 *     N              S1+S2            S2
+	 *     N                S2             S2
+	 *     N                S1             S1
+	 *
+	 * Note that you can't actually request stage-2 mappings.
+	 */
+	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1))
+		smmu_domain->stage = ARM_SMMU_DOMAIN_S2;
+	if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2))
+		smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
+
+	switch (smmu_domain->stage) {
+	case ARM_SMMU_DOMAIN_S1:
+		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
+		start = smmu->num_s2_context_banks;
+		break;
+	case ARM_SMMU_DOMAIN_NESTED:
 		/*
 		 * We will likely want to change this if/when KVM gets
 		 * involved.
 		 */
-		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
-		start = smmu->num_s2_context_banks;
-	} else if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) {
-		cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS;
-		start = smmu->num_s2_context_banks;
-	} else {
+	case ARM_SMMU_DOMAIN_S2:
 		cfg->cbar = CBAR_TYPE_S2_TRANS;
 		start = 0;
+		break;
+	default:
+		ret = -EINVAL;
+		goto out_unlock;
 	}

 	ret = __arm_smmu_alloc_bitmap(smmu->context_map, start,
@@ -1281,7 +1315,7 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
 				   unsigned long pfn, int prot, int stage)
 {
 	pte_t *pte, *start;
-	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN;
+	pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF;

 	if (pmd_none(*pmd)) {
 		/* Allocate a new set of tables */
@@ -1315,10 +1349,11 @@ static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd,
 		pteval |= ARM_SMMU_PTE_MEMATTR_NC;
 	}

+	if (prot & IOMMU_NOEXEC)
+		pteval |= ARM_SMMU_PTE_XN;
+
 	/* If no access, create a faulting entry to avoid TLB fills */
-	if (prot & IOMMU_EXEC)
-		pteval &= ~ARM_SMMU_PTE_XN;
-	else if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
+	if (!(prot & (IOMMU_READ | IOMMU_WRITE)))
 		pteval &= ~ARM_SMMU_PTE_PAGE;

 	pteval |= ARM_SMMU_PTE_SH_IS;
@@ -1568,6 +1603,8 @@ static bool arm_smmu_capable(enum iommu_cap cap)
 		return true;
 	case IOMMU_CAP_INTR_REMAP:
 		return true; /* MSIs are just memory writes */
+	case IOMMU_CAP_NOEXEC:
+		return true;
 	default:
 		return false;
 	}
@@ -1644,21 +1681,57 @@ static void arm_smmu_remove_device(struct device *dev)
1644 iommu_group_remove_device(dev); 1681 iommu_group_remove_device(dev);
1645} 1682}
1646 1683
1684static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
1685 enum iommu_attr attr, void *data)
1686{
1687 struct arm_smmu_domain *smmu_domain = domain->priv;
1688
1689 switch (attr) {
1690 case DOMAIN_ATTR_NESTING:
1691 *(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
1692 return 0;
1693 default:
1694 return -ENODEV;
1695 }
1696}
1697
1698static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
1699 enum iommu_attr attr, void *data)
1700{
1701 struct arm_smmu_domain *smmu_domain = domain->priv;
1702
1703 switch (attr) {
1704 case DOMAIN_ATTR_NESTING:
1705 if (smmu_domain->smmu)
1706 return -EPERM;
1707 if (*(int *)data)
1708 smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED;
1709 else
1710 smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
1711
1712 return 0;
1713 default:
1714 return -ENODEV;
1715 }
1716}
1717
1647static const struct iommu_ops arm_smmu_ops = { 1718static const struct iommu_ops arm_smmu_ops = {
1648 .capable = arm_smmu_capable, 1719 .capable = arm_smmu_capable,
1649 .domain_init = arm_smmu_domain_init, 1720 .domain_init = arm_smmu_domain_init,
1650 .domain_destroy = arm_smmu_domain_destroy, 1721 .domain_destroy = arm_smmu_domain_destroy,
1651 .attach_dev = arm_smmu_attach_dev, 1722 .attach_dev = arm_smmu_attach_dev,
1652 .detach_dev = arm_smmu_detach_dev, 1723 .detach_dev = arm_smmu_detach_dev,
1653 .map = arm_smmu_map, 1724 .map = arm_smmu_map,
1654 .unmap = arm_smmu_unmap, 1725 .unmap = arm_smmu_unmap,
1655 .map_sg = default_iommu_map_sg, 1726 .map_sg = default_iommu_map_sg,
1656 .iova_to_phys = arm_smmu_iova_to_phys, 1727 .iova_to_phys = arm_smmu_iova_to_phys,
1657 .add_device = arm_smmu_add_device, 1728 .add_device = arm_smmu_add_device,
1658 .remove_device = arm_smmu_remove_device, 1729 .remove_device = arm_smmu_remove_device,
1659 .pgsize_bitmap = (SECTION_SIZE | 1730 .domain_get_attr = arm_smmu_domain_get_attr,
1660 ARM_SMMU_PTE_CONT_SIZE | 1731 .domain_set_attr = arm_smmu_domain_set_attr,
1661 PAGE_SIZE), 1732 .pgsize_bitmap = (SECTION_SIZE |
1733 ARM_SMMU_PTE_CONT_SIZE |
1734 PAGE_SIZE),
1662}; 1735};
1663 1736
1664static void arm_smmu_device_reset(struct arm_smmu_device *smmu) 1737static void arm_smmu_device_reset(struct arm_smmu_device *smmu)
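Aside: the domain_get_attr/domain_set_attr hooks added above expose the stage selection through the generic attribute API; note that set_attr returns -EPERM once the domain is attached (smmu_domain->smmu is non-NULL), so nesting must be chosen first. A hedged usage sketch for a freshly allocated, unattached domain:

    /* Request nested (stage-2) translation before the first attach. */
    int nesting = 1, ret;

    ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_NESTING, &nesting);
    if (ret)
            return ret;     /* -EPERM if a device is already attached */

    ret = iommu_attach_device(domain, dev);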
@@ -2073,8 +2146,20 @@ static struct platform_driver arm_smmu_driver = {
2073 2146
2074static int __init arm_smmu_init(void) 2147static int __init arm_smmu_init(void)
2075{ 2148{
2149 struct device_node *np;
2076 int ret; 2150 int ret;
2077 2151
2152 /*
2153 * Play nice with systems that don't have an ARM SMMU by checking that
2154 * an ARM SMMU exists in the system before proceeding with the driver
2155 * and IOMMU bus operation registration.
2156 */
2157 np = of_find_matching_node(NULL, arm_smmu_of_match);
2158 if (!np)
2159 return 0;
2160
2161 of_node_put(np);
2162
2078 ret = platform_driver_register(&arm_smmu_driver); 2163 ret = platform_driver_register(&arm_smmu_driver);
2079 if (ret) 2164 if (ret)
2080 return ret; 2165 return ret;
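Aside: the init-time check keeps the driver from registering IOMMU bus ops on systems without an SMMU. Worth noting that of_find_matching_node() returns the node with its reference count raised, which is why a hit is immediately balanced with of_node_put(). The guard reduced to its essentials (a sketch, not the full init function):

    np = of_find_matching_node(NULL, arm_smmu_of_match);
    if (!np)
            return 0;       /* no SMMU in the DT: stay out of the way */
    of_node_put(np);        /* drop the reference taken by the lookup */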
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index c5c61cabd6e3..9847613085e1 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -44,6 +44,14 @@
44 44
45#include "irq_remapping.h" 45#include "irq_remapping.h"
46 46
47typedef int (*dmar_res_handler_t)(struct acpi_dmar_header *, void *);
48struct dmar_res_callback {
49 dmar_res_handler_t cb[ACPI_DMAR_TYPE_RESERVED];
50 void *arg[ACPI_DMAR_TYPE_RESERVED];
51 bool ignore_unhandled;
52 bool print_entry;
53};
54
47/* 55/*
48 * Assumptions: 56 * Assumptions:
49 * 1) The hotplug framework guarantees that a DMAR unit will be hot-added 57
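Aside: dmar_res_callback bundles one handler and one opaque argument per ACPI DMAR structure type, so every table walk below can share a single dispatch loop instead of open-coding a switch. The dispatch step in isolation (a sketch; `entry` and `cb` as in the later hunks):

    /* Dispatch one parsed DMAR entry through the callback table. */
    if (entry->type < ACPI_DMAR_TYPE_RESERVED && cb->cb[entry->type])
            ret = cb->cb[entry->type](entry, cb->arg[entry->type]);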
@@ -62,11 +70,12 @@ LIST_HEAD(dmar_drhd_units);
62struct acpi_table_header * __initdata dmar_tbl; 70struct acpi_table_header * __initdata dmar_tbl;
63static acpi_size dmar_tbl_size; 71static acpi_size dmar_tbl_size;
64static int dmar_dev_scope_status = 1; 72static int dmar_dev_scope_status = 1;
73static unsigned long dmar_seq_ids[BITS_TO_LONGS(DMAR_UNITS_SUPPORTED)];
65 74
66static int alloc_iommu(struct dmar_drhd_unit *drhd); 75static int alloc_iommu(struct dmar_drhd_unit *drhd);
67static void free_iommu(struct intel_iommu *iommu); 76static void free_iommu(struct intel_iommu *iommu);
68 77
69static void __init dmar_register_drhd_unit(struct dmar_drhd_unit *drhd) 78static void dmar_register_drhd_unit(struct dmar_drhd_unit *drhd)
70{ 79{
71 /* 80 /*
72 * add INCLUDE_ALL at the tail, so scan the list will find it at 81 * add INCLUDE_ALL at the tail, so scan the list will find it at
@@ -344,24 +353,45 @@ static struct notifier_block dmar_pci_bus_nb = {
344 .priority = INT_MIN, 353 .priority = INT_MIN,
345}; 354};
346 355
356static struct dmar_drhd_unit *
357dmar_find_dmaru(struct acpi_dmar_hardware_unit *drhd)
358{
359 struct dmar_drhd_unit *dmaru;
360
361 list_for_each_entry_rcu(dmaru, &dmar_drhd_units, list)
362 if (dmaru->segment == drhd->segment &&
363 dmaru->reg_base_addr == drhd->address)
364 return dmaru;
365
366 return NULL;
367}
368
347/** 369/**
348 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition 370 * dmar_parse_one_drhd - parses exactly one DMA remapping hardware definition
349 * structure which uniquely represents one DMA remapping hardware unit 371
350 * present in the platform 372 * present in the platform
351 */ 373 */
352static int __init 374static int dmar_parse_one_drhd(struct acpi_dmar_header *header, void *arg)
353dmar_parse_one_drhd(struct acpi_dmar_header *header)
354{ 375{
355 struct acpi_dmar_hardware_unit *drhd; 376 struct acpi_dmar_hardware_unit *drhd;
356 struct dmar_drhd_unit *dmaru; 377 struct dmar_drhd_unit *dmaru;
357 int ret = 0; 378 int ret = 0;
358 379
359 drhd = (struct acpi_dmar_hardware_unit *)header; 380 drhd = (struct acpi_dmar_hardware_unit *)header;
360 dmaru = kzalloc(sizeof(*dmaru), GFP_KERNEL); 381 dmaru = dmar_find_dmaru(drhd);
382 if (dmaru)
383 goto out;
384
385 dmaru = kzalloc(sizeof(*dmaru) + header->length, GFP_KERNEL);
361 if (!dmaru) 386 if (!dmaru)
362 return -ENOMEM; 387 return -ENOMEM;
363 388
364 dmaru->hdr = header; 389 /*
390 * If header is allocated from slab by ACPI _DSM method, we need to
391 * copy the content because the memory buffer will be freed on return.
392 */
393 dmaru->hdr = (void *)(dmaru + 1);
394 memcpy(dmaru->hdr, header, header->length);
365 dmaru->reg_base_addr = drhd->address; 395 dmaru->reg_base_addr = drhd->address;
366 dmaru->segment = drhd->segment; 396 dmaru->segment = drhd->segment;
367 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */ 397 dmaru->include_all = drhd->flags & 0x1; /* BIT0: INCLUDE_ALL */
@@ -381,6 +411,11 @@ dmar_parse_one_drhd(struct acpi_dmar_header *header)
381 return ret; 411 return ret;
382 } 412 }
383 dmar_register_drhd_unit(dmaru); 413 dmar_register_drhd_unit(dmaru);
414
415out:
416 if (arg)
417 (*(int *)arg)++;
418
384 return 0; 419 return 0;
385} 420}
386 421
@@ -393,7 +428,8 @@ static void dmar_free_drhd(struct dmar_drhd_unit *dmaru)
393 kfree(dmaru); 428 kfree(dmaru);
394} 429}
395 430
396static int __init dmar_parse_one_andd(struct acpi_dmar_header *header) 431static int __init dmar_parse_one_andd(struct acpi_dmar_header *header,
432 void *arg)
397{ 433{
398 struct acpi_dmar_andd *andd = (void *)header; 434 struct acpi_dmar_andd *andd = (void *)header;
399 435
@@ -414,8 +450,7 @@ static int __init dmar_parse_one_andd(struct acpi_dmar_header *header)
414} 450}
415 451
416#ifdef CONFIG_ACPI_NUMA 452#ifdef CONFIG_ACPI_NUMA
417static int __init 453static int dmar_parse_one_rhsa(struct acpi_dmar_header *header, void *arg)
418dmar_parse_one_rhsa(struct acpi_dmar_header *header)
419{ 454{
420 struct acpi_dmar_rhsa *rhsa; 455 struct acpi_dmar_rhsa *rhsa;
421 struct dmar_drhd_unit *drhd; 456 struct dmar_drhd_unit *drhd;
@@ -442,6 +477,8 @@ dmar_parse_one_rhsa(struct acpi_dmar_header *header)
442 477
443 return 0; 478 return 0;
444} 479}
480#else
481#define dmar_parse_one_rhsa dmar_res_noop
445#endif 482#endif
446 483
447static void __init 484static void __init
@@ -503,6 +540,52 @@ static int __init dmar_table_detect(void)
503 return (ACPI_SUCCESS(status) ? 1 : 0); 540 return (ACPI_SUCCESS(status) ? 1 : 0);
504} 541}
505 542
543static int dmar_walk_remapping_entries(struct acpi_dmar_header *start,
544 size_t len, struct dmar_res_callback *cb)
545{
546 int ret = 0;
547 struct acpi_dmar_header *iter, *next;
548 struct acpi_dmar_header *end = ((void *)start) + len;
549
550 for (iter = start; iter < end && ret == 0; iter = next) {
551 next = (void *)iter + iter->length;
552 if (iter->length == 0) {
553 /* Avoid looping forever on bad ACPI tables */
554 pr_debug(FW_BUG "Invalid 0-length structure\n");
555 break;
556 } else if (next > end) {
557 /* Avoid passing table end */
558 pr_warn(FW_BUG "record passes table end\n");
559 ret = -EINVAL;
560 break;
561 }
562
563 if (cb->print_entry)
564 dmar_table_print_dmar_entry(iter);
565
566 if (iter->type >= ACPI_DMAR_TYPE_RESERVED) {
567 /* continue for forward compatibility */
568 pr_debug("Unknown DMAR structure type %d\n",
569 iter->type);
570 } else if (cb->cb[iter->type]) {
571 ret = cb->cb[iter->type](iter, cb->arg[iter->type]);
572 } else if (!cb->ignore_unhandled) {
573 pr_warn("No handler for DMAR structure type %d\n",
574 iter->type);
575 ret = -EINVAL;
576 }
577 }
578
579 return ret;
580}
581
582static inline int dmar_walk_dmar_table(struct acpi_table_dmar *dmar,
583 struct dmar_res_callback *cb)
584{
585 return dmar_walk_remapping_entries((void *)(dmar + 1),
586 dmar->header.length - sizeof(*dmar), cb);
587}
588
506/** 589/**
507 * parse_dmar_table - parses the DMA reporting table 590 * parse_dmar_table - parses the DMA reporting table
508 */ 591 */
@@ -510,9 +593,18 @@ static int __init
510parse_dmar_table(void) 593parse_dmar_table(void)
511{ 594{
512 struct acpi_table_dmar *dmar; 595 struct acpi_table_dmar *dmar;
513 struct acpi_dmar_header *entry_header;
514 int ret = 0; 596 int ret = 0;
515 int drhd_count = 0; 597 int drhd_count = 0;
598 struct dmar_res_callback cb = {
599 .print_entry = true,
600 .ignore_unhandled = true,
601 .arg[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &drhd_count,
602 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_parse_one_drhd,
603 .cb[ACPI_DMAR_TYPE_RESERVED_MEMORY] = &dmar_parse_one_rmrr,
604 .cb[ACPI_DMAR_TYPE_ROOT_ATS] = &dmar_parse_one_atsr,
605 .cb[ACPI_DMAR_TYPE_HARDWARE_AFFINITY] = &dmar_parse_one_rhsa,
606 .cb[ACPI_DMAR_TYPE_NAMESPACE] = &dmar_parse_one_andd,
607 };
516 608
517 /* 609 /*
518 * Do it again, earlier dmar_tbl mapping could be mapped with 610 * Do it again, earlier dmar_tbl mapping could be mapped with
@@ -536,51 +628,10 @@ parse_dmar_table(void)
536 } 628 }
537 629
538 pr_info("Host address width %d\n", dmar->width + 1); 630 pr_info("Host address width %d\n", dmar->width + 1);
539 631 ret = dmar_walk_dmar_table(dmar, &cb);
540 entry_header = (struct acpi_dmar_header *)(dmar + 1); 632 if (ret == 0 && drhd_count == 0)
541 while (((unsigned long)entry_header) <
542 (((unsigned long)dmar) + dmar_tbl->length)) {
543 /* Avoid looping forever on bad ACPI tables */
544 if (entry_header->length == 0) {
545 pr_warn("Invalid 0-length structure\n");
546 ret = -EINVAL;
547 break;
548 }
549
550 dmar_table_print_dmar_entry(entry_header);
551
552 switch (entry_header->type) {
553 case ACPI_DMAR_TYPE_HARDWARE_UNIT:
554 drhd_count++;
555 ret = dmar_parse_one_drhd(entry_header);
556 break;
557 case ACPI_DMAR_TYPE_RESERVED_MEMORY:
558 ret = dmar_parse_one_rmrr(entry_header);
559 break;
560 case ACPI_DMAR_TYPE_ROOT_ATS:
561 ret = dmar_parse_one_atsr(entry_header);
562 break;
563 case ACPI_DMAR_TYPE_HARDWARE_AFFINITY:
564#ifdef CONFIG_ACPI_NUMA
565 ret = dmar_parse_one_rhsa(entry_header);
566#endif
567 break;
568 case ACPI_DMAR_TYPE_NAMESPACE:
569 ret = dmar_parse_one_andd(entry_header);
570 break;
571 default:
572 pr_warn("Unknown DMAR structure type %d\n",
573 entry_header->type);
574 ret = 0; /* for forward compatibility */
575 break;
576 }
577 if (ret)
578 break;
579
580 entry_header = ((void *)entry_header + entry_header->length);
581 }
582 if (drhd_count == 0)
583 pr_warn(FW_BUG "No DRHD structure found in DMAR table\n"); 633 pr_warn(FW_BUG "No DRHD structure found in DMAR table\n");
634
584 return ret; 635 return ret;
585} 636}
586 637
@@ -778,76 +829,68 @@ static void warn_invalid_dmar(u64 addr, const char *message)
778 dmi_get_system_info(DMI_PRODUCT_VERSION)); 829 dmi_get_system_info(DMI_PRODUCT_VERSION));
779} 830}
780 831
781static int __init check_zero_address(void) 832static int __ref
833dmar_validate_one_drhd(struct acpi_dmar_header *entry, void *arg)
782{ 834{
783 struct acpi_table_dmar *dmar;
784 struct acpi_dmar_header *entry_header;
785 struct acpi_dmar_hardware_unit *drhd; 835 struct acpi_dmar_hardware_unit *drhd;
836 void __iomem *addr;
837 u64 cap, ecap;
786 838
787 dmar = (struct acpi_table_dmar *)dmar_tbl; 839 drhd = (void *)entry;
788 entry_header = (struct acpi_dmar_header *)(dmar + 1); 840 if (!drhd->address) {
789 841 warn_invalid_dmar(0, "");
790 while (((unsigned long)entry_header) < 842 return -EINVAL;
791 (((unsigned long)dmar) + dmar_tbl->length)) { 843 }
792 /* Avoid looping forever on bad ACPI tables */
793 if (entry_header->length == 0) {
794 pr_warn("Invalid 0-length structure\n");
795 return 0;
796 }
797 844
798 if (entry_header->type == ACPI_DMAR_TYPE_HARDWARE_UNIT) { 845 if (arg)
799 void __iomem *addr; 846 addr = ioremap(drhd->address, VTD_PAGE_SIZE);
800 u64 cap, ecap; 847 else
848 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE);
849 if (!addr) {
850 pr_warn("IOMMU: can't validate: %llx\n", drhd->address);
851 return -EINVAL;
852 }
801 853
802 drhd = (void *)entry_header; 854 cap = dmar_readq(addr + DMAR_CAP_REG);
803 if (!drhd->address) { 855 ecap = dmar_readq(addr + DMAR_ECAP_REG);
804 warn_invalid_dmar(0, "");
805 goto failed;
806 }
807 856
808 addr = early_ioremap(drhd->address, VTD_PAGE_SIZE); 857 if (arg)
809 if (!addr ) { 858 iounmap(addr);
810 printk("IOMMU: can't validate: %llx\n", drhd->address); 859 else
811 goto failed; 860 early_iounmap(addr, VTD_PAGE_SIZE);
812 }
813 cap = dmar_readq(addr + DMAR_CAP_REG);
814 ecap = dmar_readq(addr + DMAR_ECAP_REG);
815 early_iounmap(addr, VTD_PAGE_SIZE);
816 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
817 warn_invalid_dmar(drhd->address,
818 " returns all ones");
819 goto failed;
820 }
821 }
822 861
823 entry_header = ((void *)entry_header + entry_header->length); 862 if (cap == (uint64_t)-1 && ecap == (uint64_t)-1) {
863 warn_invalid_dmar(drhd->address, " returns all ones");
864 return -EINVAL;
824 } 865 }
825 return 1;
826 866
827failed:
828 return 0; 867 return 0;
829} 868}
830 869
831int __init detect_intel_iommu(void) 870int __init detect_intel_iommu(void)
832{ 871{
833 int ret; 872 int ret;
873 struct dmar_res_callback validate_drhd_cb = {
874 .cb[ACPI_DMAR_TYPE_HARDWARE_UNIT] = &dmar_validate_one_drhd,
875 .ignore_unhandled = true,
876 };
834 877
835 down_write(&dmar_global_lock); 878 down_write(&dmar_global_lock);
836 ret = dmar_table_detect(); 879 ret = dmar_table_detect();
837 if (ret) 880 if (ret)
838 ret = check_zero_address(); 881 ret = !dmar_walk_dmar_table((struct acpi_table_dmar *)dmar_tbl,
839 { 882 &validate_drhd_cb);
840 if (ret && !no_iommu && !iommu_detected && !dmar_disabled) { 883 if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
841 iommu_detected = 1; 884 iommu_detected = 1;
842 /* Make sure ACS will be enabled */ 885 /* Make sure ACS will be enabled */
843 pci_request_acs(); 886 pci_request_acs();
844 } 887 }
845 888
846#ifdef CONFIG_X86 889#ifdef CONFIG_X86
847 if (ret) 890 if (ret)
848 x86_init.iommu.iommu_init = intel_iommu_init; 891 x86_init.iommu.iommu_init = intel_iommu_init;
849#endif 892#endif
850 } 893
851 early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size); 894 early_acpi_os_unmap_memory((void __iomem *)dmar_tbl, dmar_tbl_size);
852 dmar_tbl = NULL; 895 dmar_tbl = NULL;
853 up_write(&dmar_global_lock); 896 up_write(&dmar_global_lock);
@@ -931,11 +974,32 @@ out:
931 return err; 974 return err;
932} 975}
933 976
977static int dmar_alloc_seq_id(struct intel_iommu *iommu)
978{
979 iommu->seq_id = find_first_zero_bit(dmar_seq_ids,
980 DMAR_UNITS_SUPPORTED);
981 if (iommu->seq_id >= DMAR_UNITS_SUPPORTED) {
982 iommu->seq_id = -1;
983 } else {
984 set_bit(iommu->seq_id, dmar_seq_ids);
985 sprintf(iommu->name, "dmar%d", iommu->seq_id);
986 }
987
988 return iommu->seq_id;
989}
990
991static void dmar_free_seq_id(struct intel_iommu *iommu)
992{
993 if (iommu->seq_id >= 0) {
994 clear_bit(iommu->seq_id, dmar_seq_ids);
995 iommu->seq_id = -1;
996 }
997}
998
934static int alloc_iommu(struct dmar_drhd_unit *drhd) 999static int alloc_iommu(struct dmar_drhd_unit *drhd)
935{ 1000{
936 struct intel_iommu *iommu; 1001 struct intel_iommu *iommu;
937 u32 ver, sts; 1002 u32 ver, sts;
938 static int iommu_allocated = 0;
939 int agaw = 0; 1003 int agaw = 0;
940 int msagaw = 0; 1004 int msagaw = 0;
941 int err; 1005 int err;
@@ -949,13 +1013,16 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
949 if (!iommu) 1013 if (!iommu)
950 return -ENOMEM; 1014 return -ENOMEM;
951 1015
952 iommu->seq_id = iommu_allocated++; 1016 if (dmar_alloc_seq_id(iommu) < 0) {
953 sprintf (iommu->name, "dmar%d", iommu->seq_id); 1017 pr_err("IOMMU: failed to allocate seq_id\n");
1018 err = -ENOSPC;
1019 goto error;
1020 }
954 1021
955 err = map_iommu(iommu, drhd->reg_base_addr); 1022 err = map_iommu(iommu, drhd->reg_base_addr);
956 if (err) { 1023 if (err) {
957 pr_err("IOMMU: failed to map %s\n", iommu->name); 1024 pr_err("IOMMU: failed to map %s\n", iommu->name);
958 goto error; 1025 goto error_free_seq_id;
959 } 1026 }
960 1027
961 err = -EINVAL; 1028 err = -EINVAL;
@@ -1005,9 +1072,11 @@ static int alloc_iommu(struct dmar_drhd_unit *drhd)
1005 1072
1006 return 0; 1073 return 0;
1007 1074
1008 err_unmap: 1075err_unmap:
1009 unmap_iommu(iommu); 1076 unmap_iommu(iommu);
1010 error: 1077error_free_seq_id:
1078 dmar_free_seq_id(iommu);
1079error:
1011 kfree(iommu); 1080 kfree(iommu);
1012 return err; 1081 return err;
1013} 1082}
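Aside: replacing the monotonically growing iommu_allocated counter with a bitmap makes sequence IDs reclaimable, which matters once units can be hot-removed and re-added. The allocator pair reduced to the generic idiom (a sketch; DMAR_UNITS_SUPPORTED is the limit introduced by this series):

    static DECLARE_BITMAP(ids, DMAR_UNITS_SUPPORTED);

    static int alloc_id(void)
    {
            int id = find_first_zero_bit(ids, DMAR_UNITS_SUPPORTED);

            if (id >= DMAR_UNITS_SUPPORTED)
                    return -ENOSPC;         /* all slots in use */
            set_bit(id, ids);
            return id;
    }

    static void free_id(int id)
    {
            if (id >= 0)
                    clear_bit(id, ids);     /* slot becomes reusable */
    }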
@@ -1031,6 +1100,7 @@ static void free_iommu(struct intel_iommu *iommu)
1031 if (iommu->reg) 1100 if (iommu->reg)
1032 unmap_iommu(iommu); 1101 unmap_iommu(iommu);
1033 1102
1103 dmar_free_seq_id(iommu);
1034 kfree(iommu); 1104 kfree(iommu);
1035} 1105}
1036 1106
@@ -1661,12 +1731,17 @@ int __init dmar_ir_support(void)
1661 return dmar->flags & 0x1; 1731 return dmar->flags & 0x1;
1662} 1732}
1663 1733
1734/* Check whether DMAR units are in use */
1735static inline bool dmar_in_use(void)
1736{
1737 return irq_remapping_enabled || intel_iommu_enabled;
1738}
1739
1664static int __init dmar_free_unused_resources(void) 1740static int __init dmar_free_unused_resources(void)
1665{ 1741{
1666 struct dmar_drhd_unit *dmaru, *dmaru_n; 1742 struct dmar_drhd_unit *dmaru, *dmaru_n;
1667 1743
1668 /* DMAR units are in use */ 1744 if (dmar_in_use())
1669 if (irq_remapping_enabled || intel_iommu_enabled)
1670 return 0; 1745 return 0;
1671 1746
1672 if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units)) 1747 if (dmar_dev_scope_status != 1 && !list_empty(&dmar_drhd_units))
@@ -1684,3 +1759,242 @@ static int __init dmar_free_unused_resources(void)
1684 1759
1685late_initcall(dmar_free_unused_resources); 1760late_initcall(dmar_free_unused_resources);
1686IOMMU_INIT_POST(detect_intel_iommu); 1761IOMMU_INIT_POST(detect_intel_iommu);
1762
1763/*
1764 * DMAR Hotplug Support
1765 * For more details, please refer to Intel(R) Virtualization Technology
1766 * for Directed-IO Architecture Specification, Rev 2.2, Section 8.8
1767 * "Remapping Hardware Unit Hot Plug".
1768 */
1769static u8 dmar_hp_uuid[] = {
1770 /* 0000 */ 0xA6, 0xA3, 0xC1, 0xD8, 0x9B, 0xBE, 0x9B, 0x4C,
1771 /* 0008 */ 0x91, 0xBF, 0xC3, 0xCB, 0x81, 0xFC, 0x5D, 0xAF
1772};
1773
1774/*
1775 * Currently there's only one revision and BIOS will not check the revision id,
1776 * so use 0 for safety.
1777 */
1778#define DMAR_DSM_REV_ID 0
1779#define DMAR_DSM_FUNC_DRHD 1
1780#define DMAR_DSM_FUNC_ATSR 2
1781#define DMAR_DSM_FUNC_RHSA 3
1782
1783static inline bool dmar_detect_dsm(acpi_handle handle, int func)
1784{
1785 return acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func);
1786}
1787
1788static int dmar_walk_dsm_resource(acpi_handle handle, int func,
1789 dmar_res_handler_t handler, void *arg)
1790{
1791 int ret = -ENODEV;
1792 union acpi_object *obj;
1793 struct acpi_dmar_header *start;
1794 struct dmar_res_callback callback;
1795 static int res_type[] = {
1796 [DMAR_DSM_FUNC_DRHD] = ACPI_DMAR_TYPE_HARDWARE_UNIT,
1797 [DMAR_DSM_FUNC_ATSR] = ACPI_DMAR_TYPE_ROOT_ATS,
1798 [DMAR_DSM_FUNC_RHSA] = ACPI_DMAR_TYPE_HARDWARE_AFFINITY,
1799 };
1800
1801 if (!dmar_detect_dsm(handle, func))
1802 return 0;
1803
1804 obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
1805 func, NULL, ACPI_TYPE_BUFFER);
1806 if (!obj)
1807 return -ENODEV;
1808
1809 memset(&callback, 0, sizeof(callback));
1810 callback.cb[res_type[func]] = handler;
1811 callback.arg[res_type[func]] = arg;
1812 start = (struct acpi_dmar_header *)obj->buffer.pointer;
1813 ret = dmar_walk_remapping_entries(start, obj->buffer.length, &callback);
1814
1815 ACPI_FREE(obj);
1816
1817 return ret;
1818}
1819
1820static int dmar_hp_add_drhd(struct acpi_dmar_header *header, void *arg)
1821{
1822 int ret;
1823 struct dmar_drhd_unit *dmaru;
1824
1825 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1826 if (!dmaru)
1827 return -ENODEV;
1828
1829 ret = dmar_ir_hotplug(dmaru, true);
1830 if (ret == 0)
1831 ret = dmar_iommu_hotplug(dmaru, true);
1832
1833 return ret;
1834}
1835
1836static int dmar_hp_remove_drhd(struct acpi_dmar_header *header, void *arg)
1837{
1838 int i, ret;
1839 struct device *dev;
1840 struct dmar_drhd_unit *dmaru;
1841
1842 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1843 if (!dmaru)
1844 return 0;
1845
1846 /*
1847 * All PCI devices managed by this unit should have been destroyed.
1848 */
1849 if (!dmaru->include_all && dmaru->devices && dmaru->devices_cnt)
1850 for_each_active_dev_scope(dmaru->devices,
1851 dmaru->devices_cnt, i, dev)
1852 return -EBUSY;
1853
1854 ret = dmar_ir_hotplug(dmaru, false);
1855 if (ret == 0)
1856 ret = dmar_iommu_hotplug(dmaru, false);
1857
1858 return ret;
1859}
1860
1861static int dmar_hp_release_drhd(struct acpi_dmar_header *header, void *arg)
1862{
1863 struct dmar_drhd_unit *dmaru;
1864
1865 dmaru = dmar_find_dmaru((struct acpi_dmar_hardware_unit *)header);
1866 if (dmaru) {
1867 list_del_rcu(&dmaru->list);
1868 synchronize_rcu();
1869 dmar_free_drhd(dmaru);
1870 }
1871
1872 return 0;
1873}
1874
1875static int dmar_hotplug_insert(acpi_handle handle)
1876{
1877 int ret;
1878 int drhd_count = 0;
1879
1880 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1881 &dmar_validate_one_drhd, (void *)1);
1882 if (ret)
1883 goto out;
1884
1885 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1886 &dmar_parse_one_drhd, (void *)&drhd_count);
1887 if (ret == 0 && drhd_count == 0) {
1888 pr_warn(FW_BUG "No DRHD structures in buffer returned by _DSM method\n");
1889 goto out;
1890 } else if (ret) {
1891 goto release_drhd;
1892 }
1893
1894 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_RHSA,
1895 &dmar_parse_one_rhsa, NULL);
1896 if (ret)
1897 goto release_drhd;
1898
1899 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1900 &dmar_parse_one_atsr, NULL);
1901 if (ret)
1902 goto release_atsr;
1903
1904 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1905 &dmar_hp_add_drhd, NULL);
1906 if (!ret)
1907 return 0;
1908
1909 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1910 &dmar_hp_remove_drhd, NULL);
1911release_atsr:
1912 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1913 &dmar_release_one_atsr, NULL);
1914release_drhd:
1915 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1916 &dmar_hp_release_drhd, NULL);
1917out:
1918 return ret;
1919}
1920
1921static int dmar_hotplug_remove(acpi_handle handle)
1922{
1923 int ret;
1924
1925 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1926 &dmar_check_one_atsr, NULL);
1927 if (ret)
1928 return ret;
1929
1930 ret = dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1931 &dmar_hp_remove_drhd, NULL);
1932 if (ret == 0) {
1933 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_ATSR,
1934 &dmar_release_one_atsr, NULL));
1935 WARN_ON(dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1936 &dmar_hp_release_drhd, NULL));
1937 } else {
1938 dmar_walk_dsm_resource(handle, DMAR_DSM_FUNC_DRHD,
1939 &dmar_hp_add_drhd, NULL);
1940 }
1941
1942 return ret;
1943}
1944
1945static acpi_status dmar_get_dsm_handle(acpi_handle handle, u32 lvl,
1946 void *context, void **retval)
1947{
1948 acpi_handle *phdl = retval;
1949
1950 if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
1951 *phdl = handle;
1952 return AE_CTRL_TERMINATE;
1953 }
1954
1955 return AE_OK;
1956}
1957
1958static int dmar_device_hotplug(acpi_handle handle, bool insert)
1959{
1960 int ret;
1961 acpi_handle tmp = NULL;
1962 acpi_status status;
1963
1964 if (!dmar_in_use())
1965 return 0;
1966
1967 if (dmar_detect_dsm(handle, DMAR_DSM_FUNC_DRHD)) {
1968 tmp = handle;
1969 } else {
1970 status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
1971 ACPI_UINT32_MAX,
1972 dmar_get_dsm_handle,
1973 NULL, NULL, &tmp);
1974 if (ACPI_FAILURE(status)) {
1975 pr_warn("Failed to locate _DSM method.\n");
1976 return -ENXIO;
1977 }
1978 }
1979 if (tmp == NULL)
1980 return 0;
1981
1982 down_write(&dmar_global_lock);
1983 if (insert)
1984 ret = dmar_hotplug_insert(tmp);
1985 else
1986 ret = dmar_hotplug_remove(tmp);
1987 up_write(&dmar_global_lock);
1988
1989 return ret;
1990}
1991
1992int dmar_device_add(acpi_handle handle)
1993{
1994 return dmar_device_hotplug(handle, true);
1995}
1996
1997int dmar_device_remove(acpi_handle handle)
1998{
1999 return dmar_device_hotplug(handle, false);
2000}
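Aside: dmar_walk_dsm_resource() is the bridge between ACPI hotplug and the existing parsers. It probes the _DSM for the wanted function, evaluates it, and feeds the returned buffer through the same dmar_walk_remapping_entries() used for the static DMAR table. Condensed to the two ACPI calls it depends on (a sketch; error unwinding trimmed):

    if (!acpi_check_dsm(handle, dmar_hp_uuid, DMAR_DSM_REV_ID, 1 << func))
            return 0;       /* function not implemented by this handle */

    obj = acpi_evaluate_dsm_typed(handle, dmar_hp_uuid, DMAR_DSM_REV_ID,
                                  func, NULL, ACPI_TYPE_BUFFER);
    if (!obj)
            return -ENODEV;

    ret = dmar_walk_remapping_entries((void *)obj->buffer.pointer,
                                      obj->buffer.length, &callback);
    ACPI_FREE(obj);         /* the returned buffer is ours to free */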
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 02cd26a17fe0..1232336b960e 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -195,6 +195,7 @@ static inline void set_root_present(struct root_entry *root)
195} 195}
196static inline void set_root_value(struct root_entry *root, unsigned long value) 196static inline void set_root_value(struct root_entry *root, unsigned long value)
197{ 197{
198 root->val &= ~VTD_PAGE_MASK;
198 root->val |= value & VTD_PAGE_MASK; 199 root->val |= value & VTD_PAGE_MASK;
199} 200}
200 201
@@ -247,6 +248,7 @@ static inline void context_set_translation_type(struct context_entry *context,
247static inline void context_set_address_root(struct context_entry *context, 248static inline void context_set_address_root(struct context_entry *context,
248 unsigned long value) 249 unsigned long value)
249{ 250{
251 context->lo &= ~VTD_PAGE_MASK;
250 context->lo |= value & VTD_PAGE_MASK; 252 context->lo |= value & VTD_PAGE_MASK;
251} 253}
252 254
@@ -328,17 +330,10 @@ static int hw_pass_through = 1;
328/* si_domain contains multiple devices */ 330
329#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1) 331#define DOMAIN_FLAG_STATIC_IDENTITY (1 << 1)
330 332
331/* define the limit of IOMMUs supported in each domain */
332#ifdef CONFIG_X86
333# define IOMMU_UNITS_SUPPORTED MAX_IO_APICS
334#else
335# define IOMMU_UNITS_SUPPORTED 64
336#endif
337
338struct dmar_domain { 333struct dmar_domain {
339 int id; /* domain id */ 334 int id; /* domain id */
340 int nid; /* node id */ 335 int nid; /* node id */
341 DECLARE_BITMAP(iommu_bmp, IOMMU_UNITS_SUPPORTED); 336 DECLARE_BITMAP(iommu_bmp, DMAR_UNITS_SUPPORTED);
342 /* bitmap of iommus this domain uses*/ 337 /* bitmap of iommus this domain uses*/
343 338
344 struct list_head devices; /* all devices' list */ 339 struct list_head devices; /* all devices' list */
@@ -1132,8 +1127,11 @@ static int iommu_alloc_root_entry(struct intel_iommu *iommu)
1132 unsigned long flags; 1127 unsigned long flags;
1133 1128
1134 root = (struct root_entry *)alloc_pgtable_page(iommu->node); 1129 root = (struct root_entry *)alloc_pgtable_page(iommu->node);
1135 if (!root) 1130 if (!root) {
1131 pr_err("IOMMU: allocating root entry for %s failed\n",
1132 iommu->name);
1136 return -ENOMEM; 1133 return -ENOMEM;
1134 }
1137 1135
1138 __iommu_flush_cache(iommu, root, ROOT_SIZE); 1136 __iommu_flush_cache(iommu, root, ROOT_SIZE);
1139 1137
@@ -1473,7 +1471,7 @@ static int iommu_init_domains(struct intel_iommu *iommu)
1473 return 0; 1471 return 0;
1474} 1472}
1475 1473
1476static void free_dmar_iommu(struct intel_iommu *iommu) 1474static void disable_dmar_iommu(struct intel_iommu *iommu)
1477{ 1475{
1478 struct dmar_domain *domain; 1476 struct dmar_domain *domain;
1479 int i; 1477 int i;
@@ -1497,11 +1495,16 @@ static void free_dmar_iommu(struct intel_iommu *iommu)
1497 1495
1498 if (iommu->gcmd & DMA_GCMD_TE) 1496 if (iommu->gcmd & DMA_GCMD_TE)
1499 iommu_disable_translation(iommu); 1497 iommu_disable_translation(iommu);
1498}
1500 1499
1501 kfree(iommu->domains); 1500static void free_dmar_iommu(struct intel_iommu *iommu)
1502 kfree(iommu->domain_ids); 1501{
1503 iommu->domains = NULL; 1502 if ((iommu->domains) && (iommu->domain_ids)) {
1504 iommu->domain_ids = NULL; 1503 kfree(iommu->domains);
1504 kfree(iommu->domain_ids);
1505 iommu->domains = NULL;
1506 iommu->domain_ids = NULL;
1507 }
1505 1508
1506 g_iommus[iommu->seq_id] = NULL; 1509 g_iommus[iommu->seq_id] = NULL;
1507 1510
@@ -1983,7 +1986,7 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1983{ 1986{
1984 struct dma_pte *first_pte = NULL, *pte = NULL; 1987 struct dma_pte *first_pte = NULL, *pte = NULL;
1985 phys_addr_t uninitialized_var(pteval); 1988 phys_addr_t uninitialized_var(pteval);
1986 unsigned long sg_res; 1989 unsigned long sg_res = 0;
1987 unsigned int largepage_lvl = 0; 1990 unsigned int largepage_lvl = 0;
1988 unsigned long lvl_pages = 0; 1991 unsigned long lvl_pages = 0;
1989 1992
@@ -1994,10 +1997,8 @@ static int __domain_mapping(struct dmar_domain *domain, unsigned long iov_pfn,
1994 1997
1995 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP; 1998 prot &= DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP;
1996 1999
1997 if (sg) 2000 if (!sg) {
1998 sg_res = 0; 2001 sg_res = nr_pages;
1999 else {
2000 sg_res = nr_pages + 1;
2001 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot; 2002 pteval = ((phys_addr_t)phys_pfn << VTD_PAGE_SHIFT) | prot;
2002 } 2003 }
2003 2004
@@ -2708,6 +2709,41 @@ static int __init iommu_prepare_static_identity_mapping(int hw)
2708 return 0; 2709 return 0;
2709} 2710}
2710 2711
2712static void intel_iommu_init_qi(struct intel_iommu *iommu)
2713{
2714 /*
2715 * Start from the sane iommu hardware state.
2716 * If the queued invalidation is already initialized by us
2717 * (for example, while enabling interrupt-remapping) then
2718 * we got the things already rolling from a sane state.
2719 */
2720 if (!iommu->qi) {
2721 /*
2722 * Clear any previous faults.
2723 */
2724 dmar_fault(-1, iommu);
2725 /*
2726 * Disable queued invalidation if supported and already enabled
2727 * before OS handover.
2728 */
2729 dmar_disable_qi(iommu);
2730 }
2731
2732 if (dmar_enable_qi(iommu)) {
2733 /*
2734 * Queued Invalidate not enabled, use Register Based Invalidate
2735 */
2736 iommu->flush.flush_context = __iommu_flush_context;
2737 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2738 pr_info("IOMMU: %s using Register based invalidation\n",
2739 iommu->name);
2740 } else {
2741 iommu->flush.flush_context = qi_flush_context;
2742 iommu->flush.flush_iotlb = qi_flush_iotlb;
2743 pr_info("IOMMU: %s using Queued invalidation\n", iommu->name);
2744 }
2745}
2746
2711static int __init init_dmars(void) 2747static int __init init_dmars(void)
2712{ 2748{
2713 struct dmar_drhd_unit *drhd; 2749 struct dmar_drhd_unit *drhd;
@@ -2728,14 +2764,18 @@ static int __init init_dmars(void)
2728 * threaded kernel __init code path all other access are read 2764 * threaded kernel __init code path all other access are read
2729 * only 2765 * only
2730 */ 2766 */
2731 if (g_num_of_iommus < IOMMU_UNITS_SUPPORTED) { 2767 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED) {
2732 g_num_of_iommus++; 2768 g_num_of_iommus++;
2733 continue; 2769 continue;
2734 } 2770 }
2735 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n", 2771 printk_once(KERN_ERR "intel-iommu: exceeded %d IOMMUs\n",
2736 IOMMU_UNITS_SUPPORTED); 2772 DMAR_UNITS_SUPPORTED);
2737 } 2773 }
2738 2774
2775 /* Preallocate enough resources for IOMMU hot-addition */
2776 if (g_num_of_iommus < DMAR_UNITS_SUPPORTED)
2777 g_num_of_iommus = DMAR_UNITS_SUPPORTED;
2778
2739 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *), 2779 g_iommus = kcalloc(g_num_of_iommus, sizeof(struct intel_iommu *),
2740 GFP_KERNEL); 2780 GFP_KERNEL);
2741 if (!g_iommus) { 2781 if (!g_iommus) {
@@ -2764,58 +2804,14 @@ static int __init init_dmars(void)
2764 * among all IOMMU's. Need to Split it later. 2804 * among all IOMMU's. Need to Split it later.
2765 */ 2805 */
2766 ret = iommu_alloc_root_entry(iommu); 2806 ret = iommu_alloc_root_entry(iommu);
2767 if (ret) { 2807 if (ret)
2768 printk(KERN_ERR "IOMMU: allocate root entry failed\n");
2769 goto free_iommu; 2808 goto free_iommu;
2770 }
2771 if (!ecap_pass_through(iommu->ecap)) 2809 if (!ecap_pass_through(iommu->ecap))
2772 hw_pass_through = 0; 2810 hw_pass_through = 0;
2773 } 2811 }
2774 2812
2775 /* 2813 for_each_active_iommu(iommu, drhd)
2776 * Start from the sane iommu hardware state. 2814 intel_iommu_init_qi(iommu);
2777 */
2778 for_each_active_iommu(iommu, drhd) {
2779 /*
2780 * If the queued invalidation is already initialized by us
2781 * (for example, while enabling interrupt-remapping) then
2782 * we got the things already rolling from a sane state.
2783 */
2784 if (iommu->qi)
2785 continue;
2786
2787 /*
2788 * Clear any previous faults.
2789 */
2790 dmar_fault(-1, iommu);
2791 /*
2792 * Disable queued invalidation if supported and already enabled
2793 * before OS handover.
2794 */
2795 dmar_disable_qi(iommu);
2796 }
2797
2798 for_each_active_iommu(iommu, drhd) {
2799 if (dmar_enable_qi(iommu)) {
2800 /*
2801 * Queued Invalidate not enabled, use Register Based
2802 * Invalidate
2803 */
2804 iommu->flush.flush_context = __iommu_flush_context;
2805 iommu->flush.flush_iotlb = __iommu_flush_iotlb;
2806 printk(KERN_INFO "IOMMU %d 0x%Lx: using Register based "
2807 "invalidation\n",
2808 iommu->seq_id,
2809 (unsigned long long)drhd->reg_base_addr);
2810 } else {
2811 iommu->flush.flush_context = qi_flush_context;
2812 iommu->flush.flush_iotlb = qi_flush_iotlb;
2813 printk(KERN_INFO "IOMMU %d 0x%Lx: using Queued "
2814 "invalidation\n",
2815 iommu->seq_id,
2816 (unsigned long long)drhd->reg_base_addr);
2817 }
2818 }
2819 2815
2820 if (iommu_pass_through) 2816 if (iommu_pass_through)
2821 iommu_identity_mapping |= IDENTMAP_ALL; 2817 iommu_identity_mapping |= IDENTMAP_ALL;
@@ -2901,8 +2897,10 @@ static int __init init_dmars(void)
2901 return 0; 2897 return 0;
2902 2898
2903free_iommu: 2899free_iommu:
2904 for_each_active_iommu(iommu, drhd) 2900 for_each_active_iommu(iommu, drhd) {
2901 disable_dmar_iommu(iommu);
2905 free_dmar_iommu(iommu); 2902 free_dmar_iommu(iommu);
2903 }
2906 kfree(deferred_flush); 2904 kfree(deferred_flush);
2907free_g_iommus: 2905free_g_iommus:
2908 kfree(g_iommus); 2906 kfree(g_iommus);
@@ -3682,7 +3680,7 @@ static inline void init_iommu_pm_ops(void) {}
3682#endif /* CONFIG_PM */ 3680#endif /* CONFIG_PM */
3683 3681
3684 3682
3685int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header) 3683int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg)
3686{ 3684{
3687 struct acpi_dmar_reserved_memory *rmrr; 3685 struct acpi_dmar_reserved_memory *rmrr;
3688 struct dmar_rmrr_unit *rmrru; 3686 struct dmar_rmrr_unit *rmrru;
@@ -3708,17 +3706,48 @@ int __init dmar_parse_one_rmrr(struct acpi_dmar_header *header)
3708 return 0; 3706 return 0;
3709} 3707}
3710 3708
3711int __init dmar_parse_one_atsr(struct acpi_dmar_header *hdr) 3709static struct dmar_atsr_unit *dmar_find_atsr(struct acpi_dmar_atsr *atsr)
3710{
3711 struct dmar_atsr_unit *atsru;
3712 struct acpi_dmar_atsr *tmp;
3713
3714 list_for_each_entry_rcu(atsru, &dmar_atsr_units, list) {
3715 tmp = (struct acpi_dmar_atsr *)atsru->hdr;
3716 if (atsr->segment != tmp->segment)
3717 continue;
3718 if (atsr->header.length != tmp->header.length)
3719 continue;
3720 if (memcmp(atsr, tmp, atsr->header.length) == 0)
3721 return atsru;
3722 }
3723
3724 return NULL;
3725}
3726
3727int dmar_parse_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3712{ 3728{
3713 struct acpi_dmar_atsr *atsr; 3729 struct acpi_dmar_atsr *atsr;
3714 struct dmar_atsr_unit *atsru; 3730 struct dmar_atsr_unit *atsru;
3715 3731
3732 if (system_state != SYSTEM_BOOTING && !intel_iommu_enabled)
3733 return 0;
3734
3716 atsr = container_of(hdr, struct acpi_dmar_atsr, header); 3735 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3717 atsru = kzalloc(sizeof(*atsru), GFP_KERNEL); 3736 atsru = dmar_find_atsr(atsr);
3737 if (atsru)
3738 return 0;
3739
3740 atsru = kzalloc(sizeof(*atsru) + hdr->length, GFP_KERNEL);
3718 if (!atsru) 3741 if (!atsru)
3719 return -ENOMEM; 3742 return -ENOMEM;
3720 3743
3721 atsru->hdr = hdr; 3744 /*
3745 * If memory is allocated from slab by ACPI _DSM method, we need to
3746 * copy the memory content because the memory buffer will be freed
3747 * on return.
3748 */
3749 atsru->hdr = (void *)(atsru + 1);
3750 memcpy(atsru->hdr, hdr, hdr->length);
3722 atsru->include_all = atsr->flags & 0x1; 3751 atsru->include_all = atsr->flags & 0x1;
3723 if (!atsru->include_all) { 3752 if (!atsru->include_all) {
3724 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1), 3753 atsru->devices = dmar_alloc_dev_scope((void *)(atsr + 1),
@@ -3741,6 +3770,138 @@ static void intel_iommu_free_atsr(struct dmar_atsr_unit *atsru)
3741 kfree(atsru); 3770 kfree(atsru);
3742} 3771}
3743 3772
3773int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3774{
3775 struct acpi_dmar_atsr *atsr;
3776 struct dmar_atsr_unit *atsru;
3777
3778 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3779 atsru = dmar_find_atsr(atsr);
3780 if (atsru) {
3781 list_del_rcu(&atsru->list);
3782 synchronize_rcu();
3783 intel_iommu_free_atsr(atsru);
3784 }
3785
3786 return 0;
3787}
3788
3789int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg)
3790{
3791 int i;
3792 struct device *dev;
3793 struct acpi_dmar_atsr *atsr;
3794 struct dmar_atsr_unit *atsru;
3795
3796 atsr = container_of(hdr, struct acpi_dmar_atsr, header);
3797 atsru = dmar_find_atsr(atsr);
3798 if (!atsru)
3799 return 0;
3800
3801 if (!atsru->include_all && atsru->devices && atsru->devices_cnt)
3802 for_each_active_dev_scope(atsru->devices, atsru->devices_cnt,
3803 i, dev)
3804 return -EBUSY;
3805
3806 return 0;
3807}
3808
3809static int intel_iommu_add(struct dmar_drhd_unit *dmaru)
3810{
3811 int sp, ret = 0;
3812 struct intel_iommu *iommu = dmaru->iommu;
3813
3814 if (g_iommus[iommu->seq_id])
3815 return 0;
3816
3817 if (hw_pass_through && !ecap_pass_through(iommu->ecap)) {
3818 pr_warn("IOMMU: %s doesn't support hardware pass through.\n",
3819 iommu->name);
3820 return -ENXIO;
3821 }
3822 if (!ecap_sc_support(iommu->ecap) &&
3823 domain_update_iommu_snooping(iommu)) {
3824 pr_warn("IOMMU: %s doesn't support snooping.\n",
3825 iommu->name);
3826 return -ENXIO;
3827 }
3828 sp = domain_update_iommu_superpage(iommu) - 1;
3829 if (sp >= 0 && !(cap_super_page_val(iommu->cap) & (1 << sp))) {
3830 pr_warn("IOMMU: %s doesn't support large page.\n",
3831 iommu->name);
3832 return -ENXIO;
3833 }
3834
3835 /*
3836 * Disable translation if already enabled prior to OS handover.
3837 */
3838 if (iommu->gcmd & DMA_GCMD_TE)
3839 iommu_disable_translation(iommu);
3840
3841 g_iommus[iommu->seq_id] = iommu;
3842 ret = iommu_init_domains(iommu);
3843 if (ret == 0)
3844 ret = iommu_alloc_root_entry(iommu);
3845 if (ret)
3846 goto out;
3847
3848 if (dmaru->ignored) {
3849 /*
3850 * we always have to disable PMRs or DMA may fail on this device
3851 */
3852 if (force_on)
3853 iommu_disable_protect_mem_regions(iommu);
3854 return 0;
3855 }
3856
3857 intel_iommu_init_qi(iommu);
3858 iommu_flush_write_buffer(iommu);
3859 ret = dmar_set_interrupt(iommu);
3860 if (ret)
3861 goto disable_iommu;
3862
3863 iommu_set_root_entry(iommu);
3864 iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
3865 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
3866 iommu_enable_translation(iommu);
3867
3868 if (si_domain) {
3869 ret = iommu_attach_domain(si_domain, iommu);
3870 if (ret < 0 || si_domain->id != ret)
3871 goto disable_iommu;
3872 domain_attach_iommu(si_domain, iommu);
3873 }
3874
3875 iommu_disable_protect_mem_regions(iommu);
3876 return 0;
3877
3878disable_iommu:
3879 disable_dmar_iommu(iommu);
3880out:
3881 free_dmar_iommu(iommu);
3882 return ret;
3883}
3884
3885int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
3886{
3887 int ret = 0;
3888 struct intel_iommu *iommu = dmaru->iommu;
3889
3890 if (!intel_iommu_enabled)
3891 return 0;
3892 if (iommu == NULL)
3893 return -EINVAL;
3894
3895 if (insert) {
3896 ret = intel_iommu_add(dmaru);
3897 } else {
3898 disable_dmar_iommu(iommu);
3899 free_dmar_iommu(iommu);
3900 }
3901
3902 return ret;
3903}
3904
3744static void intel_iommu_free_dmars(void) 3905static void intel_iommu_free_dmars(void)
3745{ 3906{
3746 struct dmar_rmrr_unit *rmrru, *rmrr_n; 3907 struct dmar_rmrr_unit *rmrru, *rmrr_n;
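Aside: intel_iommu_add() has to bring a hot-added unit to the same state init_dmars() establishes at boot, and the ordering is significant: domains and the root entry exist before invalidation is configured, and translation is enabled only after both global flushes. The sequence, condensed (a sketch; interrupt setup, capability checks and error unwinding trimmed):

    ret = iommu_init_domains(iommu);
    if (!ret)
            ret = iommu_alloc_root_entry(iommu);
    if (ret)
            return ret;

    intel_iommu_init_qi(iommu);     /* queued vs. register-based flush */
    iommu_set_root_entry(iommu);
    iommu->flush.flush_context(iommu, 0, 0, 0, DMA_CCMD_GLOBAL_INVL);
    iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
    iommu_enable_translation(iommu);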
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 7c80661b35c1..27541d440849 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -36,7 +36,6 @@ struct hpet_scope {
36 36
37static struct ioapic_scope ir_ioapic[MAX_IO_APICS]; 37static struct ioapic_scope ir_ioapic[MAX_IO_APICS];
38static struct hpet_scope ir_hpet[MAX_HPET_TBS]; 38static struct hpet_scope ir_hpet[MAX_HPET_TBS];
39static int ir_ioapic_num, ir_hpet_num;
40 39
41/* 40/*
42 * Lock ordering: 41 * Lock ordering:
@@ -206,7 +205,7 @@ static struct intel_iommu *map_hpet_to_ir(u8 hpet_id)
206 int i; 205 int i;
207 206
208 for (i = 0; i < MAX_HPET_TBS; i++) 207 for (i = 0; i < MAX_HPET_TBS; i++)
209 if (ir_hpet[i].id == hpet_id) 208 if (ir_hpet[i].id == hpet_id && ir_hpet[i].iommu)
210 return ir_hpet[i].iommu; 209 return ir_hpet[i].iommu;
211 return NULL; 210 return NULL;
212} 211}
@@ -216,7 +215,7 @@ static struct intel_iommu *map_ioapic_to_ir(int apic)
216 int i; 215 int i;
217 216
218 for (i = 0; i < MAX_IO_APICS; i++) 217 for (i = 0; i < MAX_IO_APICS; i++)
219 if (ir_ioapic[i].id == apic) 218 if (ir_ioapic[i].id == apic && ir_ioapic[i].iommu)
220 return ir_ioapic[i].iommu; 219 return ir_ioapic[i].iommu;
221 return NULL; 220 return NULL;
222} 221}
@@ -325,7 +324,7 @@ static int set_ioapic_sid(struct irte *irte, int apic)
325 324
326 down_read(&dmar_global_lock); 325 down_read(&dmar_global_lock);
327 for (i = 0; i < MAX_IO_APICS; i++) { 326 for (i = 0; i < MAX_IO_APICS; i++) {
328 if (ir_ioapic[i].id == apic) { 327 if (ir_ioapic[i].iommu && ir_ioapic[i].id == apic) {
329 sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn; 328 sid = (ir_ioapic[i].bus << 8) | ir_ioapic[i].devfn;
330 break; 329 break;
331 } 330 }
@@ -352,7 +351,7 @@ static int set_hpet_sid(struct irte *irte, u8 id)
352 351
353 down_read(&dmar_global_lock); 352 down_read(&dmar_global_lock);
354 for (i = 0; i < MAX_HPET_TBS; i++) { 353 for (i = 0; i < MAX_HPET_TBS; i++) {
355 if (ir_hpet[i].id == id) { 354 if (ir_hpet[i].iommu && ir_hpet[i].id == id) {
356 sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn; 355 sid = (ir_hpet[i].bus << 8) | ir_hpet[i].devfn;
357 break; 356 break;
358 } 357 }
@@ -473,17 +472,17 @@ static void iommu_set_irq_remapping(struct intel_iommu *iommu, int mode)
473 raw_spin_unlock_irqrestore(&iommu->register_lock, flags); 472 raw_spin_unlock_irqrestore(&iommu->register_lock, flags);
474} 473}
475 474
476 475static int intel_setup_irq_remapping(struct intel_iommu *iommu)
477static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
478{ 476{
479 struct ir_table *ir_table; 477 struct ir_table *ir_table;
480 struct page *pages; 478 struct page *pages;
481 unsigned long *bitmap; 479 unsigned long *bitmap;
482 480
483 ir_table = iommu->ir_table = kzalloc(sizeof(struct ir_table), 481 if (iommu->ir_table)
484 GFP_ATOMIC); 482 return 0;
485 483
486 if (!iommu->ir_table) 484 ir_table = kzalloc(sizeof(struct ir_table), GFP_ATOMIC);
485 if (!ir_table)
487 return -ENOMEM; 486 return -ENOMEM;
488 487
489 pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO, 488 pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
@@ -492,24 +491,37 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu, int mode)
492 if (!pages) { 491 if (!pages) {
493 pr_err("IR%d: failed to allocate pages of order %d\n", 492 pr_err("IR%d: failed to allocate pages of order %d\n",
494 iommu->seq_id, INTR_REMAP_PAGE_ORDER); 493 iommu->seq_id, INTR_REMAP_PAGE_ORDER);
495 kfree(iommu->ir_table); 494 goto out_free_table;
496 return -ENOMEM;
497 } 495 }
498 496
499 bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES), 497 bitmap = kcalloc(BITS_TO_LONGS(INTR_REMAP_TABLE_ENTRIES),
500 sizeof(long), GFP_ATOMIC); 498 sizeof(long), GFP_ATOMIC);
501 if (bitmap == NULL) { 499 if (bitmap == NULL) {
502 pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id); 500 pr_err("IR%d: failed to allocate bitmap\n", iommu->seq_id);
503 __free_pages(pages, INTR_REMAP_PAGE_ORDER); 501 goto out_free_pages;
504 kfree(ir_table);
505 return -ENOMEM;
506 } 502 }
507 503
508 ir_table->base = page_address(pages); 504 ir_table->base = page_address(pages);
509 ir_table->bitmap = bitmap; 505 ir_table->bitmap = bitmap;
510 506 iommu->ir_table = ir_table;
511 iommu_set_irq_remapping(iommu, mode);
512 return 0; 507 return 0;
508
509out_free_pages:
510 __free_pages(pages, INTR_REMAP_PAGE_ORDER);
511out_free_table:
512 kfree(ir_table);
513 return -ENOMEM;
514}
515
516static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
517{
518 if (iommu && iommu->ir_table) {
519 free_pages((unsigned long)iommu->ir_table->base,
520 INTR_REMAP_PAGE_ORDER);
521 kfree(iommu->ir_table->bitmap);
522 kfree(iommu->ir_table);
523 iommu->ir_table = NULL;
524 }
513} 525}
514 526
515/* 527/*
@@ -666,9 +678,10 @@ static int __init intel_enable_irq_remapping(void)
666 if (!ecap_ir_support(iommu->ecap)) 678 if (!ecap_ir_support(iommu->ecap))
667 continue; 679 continue;
668 680
669 if (intel_setup_irq_remapping(iommu, eim)) 681 if (intel_setup_irq_remapping(iommu))
670 goto error; 682 goto error;
671 683
684 iommu_set_irq_remapping(iommu, eim);
672 setup = 1; 685 setup = 1;
673 } 686 }
674 687
@@ -689,9 +702,11 @@ static int __init intel_enable_irq_remapping(void)
689 return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE; 702 return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
690 703
691error: 704error:
692 /* 705 for_each_iommu(iommu, drhd)
693 * handle error condition gracefully here! 706 if (ecap_ir_support(iommu->ecap)) {
694 */ 707 iommu_disable_irq_remapping(iommu);
708 intel_teardown_irq_remapping(iommu);
709 }
695 710
696 if (x2apic_present) 711 if (x2apic_present)
697 pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n"); 712 pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
@@ -699,12 +714,13 @@ error:
699 return -1; 714 return -1;
700} 715}
701 716
702static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope, 717static int ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
703 struct intel_iommu *iommu) 718 struct intel_iommu *iommu,
719 struct acpi_dmar_hardware_unit *drhd)
704{ 720{
705 struct acpi_dmar_pci_path *path; 721 struct acpi_dmar_pci_path *path;
706 u8 bus; 722 u8 bus;
707 int count; 723 int count, free = -1;
708 724
709 bus = scope->bus; 725 bus = scope->bus;
710 path = (struct acpi_dmar_pci_path *)(scope + 1); 726 path = (struct acpi_dmar_pci_path *)(scope + 1);
@@ -720,19 +736,36 @@ static void ir_parse_one_hpet_scope(struct acpi_dmar_device_scope *scope,
720 PCI_SECONDARY_BUS); 736 PCI_SECONDARY_BUS);
721 path++; 737 path++;
722 } 738 }
723 ir_hpet[ir_hpet_num].bus = bus; 739
724 ir_hpet[ir_hpet_num].devfn = PCI_DEVFN(path->device, path->function); 740 for (count = 0; count < MAX_HPET_TBS; count++) {
725 ir_hpet[ir_hpet_num].iommu = iommu; 741 if (ir_hpet[count].iommu == iommu &&
726 ir_hpet[ir_hpet_num].id = scope->enumeration_id; 742 ir_hpet[count].id == scope->enumeration_id)
727 ir_hpet_num++; 743 return 0;
744 else if (ir_hpet[count].iommu == NULL && free == -1)
745 free = count;
746 }
747 if (free == -1) {
748 pr_warn("Exceeded Max HPET blocks\n");
749 return -ENOSPC;
750 }
751
752 ir_hpet[free].iommu = iommu;
753 ir_hpet[free].id = scope->enumeration_id;
754 ir_hpet[free].bus = bus;
755 ir_hpet[free].devfn = PCI_DEVFN(path->device, path->function);
756 pr_info("HPET id %d under DRHD base 0x%Lx\n",
757 scope->enumeration_id, drhd->address);
758
759 return 0;
728} 760}
729 761
730static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope, 762static int ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
731 struct intel_iommu *iommu) 763 struct intel_iommu *iommu,
764 struct acpi_dmar_hardware_unit *drhd)
732{ 765{
733 struct acpi_dmar_pci_path *path; 766 struct acpi_dmar_pci_path *path;
734 u8 bus; 767 u8 bus;
735 int count; 768 int count, free = -1;
736 769
737 bus = scope->bus; 770 bus = scope->bus;
738 path = (struct acpi_dmar_pci_path *)(scope + 1); 771 path = (struct acpi_dmar_pci_path *)(scope + 1);
@@ -749,54 +782,63 @@ static void ir_parse_one_ioapic_scope(struct acpi_dmar_device_scope *scope,
749 path++; 782 path++;
750 } 783 }
751 784
752 ir_ioapic[ir_ioapic_num].bus = bus; 785 for (count = 0; count < MAX_IO_APICS; count++) {
753 ir_ioapic[ir_ioapic_num].devfn = PCI_DEVFN(path->device, path->function); 786 if (ir_ioapic[count].iommu == iommu &&
754 ir_ioapic[ir_ioapic_num].iommu = iommu; 787 ir_ioapic[count].id == scope->enumeration_id)
755 ir_ioapic[ir_ioapic_num].id = scope->enumeration_id; 788 return 0;
756 ir_ioapic_num++; 789 else if (ir_ioapic[count].iommu == NULL && free == -1)
790 free = count;
791 }
792 if (free == -1) {
793 pr_warn("Exceeded Max IO APICS\n");
794 return -ENOSPC;
795 }
796
797 ir_ioapic[free].bus = bus;
798 ir_ioapic[free].devfn = PCI_DEVFN(path->device, path->function);
799 ir_ioapic[free].iommu = iommu;
800 ir_ioapic[free].id = scope->enumeration_id;
801 pr_info("IOAPIC id %d under DRHD base 0x%Lx IOMMU %d\n",
802 scope->enumeration_id, drhd->address, iommu->seq_id);
803
804 return 0;
757} 805}
758 806
759static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header, 807static int ir_parse_ioapic_hpet_scope(struct acpi_dmar_header *header,
760 struct intel_iommu *iommu) 808 struct intel_iommu *iommu)
761{ 809{
810 int ret = 0;
762 struct acpi_dmar_hardware_unit *drhd; 811 struct acpi_dmar_hardware_unit *drhd;
763 struct acpi_dmar_device_scope *scope; 812 struct acpi_dmar_device_scope *scope;
764 void *start, *end; 813 void *start, *end;
765 814
766 drhd = (struct acpi_dmar_hardware_unit *)header; 815 drhd = (struct acpi_dmar_hardware_unit *)header;
767
768 start = (void *)(drhd + 1); 816 start = (void *)(drhd + 1);
769 end = ((void *)drhd) + header->length; 817 end = ((void *)drhd) + header->length;
770 818
771 while (start < end) { 819 while (start < end && ret == 0) {
772 scope = start; 820 scope = start;
773 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC) { 821 if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_IOAPIC)
774 if (ir_ioapic_num == MAX_IO_APICS) { 822 ret = ir_parse_one_ioapic_scope(scope, iommu, drhd);
775 printk(KERN_WARNING "Exceeded Max IO APICS\n"); 823 else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET)
776 return -1; 824 ret = ir_parse_one_hpet_scope(scope, iommu, drhd);
777 } 825 start += scope->length;
778 826 }
779 printk(KERN_INFO "IOAPIC id %d under DRHD base "
780 " 0x%Lx IOMMU %d\n", scope->enumeration_id,
781 drhd->address, iommu->seq_id);
782 827
783 ir_parse_one_ioapic_scope(scope, iommu); 828 return ret;
784 } else if (scope->entry_type == ACPI_DMAR_SCOPE_TYPE_HPET) { 829}
785 if (ir_hpet_num == MAX_HPET_TBS) {
786 printk(KERN_WARNING "Exceeded Max HPET blocks\n");
787 return -1;
788 }
789 830
790 printk(KERN_INFO "HPET id %d under DRHD base" 831static void ir_remove_ioapic_hpet_scope(struct intel_iommu *iommu)
791 " 0x%Lx\n", scope->enumeration_id, 832{
792 drhd->address); 833 int i;
793 834
794 ir_parse_one_hpet_scope(scope, iommu); 835 for (i = 0; i < MAX_HPET_TBS; i++)
795 } 836 if (ir_hpet[i].iommu == iommu)
796 start += scope->length; 837 ir_hpet[i].iommu = NULL;
797 }
798 838
799 return 0; 839 for (i = 0; i < MAX_IO_APICS; i++)
840 if (ir_ioapic[i].iommu == iommu)
841 ir_ioapic[i].iommu = NULL;
800} 842}
801 843
802/* 844/*
@@ -1171,3 +1213,86 @@ struct irq_remap_ops intel_irq_remap_ops = {
1171 .msi_setup_irq = intel_msi_setup_irq, 1213 .msi_setup_irq = intel_msi_setup_irq,
1172 .alloc_hpet_msi = intel_alloc_hpet_msi, 1214 .alloc_hpet_msi = intel_alloc_hpet_msi,
1173}; 1215};
1216
1217/*
1218 * Support of Interrupt Remapping Unit Hotplug
1219 */
1220static int dmar_ir_add(struct dmar_drhd_unit *dmaru, struct intel_iommu *iommu)
1221{
1222 int ret;
1223 int eim = x2apic_enabled();
1224
1225 if (eim && !ecap_eim_support(iommu->ecap)) {
1226 pr_info("DRHD %Lx: EIM not supported by DRHD, ecap %Lx\n",
1227 iommu->reg_phys, iommu->ecap);
1228 return -ENODEV;
1229 }
1230
1231 if (ir_parse_ioapic_hpet_scope(dmaru->hdr, iommu)) {
1232 pr_warn("DRHD %Lx: failed to parse managed IOAPIC/HPET\n",
1233 iommu->reg_phys);
1234 return -ENODEV;
1235 }
1236
1237 /* TODO: check all IOAPICs are covered by IOMMU */
1238
1239 /* Setup Interrupt-remapping now. */
1240 ret = intel_setup_irq_remapping(iommu);
1241 if (ret) {
1242 pr_err("DRHD %Lx: failed to allocate resource\n",
1243 iommu->reg_phys);
1244 ir_remove_ioapic_hpet_scope(iommu);
1245 return ret;
1246 }
1247
1248 if (!iommu->qi) {
1249 /* Clear previous faults. */
1250 dmar_fault(-1, iommu);
1251 iommu_disable_irq_remapping(iommu);
1252 dmar_disable_qi(iommu);
1253 }
1254
1255 /* Enable queued invalidation */
1256 ret = dmar_enable_qi(iommu);
1257 if (!ret) {
1258 iommu_set_irq_remapping(iommu, eim);
1259 } else {
1260 pr_err("DRHD %Lx: failed to enable queued invalidation, ecap %Lx, ret %d\n",
1261 iommu->reg_phys, iommu->ecap, ret);
1262 intel_teardown_irq_remapping(iommu);
1263 ir_remove_ioapic_hpet_scope(iommu);
1264 }
1265
1266 return ret;
1267}
1268
1269int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
1270{
1271 int ret = 0;
1272 struct intel_iommu *iommu = dmaru->iommu;
1273
1274 if (!irq_remapping_enabled)
1275 return 0;
1276 if (iommu == NULL)
1277 return -EINVAL;
1278 if (!ecap_ir_support(iommu->ecap))
1279 return 0;
1280
1281 if (insert) {
1282 if (!iommu->ir_table)
1283 ret = dmar_ir_add(dmaru, iommu);
1284 } else {
1285 if (iommu->ir_table) {
1286 if (!bitmap_empty(iommu->ir_table->bitmap,
1287 INTR_REMAP_TABLE_ENTRIES)) {
1288 ret = -EBUSY;
1289 } else {
1290 iommu_disable_irq_remapping(iommu);
1291 intel_teardown_irq_remapping(iommu);
1292 ir_remove_ioapic_hpet_scope(iommu);
1293 }
1294 }
1295 }
1296
1297 return ret;
1298}
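Aside: dropping the ir_ioapic_num/ir_hpet_num high-water counters in favour of scanning for a free slot is what lets ir_remove_ioapic_hpet_scope() reclaim entries by simply clearing .iommu. The scan idiom in isolation (a sketch, using the IOAPIC array):

    int i, free = -1;

    for (i = 0; i < MAX_IO_APICS; i++) {
            if (ir_ioapic[i].iommu == iommu &&
                ir_ioapic[i].id == scope->enumeration_id)
                    return 0;               /* already registered */
            if (!ir_ioapic[i].iommu && free == -1)
                    free = i;               /* first reclaimed slot */
    }
    if (free == -1)
            return -ENOSPC;                 /* table genuinely full */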
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 02e4313e937c..1bd63352ab17 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -1143,14 +1143,24 @@ size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
1143{ 1143{
1144 struct scatterlist *s; 1144 struct scatterlist *s;
1145 size_t mapped = 0; 1145 size_t mapped = 0;
1146 unsigned int i; 1146 unsigned int i, min_pagesz;
1147 int ret; 1147 int ret;
1148 1148
1149 for_each_sg(sg, s, nents, i) { 1149 if (unlikely(domain->ops->pgsize_bitmap == 0UL))
1150 phys_addr_t phys = page_to_phys(sg_page(s)); 1150 return 0;
1151 1151
1152 /* We are mapping on page boundarys, so offset must be 0 */ 1152 min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap);
1153 if (s->offset) 1153
1154 for_each_sg(sg, s, nents, i) {
1155 phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset;
1156
1157 /*
1158 * We are mapping on IOMMU page boundaries, so offset within
1159 * the page must be 0. However, the IOMMU may support pages
1160 * smaller than PAGE_SIZE, so s->offset may still represent
1161 * an offset of that boundary within the CPU page.
1162 */
1163 if (!IS_ALIGNED(s->offset, min_pagesz))
1154 goto out_err; 1164 goto out_err;
1155 1165
1156 ret = iommu_map(domain, iova + mapped, phys, s->length, prot); 1166 ret = iommu_map(domain, iova + mapped, phys, s->length, prot);
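Aside: the map_sg change relaxes the old offset-must-be-zero rule. An entry is now acceptable as long as its offset is aligned to the smallest page the IOMMU supports, taken from the lowest set bit of pgsize_bitmap. A worked value (a sketch; the bitmap below is illustrative):

    unsigned long bitmap = SZ_4K | SZ_2M | SZ_1G;   /* example bitmap */
    unsigned int min_pagesz = 1 << __ffs(bitmap);   /* lowest bit -> 4096 */

    if (!IS_ALIGNED(s->offset, min_pagesz))
            goto out_err;   /* not on an IOMMU page boundary */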
diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c
index e509c58eee92..99effbb17191 100644
--- a/drivers/iommu/ipmmu-vmsa.c
+++ b/drivers/iommu/ipmmu-vmsa.c
@@ -1185,7 +1185,7 @@ static int ipmmu_probe(struct platform_device *pdev)
1185 dev_name(&pdev->dev), mmu); 1185 dev_name(&pdev->dev), mmu);
1186 if (ret < 0) { 1186 if (ret < 0) {
1187 dev_err(&pdev->dev, "failed to request IRQ %d\n", irq); 1187 dev_err(&pdev->dev, "failed to request IRQ %d\n", irq);
1188 return irq; 1188 return ret;
1189 } 1189 }
1190 1190
1191 ipmmu_device_reset(mmu); 1191 ipmmu_device_reset(mmu);
diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c
index 1c7b78ecf3e3..e1b05379ca0e 100644
--- a/drivers/iommu/msm_iommu.c
+++ b/drivers/iommu/msm_iommu.c
@@ -73,8 +73,7 @@ fail:
73 73
74static void __disable_clocks(struct msm_iommu_drvdata *drvdata) 74static void __disable_clocks(struct msm_iommu_drvdata *drvdata)
75{ 75{
76 if (drvdata->clk) 76 clk_disable(drvdata->clk);
77 clk_disable(drvdata->clk);
78 clk_disable(drvdata->pclk); 77 clk_disable(drvdata->pclk);
79} 78}
80 79
diff --git a/drivers/iommu/msm_iommu_dev.c b/drivers/iommu/msm_iommu_dev.c
index 61def7cb5263..b6d01f97e537 100644
--- a/drivers/iommu/msm_iommu_dev.c
+++ b/drivers/iommu/msm_iommu_dev.c
@@ -131,7 +131,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
131 struct clk *iommu_clk; 131 struct clk *iommu_clk;
132 struct clk *iommu_pclk; 132 struct clk *iommu_pclk;
133 struct msm_iommu_drvdata *drvdata; 133 struct msm_iommu_drvdata *drvdata;
134 struct msm_iommu_dev *iommu_dev = pdev->dev.platform_data; 134 struct msm_iommu_dev *iommu_dev = dev_get_platdata(&pdev->dev);
135 void __iomem *regs_base; 135 void __iomem *regs_base;
136 int ret, irq, par; 136 int ret, irq, par;
137 137
@@ -224,8 +224,7 @@ static int msm_iommu_probe(struct platform_device *pdev)
224 224
225 platform_set_drvdata(pdev, drvdata); 225 platform_set_drvdata(pdev, drvdata);
226 226
227 if (iommu_clk) 227 clk_disable(iommu_clk);
228 clk_disable(iommu_clk);
229 228
230 clk_disable(iommu_pclk); 229 clk_disable(iommu_pclk);
231 230
@@ -264,7 +263,7 @@ static int msm_iommu_remove(struct platform_device *pdev)
264 263
265static int msm_iommu_ctx_probe(struct platform_device *pdev) 264static int msm_iommu_ctx_probe(struct platform_device *pdev)
266{ 265{
267 struct msm_iommu_ctx_dev *c = pdev->dev.platform_data; 266 struct msm_iommu_ctx_dev *c = dev_get_platdata(&pdev->dev);
268 struct msm_iommu_drvdata *drvdata; 267 struct msm_iommu_drvdata *drvdata;
269 struct msm_iommu_ctx_drvdata *ctx_drvdata; 268 struct msm_iommu_ctx_drvdata *ctx_drvdata;
270 int i, ret; 269 int i, ret;
@@ -323,8 +322,7 @@ static int msm_iommu_ctx_probe(struct platform_device *pdev)
323 SET_NSCFG(drvdata->base, mid, 3); 322 SET_NSCFG(drvdata->base, mid, 3);
324 } 323 }
325 324
326 if (drvdata->clk) 325 clk_disable(drvdata->clk);
327 clk_disable(drvdata->clk);
328 clk_disable(drvdata->pclk); 326 clk_disable(drvdata->pclk);
329 327
330 dev_info(&pdev->dev, "context %s using bank %d\n", c->name, c->num); 328 dev_info(&pdev->dev, "context %s using bank %d\n", c->name, c->num);
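The clk hunks above (in msm_iommu.c and msm_iommu_dev.c) drop the "if (clk)"
guards before clk_disable(). That leans on the common clk framework treating
a NULL struct clk * as a no-op; a hedged sketch of the resulting pattern
(function name invented):

	#include <linux/clk.h>

	static void example_clocks_off(struct clk *optional_clk, struct clk *pclk)
	{
		/* clk_disable() simply returns for a NULL (optional) clock */
		clk_disable(optional_clk);
		clk_disable(pclk);
	}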
diff --git a/drivers/iommu/omap-iommu-debug.c b/drivers/iommu/omap-iommu-debug.c
index 531658d17333..f3d20a2039d2 100644
--- a/drivers/iommu/omap-iommu-debug.c
+++ b/drivers/iommu/omap-iommu-debug.c
@@ -10,45 +10,35 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#include <linux/module.h>
14#include <linux/err.h> 13#include <linux/err.h>
15#include <linux/clk.h>
16#include <linux/io.h> 14#include <linux/io.h>
17#include <linux/slab.h> 15#include <linux/slab.h>
18#include <linux/uaccess.h> 16#include <linux/uaccess.h>
19#include <linux/platform_device.h>
20#include <linux/debugfs.h> 17#include <linux/debugfs.h>
21#include <linux/omap-iommu.h>
22#include <linux/platform_data/iommu-omap.h> 18#include <linux/platform_data/iommu-omap.h>
23 19
24#include "omap-iopgtable.h" 20#include "omap-iopgtable.h"
25#include "omap-iommu.h" 21#include "omap-iommu.h"
26 22
27#define MAXCOLUMN 100 /* for short messages */
28
29static DEFINE_MUTEX(iommu_debug_lock); 23static DEFINE_MUTEX(iommu_debug_lock);
30 24
31static struct dentry *iommu_debug_root; 25static struct dentry *iommu_debug_root;
32 26
33static ssize_t debug_read_ver(struct file *file, char __user *userbuf, 27static inline bool is_omap_iommu_detached(struct omap_iommu *obj)
34 size_t count, loff_t *ppos)
35{ 28{
36 u32 ver = omap_iommu_arch_version(); 29 return !obj->domain;
37 char buf[MAXCOLUMN], *p = buf;
38
39 p += sprintf(p, "H/W version: %d.%d\n", (ver >> 4) & 0xf , ver & 0xf);
40
41 return simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
42} 30}
43 31
44static ssize_t debug_read_regs(struct file *file, char __user *userbuf, 32static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
45 size_t count, loff_t *ppos) 33 size_t count, loff_t *ppos)
46{ 34{
47 struct device *dev = file->private_data; 35 struct omap_iommu *obj = file->private_data;
48 struct omap_iommu *obj = dev_to_omap_iommu(dev);
49 char *p, *buf; 36 char *p, *buf;
50 ssize_t bytes; 37 ssize_t bytes;
51 38
39 if (is_omap_iommu_detached(obj))
40 return -EPERM;
41
52 buf = kmalloc(count, GFP_KERNEL); 42 buf = kmalloc(count, GFP_KERNEL);
53 if (!buf) 43 if (!buf)
54 return -ENOMEM; 44 return -ENOMEM;
@@ -68,11 +58,13 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
68static ssize_t debug_read_tlb(struct file *file, char __user *userbuf, 58static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
69 size_t count, loff_t *ppos) 59 size_t count, loff_t *ppos)
70{ 60{
71 struct device *dev = file->private_data; 61 struct omap_iommu *obj = file->private_data;
72 struct omap_iommu *obj = dev_to_omap_iommu(dev);
73 char *p, *buf; 62 char *p, *buf;
74 ssize_t bytes, rest; 63 ssize_t bytes, rest;
75 64
65 if (is_omap_iommu_detached(obj))
66 return -EPERM;
67
76 buf = kmalloc(count, GFP_KERNEL); 68 buf = kmalloc(count, GFP_KERNEL);
77 if (!buf) 69 if (!buf)
78 return -ENOMEM; 70 return -ENOMEM;
@@ -93,133 +85,69 @@ static ssize_t debug_read_tlb(struct file *file, char __user *userbuf,
93 return bytes; 85 return bytes;
94} 86}
95 87
96static ssize_t debug_write_pagetable(struct file *file, 88static void dump_ioptable(struct seq_file *s)
97 const char __user *userbuf, size_t count, loff_t *ppos)
98{ 89{
99 struct iotlb_entry e; 90 int i, j;
100 struct cr_regs cr; 91 u32 da;
101 int err; 92 u32 *iopgd, *iopte;
102 struct device *dev = file->private_data; 93 struct omap_iommu *obj = s->private;
103 struct omap_iommu *obj = dev_to_omap_iommu(dev);
104 char buf[MAXCOLUMN], *p = buf;
105
106 count = min(count, sizeof(buf));
107
108 mutex_lock(&iommu_debug_lock);
109 if (copy_from_user(p, userbuf, count)) {
110 mutex_unlock(&iommu_debug_lock);
111 return -EFAULT;
112 }
113
114 sscanf(p, "%x %x", &cr.cam, &cr.ram);
115 if (!cr.cam || !cr.ram) {
116 mutex_unlock(&iommu_debug_lock);
117 return -EINVAL;
118 }
119
120 omap_iotlb_cr_to_e(&cr, &e);
121 err = omap_iopgtable_store_entry(obj, &e);
122 if (err)
123 dev_err(obj->dev, "%s: fail to store cr\n", __func__);
124
125 mutex_unlock(&iommu_debug_lock);
126 return count;
127}
128
129#define dump_ioptable_entry_one(lv, da, val) \
130 ({ \
131 int __err = 0; \
132 ssize_t bytes; \
133 const int maxcol = 22; \
134 const char *str = "%d: %08x %08x\n"; \
135 bytes = snprintf(p, maxcol, str, lv, da, val); \
136 p += bytes; \
137 len -= bytes; \
138 if (len < maxcol) \
139 __err = -ENOMEM; \
140 __err; \
141 })
142
143static ssize_t dump_ioptable(struct omap_iommu *obj, char *buf, ssize_t len)
144{
145 int i;
146 u32 *iopgd;
147 char *p = buf;
148 94
149 spin_lock(&obj->page_table_lock); 95 spin_lock(&obj->page_table_lock);
150 96
151 iopgd = iopgd_offset(obj, 0); 97 iopgd = iopgd_offset(obj, 0);
152 for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) { 98 for (i = 0; i < PTRS_PER_IOPGD; i++, iopgd++) {
153 int j, err;
154 u32 *iopte;
155 u32 da;
156
157 if (!*iopgd) 99 if (!*iopgd)
158 continue; 100 continue;
159 101
160 if (!(*iopgd & IOPGD_TABLE)) { 102 if (!(*iopgd & IOPGD_TABLE)) {
161 da = i << IOPGD_SHIFT; 103 da = i << IOPGD_SHIFT;
162 104 seq_printf(s, "1: 0x%08x 0x%08x\n", da, *iopgd);
163 err = dump_ioptable_entry_one(1, da, *iopgd);
164 if (err)
165 goto out;
166 continue; 105 continue;
167 } 106 }
168 107
169 iopte = iopte_offset(iopgd, 0); 108 iopte = iopte_offset(iopgd, 0);
170
171 for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) { 109 for (j = 0; j < PTRS_PER_IOPTE; j++, iopte++) {
172 if (!*iopte) 110 if (!*iopte)
173 continue; 111 continue;
174 112
175 da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT); 113 da = (i << IOPGD_SHIFT) + (j << IOPTE_SHIFT);
176 err = dump_ioptable_entry_one(2, da, *iopgd); 114 seq_printf(s, "2: 0x%08x 0x%08x\n", da, *iopte);
177 if (err)
178 goto out;
179 } 115 }
180 } 116 }
181out:
182 spin_unlock(&obj->page_table_lock);
183 117
184 return p - buf; 118 spin_unlock(&obj->page_table_lock);
185} 119}
186 120
187static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf, 121static int debug_read_pagetable(struct seq_file *s, void *data)
188 size_t count, loff_t *ppos)
189{ 122{
190 struct device *dev = file->private_data; 123 struct omap_iommu *obj = s->private;
191 struct omap_iommu *obj = dev_to_omap_iommu(dev);
192 char *p, *buf;
193 size_t bytes;
194 124
195 buf = (char *)__get_free_page(GFP_KERNEL); 125 if (is_omap_iommu_detached(obj))
196 if (!buf) 126 return -EPERM;
197 return -ENOMEM;
198 p = buf;
199
200 p += sprintf(p, "L: %8s %8s\n", "da:", "pa:");
201 p += sprintf(p, "-----------------------------------------\n");
202 127
203 mutex_lock(&iommu_debug_lock); 128 mutex_lock(&iommu_debug_lock);
204 129
205 bytes = PAGE_SIZE - (p - buf); 130 seq_printf(s, "L: %8s %8s\n", "da:", "pte:");
206 p += dump_ioptable(obj, p, bytes); 131 seq_puts(s, "--------------------------\n");
207 132 dump_ioptable(s);
208 bytes = simple_read_from_buffer(userbuf, count, ppos, buf, p - buf);
209 133
210 mutex_unlock(&iommu_debug_lock); 134 mutex_unlock(&iommu_debug_lock);
211 free_page((unsigned long)buf);
212 135
213 return bytes; 136 return 0;
214} 137}
215 138
216#define DEBUG_FOPS(name) \ 139#define DEBUG_SEQ_FOPS_RO(name) \
217 static const struct file_operations debug_##name##_fops = { \ 140 static int debug_open_##name(struct inode *inode, struct file *file) \
218 .open = simple_open, \ 141 { \
219 .read = debug_read_##name, \ 142 return single_open(file, debug_read_##name, inode->i_private); \
220 .write = debug_write_##name, \ 143 } \
221 .llseek = generic_file_llseek, \ 144 \
222 }; 145 static const struct file_operations debug_##name##_fops = { \
146 .open = debug_open_##name, \
147 .read = seq_read, \
148 .llseek = seq_lseek, \
149 .release = single_release, \
150 }
223 151
224#define DEBUG_FOPS_RO(name) \ 152#define DEBUG_FOPS_RO(name) \
225 static const struct file_operations debug_##name##_fops = { \ 153 static const struct file_operations debug_##name##_fops = { \
@@ -228,103 +156,63 @@ static ssize_t debug_read_pagetable(struct file *file, char __user *userbuf,
228 .llseek = generic_file_llseek, \ 156 .llseek = generic_file_llseek, \
229 }; 157 };
230 158
231DEBUG_FOPS_RO(ver);
232DEBUG_FOPS_RO(regs); 159DEBUG_FOPS_RO(regs);
233DEBUG_FOPS_RO(tlb); 160DEBUG_FOPS_RO(tlb);
234DEBUG_FOPS(pagetable); 161DEBUG_SEQ_FOPS_RO(pagetable);
235 162
236#define __DEBUG_ADD_FILE(attr, mode) \ 163#define __DEBUG_ADD_FILE(attr, mode) \
237 { \ 164 { \
238 struct dentry *dent; \ 165 struct dentry *dent; \
239 dent = debugfs_create_file(#attr, mode, parent, \ 166 dent = debugfs_create_file(#attr, mode, obj->debug_dir, \
240 dev, &debug_##attr##_fops); \ 167 obj, &debug_##attr##_fops); \
241 if (!dent) \ 168 if (!dent) \
242 return -ENOMEM; \ 169 goto err; \
243 } 170 }
244 171
245#define DEBUG_ADD_FILE(name) __DEBUG_ADD_FILE(name, 0600)
246#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400) 172#define DEBUG_ADD_FILE_RO(name) __DEBUG_ADD_FILE(name, 0400)
247 173
248static int iommu_debug_register(struct device *dev, void *data) 174void omap_iommu_debugfs_add(struct omap_iommu *obj)
249{ 175{
250 struct platform_device *pdev = to_platform_device(dev); 176 struct dentry *d;
251 struct omap_iommu *obj = platform_get_drvdata(pdev);
252 struct omap_iommu_arch_data *arch_data;
253 struct dentry *d, *parent;
254
255 if (!obj || !obj->dev)
256 return -EINVAL;
257
258 arch_data = kzalloc(sizeof(*arch_data), GFP_KERNEL);
259 if (!arch_data)
260 return -ENOMEM;
261
262 arch_data->iommu_dev = obj;
263 177
264 dev->archdata.iommu = arch_data; 178 if (!iommu_debug_root)
179 return;
265 180
266 d = debugfs_create_dir(obj->name, iommu_debug_root); 181 obj->debug_dir = debugfs_create_dir(obj->name, iommu_debug_root);
267 if (!d) 182 if (!obj->debug_dir)
268 goto nomem; 183 return;
269 parent = d;
270 184
271 d = debugfs_create_u8("nr_tlb_entries", 400, parent, 185 d = debugfs_create_u8("nr_tlb_entries", 0400, obj->debug_dir,
272 (u8 *)&obj->nr_tlb_entries); 186 (u8 *)&obj->nr_tlb_entries);
273 if (!d) 187 if (!d)
274 goto nomem; 188 return;
275 189
276 DEBUG_ADD_FILE_RO(ver);
277 DEBUG_ADD_FILE_RO(regs); 190 DEBUG_ADD_FILE_RO(regs);
278 DEBUG_ADD_FILE_RO(tlb); 191 DEBUG_ADD_FILE_RO(tlb);
279 DEBUG_ADD_FILE(pagetable); 192 DEBUG_ADD_FILE_RO(pagetable);
280 193
281 return 0; 194 return;
282 195
283nomem: 196err:
284 kfree(arch_data); 197 debugfs_remove_recursive(obj->debug_dir);
285 return -ENOMEM;
286} 198}
287 199
288static int iommu_debug_unregister(struct device *dev, void *data) 200void omap_iommu_debugfs_remove(struct omap_iommu *obj)
289{ 201{
290 if (!dev->archdata.iommu) 202 if (!obj->debug_dir)
291 return 0; 203 return;
292
293 kfree(dev->archdata.iommu);
294 204
295 dev->archdata.iommu = NULL; 205 debugfs_remove_recursive(obj->debug_dir);
296
297 return 0;
298} 206}
299 207
300static int __init iommu_debug_init(void) 208void __init omap_iommu_debugfs_init(void)
301{ 209{
302 struct dentry *d; 210 iommu_debug_root = debugfs_create_dir("omap_iommu", NULL);
303 int err; 211 if (!iommu_debug_root)
304 212 pr_err("can't create debugfs dir\n");
305 d = debugfs_create_dir("iommu", NULL);
306 if (!d)
307 return -ENOMEM;
308 iommu_debug_root = d;
309
310 err = omap_foreach_iommu_device(d, iommu_debug_register);
311 if (err)
312 goto err_out;
313 return 0;
314
315err_out:
316 debugfs_remove_recursive(iommu_debug_root);
317 return err;
318} 213}
319module_init(iommu_debug_init)
320 214
321static void __exit iommu_debugfs_exit(void) 215void __exit omap_iommu_debugfs_exit(void)
322{ 216{
323 debugfs_remove_recursive(iommu_debug_root); 217 debugfs_remove(iommu_debug_root);
324 omap_foreach_iommu_device(NULL, iommu_debug_unregister);
325} 218}
326module_exit(iommu_debugfs_exit)
327
328MODULE_DESCRIPTION("omap iommu: debugfs interface");
329MODULE_AUTHOR("Hiroshi DOYU <Hiroshi.DOYU@nokia.com>");
330MODULE_LICENSE("GPL v2");
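DEBUG_SEQ_FOPS_RO above is the stock single_open()/seq_file idiom, which
frees the show handler from sizing its own buffer. A self-contained sketch of
the same pattern (all names here are invented for illustration):

	#include <linux/debugfs.h>
	#include <linux/seq_file.h>

	static int example_show(struct seq_file *s, void *data)
	{
		/* s->private is the pointer passed to debugfs_create_file() */
		seq_printf(s, "value: %d\n", 42);
		return 0;
	}

	static int example_open(struct inode *inode, struct file *file)
	{
		return single_open(file, example_show, inode->i_private);
	}

	static const struct file_operations example_fops = {
		.open		= example_open,
		.read		= seq_read,
		.llseek		= seq_lseek,
		.release	= single_release,
	};

	/* debugfs_create_file("example", 0400, parent, obj, &example_fops); */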
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 18003c044454..bbb7dcef02d3 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -76,53 +76,23 @@ struct iotlb_lock {
76 short vict; 76 short vict;
77}; 77};
78 78
79/* accommodate the difference between omap1 and omap2/3 */
80static const struct iommu_functions *arch_iommu;
81
82static struct platform_driver omap_iommu_driver; 79static struct platform_driver omap_iommu_driver;
83static struct kmem_cache *iopte_cachep; 80static struct kmem_cache *iopte_cachep;
84 81
85/** 82/**
86 * omap_install_iommu_arch - Install architecture specific iommu functions
87 * @ops: a pointer to architecture specific iommu functions
88 *
89 * There are several kinds of iommu algorithms (tlb, pagetable) among
90 * the omap series. This interface installs such an iommu algorithm.
91 **/
92int omap_install_iommu_arch(const struct iommu_functions *ops)
93{
94 if (arch_iommu)
95 return -EBUSY;
96
97 arch_iommu = ops;
98 return 0;
99}
100EXPORT_SYMBOL_GPL(omap_install_iommu_arch);
101
102/**
103 * omap_uninstall_iommu_arch - Uninstall architecture specific iommu functions
104 * @ops: a pointer to architecture specific iommu functions
105 *
106 * This interface uninstalls the iommu algorithm installed previously.
107 **/
108void omap_uninstall_iommu_arch(const struct iommu_functions *ops)
109{
110 if (arch_iommu != ops)
111 pr_err("%s: not your arch\n", __func__);
112
113 arch_iommu = NULL;
114}
115EXPORT_SYMBOL_GPL(omap_uninstall_iommu_arch);
116
117/**
118 * omap_iommu_save_ctx - Save registers for pm off-mode support 83 * omap_iommu_save_ctx - Save registers for pm off-mode support
119 * @dev: client device 84 * @dev: client device
120 **/ 85 **/
121void omap_iommu_save_ctx(struct device *dev) 86void omap_iommu_save_ctx(struct device *dev)
122{ 87{
123 struct omap_iommu *obj = dev_to_omap_iommu(dev); 88 struct omap_iommu *obj = dev_to_omap_iommu(dev);
89 u32 *p = obj->ctx;
90 int i;
124 91
125 arch_iommu->save_ctx(obj); 92 for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
93 p[i] = iommu_read_reg(obj, i * sizeof(u32));
94 dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
95 }
126} 96}
127EXPORT_SYMBOL_GPL(omap_iommu_save_ctx); 97EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
128 98
@@ -133,28 +103,74 @@ EXPORT_SYMBOL_GPL(omap_iommu_save_ctx);
133void omap_iommu_restore_ctx(struct device *dev) 103void omap_iommu_restore_ctx(struct device *dev)
134{ 104{
135 struct omap_iommu *obj = dev_to_omap_iommu(dev); 105 struct omap_iommu *obj = dev_to_omap_iommu(dev);
106 u32 *p = obj->ctx;
107 int i;
136 108
137 arch_iommu->restore_ctx(obj); 109 for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
110 iommu_write_reg(obj, p[i], i * sizeof(u32));
111 dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
112 }
138} 113}
139EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx); 114EXPORT_SYMBOL_GPL(omap_iommu_restore_ctx);
140 115
141/** 116static void __iommu_set_twl(struct omap_iommu *obj, bool on)
142 * omap_iommu_arch_version - Return running iommu arch version
143 **/
144u32 omap_iommu_arch_version(void)
145{ 117{
146 return arch_iommu->version; 118 u32 l = iommu_read_reg(obj, MMU_CNTL);
119
120 if (on)
121 iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
122 else
123 iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);
124
125 l &= ~MMU_CNTL_MASK;
126 if (on)
127 l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
128 else
129 l |= (MMU_CNTL_MMU_EN);
130
131 iommu_write_reg(obj, l, MMU_CNTL);
132}
133
134static int omap2_iommu_enable(struct omap_iommu *obj)
135{
136 u32 l, pa;
137
138 if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
139 return -EINVAL;
140
141 pa = virt_to_phys(obj->iopgd);
142 if (!IS_ALIGNED(pa, SZ_16K))
143 return -EINVAL;
144
145 l = iommu_read_reg(obj, MMU_REVISION);
146 dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
147 (l >> 4) & 0xf, l & 0xf);
148
149 iommu_write_reg(obj, pa, MMU_TTB);
150
151 if (obj->has_bus_err_back)
152 iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);
153
154 __iommu_set_twl(obj, true);
155
156 return 0;
157}
158
159static void omap2_iommu_disable(struct omap_iommu *obj)
160{
161 u32 l = iommu_read_reg(obj, MMU_CNTL);
162
163 l &= ~MMU_CNTL_MASK;
164 iommu_write_reg(obj, l, MMU_CNTL);
165
166 dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
147} 167}
148EXPORT_SYMBOL_GPL(omap_iommu_arch_version);
149 168
150static int iommu_enable(struct omap_iommu *obj) 169static int iommu_enable(struct omap_iommu *obj)
151{ 170{
152 int err; 171 int err;
153 struct platform_device *pdev = to_platform_device(obj->dev); 172 struct platform_device *pdev = to_platform_device(obj->dev);
154 struct iommu_platform_data *pdata = pdev->dev.platform_data; 173 struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
155
156 if (!arch_iommu)
157 return -ENODEV;
158 174
159 if (pdata && pdata->deassert_reset) { 175 if (pdata && pdata->deassert_reset) {
160 err = pdata->deassert_reset(pdev, pdata->reset_name); 176 err = pdata->deassert_reset(pdev, pdata->reset_name);
@@ -166,7 +182,7 @@ static int iommu_enable(struct omap_iommu *obj)
166 182
167 pm_runtime_get_sync(obj->dev); 183 pm_runtime_get_sync(obj->dev);
168 184
169 err = arch_iommu->enable(obj); 185 err = omap2_iommu_enable(obj);
170 186
171 return err; 187 return err;
172} 188}
@@ -174,9 +190,9 @@ static int iommu_enable(struct omap_iommu *obj)
174static void iommu_disable(struct omap_iommu *obj) 190static void iommu_disable(struct omap_iommu *obj)
175{ 191{
176 struct platform_device *pdev = to_platform_device(obj->dev); 192 struct platform_device *pdev = to_platform_device(obj->dev);
177 struct iommu_platform_data *pdata = pdev->dev.platform_data; 193 struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
178 194
179 arch_iommu->disable(obj); 195 omap2_iommu_disable(obj);
180 196
181 pm_runtime_put_sync(obj->dev); 197 pm_runtime_put_sync(obj->dev);
182 198
@@ -187,44 +203,51 @@ static void iommu_disable(struct omap_iommu *obj)
187/* 203/*
188 * TLB operations 204 * TLB operations
189 */ 205 */
190void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
191{
192 BUG_ON(!cr || !e);
193
194 arch_iommu->cr_to_e(cr, e);
195}
196EXPORT_SYMBOL_GPL(omap_iotlb_cr_to_e);
197
198static inline int iotlb_cr_valid(struct cr_regs *cr) 206static inline int iotlb_cr_valid(struct cr_regs *cr)
199{ 207{
200 if (!cr) 208 if (!cr)
201 return -EINVAL; 209 return -EINVAL;
202 210
203 return arch_iommu->cr_valid(cr); 211 return cr->cam & MMU_CAM_V;
204}
205
206static inline struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
207 struct iotlb_entry *e)
208{
209 if (!e)
210 return NULL;
211
212 return arch_iommu->alloc_cr(obj, e);
213} 212}
214 213
215static u32 iotlb_cr_to_virt(struct cr_regs *cr) 214static u32 iotlb_cr_to_virt(struct cr_regs *cr)
216{ 215{
217 return arch_iommu->cr_to_virt(cr); 216 u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
217 u32 mask = get_cam_va_mask(cr->cam & page_size);
218
219 return cr->cam & mask;
218} 220}
219 221
220static u32 get_iopte_attr(struct iotlb_entry *e) 222static u32 get_iopte_attr(struct iotlb_entry *e)
221{ 223{
222 return arch_iommu->get_pte_attr(e); 224 u32 attr;
225
226 attr = e->mixed << 5;
227 attr |= e->endian;
228 attr |= e->elsz >> 3;
229 attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
230 (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
231 return attr;
223} 232}
224 233
225static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da) 234static u32 iommu_report_fault(struct omap_iommu *obj, u32 *da)
226{ 235{
227 return arch_iommu->fault_isr(obj, da); 236 u32 status, fault_addr;
237
238 status = iommu_read_reg(obj, MMU_IRQSTATUS);
239 status &= MMU_IRQ_MASK;
240 if (!status) {
241 *da = 0;
242 return 0;
243 }
244
245 fault_addr = iommu_read_reg(obj, MMU_FAULT_AD);
246 *da = fault_addr;
247
248 iommu_write_reg(obj, status, MMU_IRQSTATUS);
249
250 return status;
228} 251}
229 252
230static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l) 253static void iotlb_lock_get(struct omap_iommu *obj, struct iotlb_lock *l)
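/*
 * Worked example for the open-coded get_iopte_attr() above, assuming the
 * little-endian and 8-bit element-size encodings are zero (as the OMAP
 * headers suggest), so only e->mixed contributes:
 *
 *	e->mixed = 1, e->pgsz = MMU_CAM_PGSZ_4K -> attr = 1 << 5    = 0x20
 *	e->mixed = 1, e->pgsz = MMU_CAM_PGSZ_1M -> attr = 0x20 << 6 = 0x800
 *
 * 4 KiB and 64 KiB entries keep the attribute bits in place; 1 MiB and
 * 16 MiB section entries shift them up by 6.
 */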
@@ -250,31 +273,19 @@ static void iotlb_lock_set(struct omap_iommu *obj, struct iotlb_lock *l)
250 273
251static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr) 274static void iotlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
252{ 275{
253 arch_iommu->tlb_read_cr(obj, cr); 276 cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
277 cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
254} 278}
255 279
256static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr) 280static void iotlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
257{ 281{
258 arch_iommu->tlb_load_cr(obj, cr); 282 iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
283 iommu_write_reg(obj, cr->ram, MMU_RAM);
259 284
260 iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY); 285 iommu_write_reg(obj, 1, MMU_FLUSH_ENTRY);
261 iommu_write_reg(obj, 1, MMU_LD_TLB); 286 iommu_write_reg(obj, 1, MMU_LD_TLB);
262} 287}
263 288
264/**
265 * iotlb_dump_cr - Dump an iommu tlb entry into buf
266 * @obj: target iommu
267 * @cr: contents of cam and ram register
268 * @buf: output buffer
269 **/
270static inline ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
271 char *buf)
272{
273 BUG_ON(!cr || !buf);
274
275 return arch_iommu->dump_cr(obj, cr, buf);
276}
277
278/* only used in iotlb iteration for-loop */ 289/* only used in iotlb iteration for-loop */
279static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n) 290static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
280{ 291{
@@ -289,12 +300,36 @@ static struct cr_regs __iotlb_read_cr(struct omap_iommu *obj, int n)
289 return cr; 300 return cr;
290} 301}
291 302
303#ifdef PREFETCH_IOTLB
304static struct cr_regs *iotlb_alloc_cr(struct omap_iommu *obj,
305 struct iotlb_entry *e)
306{
307 struct cr_regs *cr;
308
309 if (!e)
310 return NULL;
311
312 if (e->da & ~(get_cam_va_mask(e->pgsz))) {
313 dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
314 e->da);
315 return ERR_PTR(-EINVAL);
316 }
317
318 cr = kmalloc(sizeof(*cr), GFP_KERNEL);
319 if (!cr)
320 return ERR_PTR(-ENOMEM);
321
322 cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
323 cr->ram = e->pa | e->endian | e->elsz | e->mixed;
324
325 return cr;
326}
327
292/** 328/**
293 * load_iotlb_entry - Set an iommu tlb entry 329 * load_iotlb_entry - Set an iommu tlb entry
294 * @obj: target iommu 330 * @obj: target iommu
295 * @e: an iommu tlb entry info 331 * @e: an iommu tlb entry info
296 **/ 332 **/
297#ifdef PREFETCH_IOTLB
298static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e) 333static int load_iotlb_entry(struct omap_iommu *obj, struct iotlb_entry *e)
299{ 334{
300 int err = 0; 335 int err = 0;
@@ -423,7 +458,45 @@ static void flush_iotlb_all(struct omap_iommu *obj)
423 pm_runtime_put_sync(obj->dev); 458 pm_runtime_put_sync(obj->dev);
424} 459}
425 460
426#if defined(CONFIG_OMAP_IOMMU_DEBUG) || defined(CONFIG_OMAP_IOMMU_DEBUG_MODULE) 461#ifdef CONFIG_OMAP_IOMMU_DEBUG
462
463#define pr_reg(name) \
464 do { \
465 ssize_t bytes; \
466 const char *str = "%20s: %08x\n"; \
467 const int maxcol = 32; \
468 bytes = snprintf(p, maxcol, str, __stringify(name), \
469 iommu_read_reg(obj, MMU_##name)); \
470 p += bytes; \
471 len -= bytes; \
472 if (len < maxcol) \
473 goto out; \
474 } while (0)
475
476static ssize_t
477omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
478{
479 char *p = buf;
480
481 pr_reg(REVISION);
482 pr_reg(IRQSTATUS);
483 pr_reg(IRQENABLE);
484 pr_reg(WALKING_ST);
485 pr_reg(CNTL);
486 pr_reg(FAULT_AD);
487 pr_reg(TTB);
488 pr_reg(LOCK);
489 pr_reg(LD_TLB);
490 pr_reg(CAM);
491 pr_reg(RAM);
492 pr_reg(GFLUSH);
493 pr_reg(FLUSH_ENTRY);
494 pr_reg(READ_CAM);
495 pr_reg(READ_RAM);
496 pr_reg(EMU_FAULT_AD);
497out:
498 return p - buf;
499}
427 500
428ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes) 501ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
429{ 502{
@@ -432,13 +505,12 @@ ssize_t omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t bytes)
432 505
433 pm_runtime_get_sync(obj->dev); 506 pm_runtime_get_sync(obj->dev);
434 507
435 bytes = arch_iommu->dump_ctx(obj, buf, bytes); 508 bytes = omap2_iommu_dump_ctx(obj, buf, bytes);
436 509
437 pm_runtime_put_sync(obj->dev); 510 pm_runtime_put_sync(obj->dev);
438 511
439 return bytes; 512 return bytes;
440} 513}
441EXPORT_SYMBOL_GPL(omap_iommu_dump_ctx);
442 514
443static int 515static int
444__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num) 516__dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
@@ -464,6 +536,24 @@ __dump_tlb_entries(struct omap_iommu *obj, struct cr_regs *crs, int num)
464} 536}
465 537
466/** 538/**
539 * iotlb_dump_cr - Dump an iommu tlb entry into buf
540 * @obj: target iommu
541 * @cr: contents of cam and ram register
542 * @buf: output buffer
543 **/
544static ssize_t iotlb_dump_cr(struct omap_iommu *obj, struct cr_regs *cr,
545 char *buf)
546{
547 char *p = buf;
548
549 /* FIXME: Need more detailed analysis of cam/ram */
550 p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
551 (cr->cam & MMU_CAM_P) ? 1 : 0);
552
553 return p - buf;
554}
555
556/**
467 * omap_dump_tlb_entries - dump cr arrays to given buffer 557 * omap_dump_tlb_entries - dump cr arrays to given buffer
468 * @obj: target iommu 558 * @obj: target iommu
469 * @buf: output buffer 559 * @buf: output buffer
@@ -488,16 +578,8 @@ size_t omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t bytes)
488 578
489 return p - buf; 579 return p - buf;
490} 580}
491EXPORT_SYMBOL_GPL(omap_dump_tlb_entries);
492
493int omap_foreach_iommu_device(void *data, int (*fn)(struct device *, void *))
494{
495 return driver_for_each_device(&omap_iommu_driver.driver,
496 NULL, data, fn);
497}
498EXPORT_SYMBOL_GPL(omap_foreach_iommu_device);
499 581
500#endif /* CONFIG_OMAP_IOMMU_DEBUG_MODULE */ 582#endif /* CONFIG_OMAP_IOMMU_DEBUG */
501 583
502/* 584/*
503 * H/W pagetable operations 585 * H/W pagetable operations
@@ -680,7 +762,8 @@ iopgtable_store_entry_core(struct omap_iommu *obj, struct iotlb_entry *e)
680 * @obj: target iommu 762 * @obj: target iommu
681 * @e: an iommu tlb entry info 763 * @e: an iommu tlb entry info
682 **/ 764 **/
683int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e) 765static int
766omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
684{ 767{
685 int err; 768 int err;
686 769
@@ -690,7 +773,6 @@ int omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e)
690 prefetch_iotlb_entry(obj, e); 773 prefetch_iotlb_entry(obj, e);
691 return err; 774 return err;
692} 775}
693EXPORT_SYMBOL_GPL(omap_iopgtable_store_entry);
694 776
695/** 777/**
696 * iopgtable_lookup_entry - Lookup an iommu pte entry 778 * iopgtable_lookup_entry - Lookup an iommu pte entry
@@ -819,8 +901,9 @@ static irqreturn_t iommu_fault_handler(int irq, void *data)
819 u32 *iopgd, *iopte; 901 u32 *iopgd, *iopte;
820 struct omap_iommu *obj = data; 902 struct omap_iommu *obj = data;
821 struct iommu_domain *domain = obj->domain; 903 struct iommu_domain *domain = obj->domain;
904 struct omap_iommu_domain *omap_domain = domain->priv;
822 905
823 if (!obj->refcount) 906 if (!omap_domain->iommu_dev)
824 return IRQ_NONE; 907 return IRQ_NONE;
825 908
826 errs = iommu_report_fault(obj, &da); 909 errs = iommu_report_fault(obj, &da);
@@ -880,13 +963,6 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
880 963
881 spin_lock(&obj->iommu_lock); 964 spin_lock(&obj->iommu_lock);
882 965
883 /* an iommu device can only be attached once */
884 if (++obj->refcount > 1) {
885 dev_err(dev, "%s: already attached!\n", obj->name);
886 err = -EBUSY;
887 goto err_enable;
888 }
889
890 obj->iopgd = iopgd; 966 obj->iopgd = iopgd;
891 err = iommu_enable(obj); 967 err = iommu_enable(obj);
892 if (err) 968 if (err)
@@ -899,7 +975,6 @@ static struct omap_iommu *omap_iommu_attach(const char *name, u32 *iopgd)
899 return obj; 975 return obj;
900 976
901err_enable: 977err_enable:
902 obj->refcount--;
903 spin_unlock(&obj->iommu_lock); 978 spin_unlock(&obj->iommu_lock);
904 return ERR_PTR(err); 979 return ERR_PTR(err);
905} 980}
@@ -915,9 +990,7 @@ static void omap_iommu_detach(struct omap_iommu *obj)
915 990
916 spin_lock(&obj->iommu_lock); 991 spin_lock(&obj->iommu_lock);
917 992
918 if (--obj->refcount == 0) 993 iommu_disable(obj);
919 iommu_disable(obj);
920
921 obj->iopgd = NULL; 994 obj->iopgd = NULL;
922 995
923 spin_unlock(&obj->iommu_lock); 996 spin_unlock(&obj->iommu_lock);
@@ -934,7 +1007,7 @@ static int omap_iommu_probe(struct platform_device *pdev)
934 int irq; 1007 int irq;
935 struct omap_iommu *obj; 1008 struct omap_iommu *obj;
936 struct resource *res; 1009 struct resource *res;
937 struct iommu_platform_data *pdata = pdev->dev.platform_data; 1010 struct iommu_platform_data *pdata = dev_get_platdata(&pdev->dev);
938 struct device_node *of = pdev->dev.of_node; 1011 struct device_node *of = pdev->dev.of_node;
939 1012
940 obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL); 1013 obj = devm_kzalloc(&pdev->dev, sizeof(*obj) + MMU_REG_SIZE, GFP_KERNEL);
@@ -981,6 +1054,8 @@ static int omap_iommu_probe(struct platform_device *pdev)
981 pm_runtime_irq_safe(obj->dev); 1054 pm_runtime_irq_safe(obj->dev);
982 pm_runtime_enable(obj->dev); 1055 pm_runtime_enable(obj->dev);
983 1056
1057 omap_iommu_debugfs_add(obj);
1058
984 dev_info(&pdev->dev, "%s registered\n", obj->name); 1059 dev_info(&pdev->dev, "%s registered\n", obj->name);
985 return 0; 1060 return 0;
986} 1061}
@@ -990,6 +1065,7 @@ static int omap_iommu_remove(struct platform_device *pdev)
990 struct omap_iommu *obj = platform_get_drvdata(pdev); 1065 struct omap_iommu *obj = platform_get_drvdata(pdev);
991 1066
992 iopgtable_clear_entry_all(obj); 1067 iopgtable_clear_entry_all(obj);
1068 omap_iommu_debugfs_remove(obj);
993 1069
994 pm_runtime_disable(obj->dev); 1070 pm_runtime_disable(obj->dev);
995 1071
@@ -1026,7 +1102,6 @@ static u32 iotlb_init_entry(struct iotlb_entry *e, u32 da, u32 pa, int pgsz)
1026 e->da = da; 1102 e->da = da;
1027 e->pa = pa; 1103 e->pa = pa;
1028 e->valid = MMU_CAM_V; 1104 e->valid = MMU_CAM_V;
1029 /* FIXME: add OMAP1 support */
1030 e->pgsz = pgsz; 1105 e->pgsz = pgsz;
1031 e->endian = MMU_RAM_ENDIAN_LITTLE; 1106 e->endian = MMU_RAM_ENDIAN_LITTLE;
1032 e->elsz = MMU_RAM_ELSZ_8; 1107 e->elsz = MMU_RAM_ELSZ_8;
@@ -1131,6 +1206,7 @@ static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
1131 1206
1132 omap_domain->iommu_dev = arch_data->iommu_dev = NULL; 1207 omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
1133 omap_domain->dev = NULL; 1208 omap_domain->dev = NULL;
1209 oiommu->domain = NULL;
1134} 1210}
1135 1211
1136static void omap_iommu_detach_dev(struct iommu_domain *domain, 1212static void omap_iommu_detach_dev(struct iommu_domain *domain,
@@ -1309,6 +1385,8 @@ static int __init omap_iommu_init(void)
1309 1385
1310 bus_set_iommu(&platform_bus_type, &omap_iommu_ops); 1386 bus_set_iommu(&platform_bus_type, &omap_iommu_ops);
1311 1387
1388 omap_iommu_debugfs_init();
1389
1312 return platform_driver_register(&omap_iommu_driver); 1390 return platform_driver_register(&omap_iommu_driver);
1313} 1391}
1314/* must be ready before omap3isp is probed */ 1392/* must be ready before omap3isp is probed */
@@ -1319,6 +1397,8 @@ static void __exit omap_iommu_exit(void)
1319 kmem_cache_destroy(iopte_cachep); 1397 kmem_cache_destroy(iopte_cachep);
1320 1398
1321 platform_driver_unregister(&omap_iommu_driver); 1399 platform_driver_unregister(&omap_iommu_driver);
1400
1401 omap_iommu_debugfs_exit();
1322} 1402}
1323module_exit(omap_iommu_exit); 1403module_exit(omap_iommu_exit);
1324 1404
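With the omap2 helpers folded in, a TLB entry is just the packed CAM/RAM
register pair. A hedged sketch of the composition performed by
iotlb_alloc_cr() above, with illustrative addresses:

	/* valid, preserved 4 KiB entry: da 0x12345000 -> pa 0x87654000 */
	struct cr_regs cr = {
		.cam = (0x12345000 & MMU_CAM_VATAG_MASK) | MMU_CAM_P |
		       MMU_CAM_PGSZ_4K | MMU_CAM_V,
		.ram = 0x87654000 | MMU_RAM_ENDIAN_LITTLE | MMU_RAM_ELSZ_8,
	};

	/* iotlb_load_cr() then writes cr.cam and cr.ram to MMU_CAM/MMU_RAM */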
diff --git a/drivers/iommu/omap-iommu.h b/drivers/iommu/omap-iommu.h
index 4f1b68c08c15..d736630df3c8 100644
--- a/drivers/iommu/omap-iommu.h
+++ b/drivers/iommu/omap-iommu.h
@@ -10,9 +10,8 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 */ 11 */
12 12
13#if defined(CONFIG_ARCH_OMAP1) 13#ifndef _OMAP_IOMMU_H
14#error "iommu for this processor not implemented yet" 14#define _OMAP_IOMMU_H
15#endif
16 15
17struct iotlb_entry { 16struct iotlb_entry {
18 u32 da; 17 u32 da;
@@ -30,10 +29,9 @@ struct omap_iommu {
30 const char *name; 29 const char *name;
31 void __iomem *regbase; 30 void __iomem *regbase;
32 struct device *dev; 31 struct device *dev;
33 void *isr_priv;
34 struct iommu_domain *domain; 32 struct iommu_domain *domain;
33 struct dentry *debug_dir;
35 34
36 unsigned int refcount;
37 spinlock_t iommu_lock; /* global for this whole object */ 35 spinlock_t iommu_lock; /* global for this whole object */
38 36
39 /* 37 /*
@@ -67,34 +65,6 @@ struct cr_regs {
67 }; 65 };
68}; 66};
69 67
70/* architecture specific functions */
71struct iommu_functions {
72 unsigned long version;
73
74 int (*enable)(struct omap_iommu *obj);
75 void (*disable)(struct omap_iommu *obj);
76 void (*set_twl)(struct omap_iommu *obj, bool on);
77 u32 (*fault_isr)(struct omap_iommu *obj, u32 *ra);
78
79 void (*tlb_read_cr)(struct omap_iommu *obj, struct cr_regs *cr);
80 void (*tlb_load_cr)(struct omap_iommu *obj, struct cr_regs *cr);
81
82 struct cr_regs *(*alloc_cr)(struct omap_iommu *obj,
83 struct iotlb_entry *e);
84 int (*cr_valid)(struct cr_regs *cr);
85 u32 (*cr_to_virt)(struct cr_regs *cr);
86 void (*cr_to_e)(struct cr_regs *cr, struct iotlb_entry *e);
87 ssize_t (*dump_cr)(struct omap_iommu *obj, struct cr_regs *cr,
88 char *buf);
89
90 u32 (*get_pte_attr)(struct iotlb_entry *e);
91
92 void (*save_ctx)(struct omap_iommu *obj);
93 void (*restore_ctx)(struct omap_iommu *obj);
94 ssize_t (*dump_ctx)(struct omap_iommu *obj, char *buf, ssize_t len);
95};
96
97#ifdef CONFIG_IOMMU_API
98/** 68/**
99 * dev_to_omap_iommu() - retrieves an omap iommu object from a user device 69 * dev_to_omap_iommu() - retrieves an omap iommu object from a user device
100 * @dev: iommu client device 70 * @dev: iommu client device
@@ -105,7 +75,6 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
105 75
106 return arch_data->iommu_dev; 76 return arch_data->iommu_dev;
107} 77}
108#endif
109 78
110/* 79/*
111 * MMU Register offsets 80 * MMU Register offsets
@@ -133,6 +102,28 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
133/* 102/*
134 * MMU Register bit definitions 103 * MMU Register bit definitions
135 */ 104 */
105/* IRQSTATUS & IRQENABLE */
106#define MMU_IRQ_MULTIHITFAULT (1 << 4)
107#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
108#define MMU_IRQ_EMUMISS (1 << 2)
109#define MMU_IRQ_TRANSLATIONFAULT (1 << 1)
110#define MMU_IRQ_TLBMISS (1 << 0)
111
112#define __MMU_IRQ_FAULT \
113 (MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
114#define MMU_IRQ_MASK \
115 (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS)
116#define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT)
117#define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS)
118
119/* MMU_CNTL */
120#define MMU_CNTL_SHIFT 1
121#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT)
122#define MMU_CNTL_EML_TLB (1 << 3)
123#define MMU_CNTL_TWL_EN (1 << 2)
124#define MMU_CNTL_MMU_EN (1 << 1)
125
126/* CAM */
136#define MMU_CAM_VATAG_SHIFT 12 127#define MMU_CAM_VATAG_SHIFT 12
137#define MMU_CAM_VATAG_MASK \ 128#define MMU_CAM_VATAG_MASK \
138 ((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT) 129 ((~0UL >> MMU_CAM_VATAG_SHIFT) << MMU_CAM_VATAG_SHIFT)
@@ -144,6 +135,7 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
144#define MMU_CAM_PGSZ_4K (2 << 0) 135#define MMU_CAM_PGSZ_4K (2 << 0)
145#define MMU_CAM_PGSZ_16M (3 << 0) 136#define MMU_CAM_PGSZ_16M (3 << 0)
146 137
138/* RAM */
147#define MMU_RAM_PADDR_SHIFT 12 139#define MMU_RAM_PADDR_SHIFT 12
148#define MMU_RAM_PADDR_MASK \ 140#define MMU_RAM_PADDR_MASK \
149 ((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT) 141 ((~0UL >> MMU_RAM_PADDR_SHIFT) << MMU_RAM_PADDR_SHIFT)
@@ -165,6 +157,12 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
165 157
166#define MMU_GP_REG_BUS_ERR_BACK_EN 0x1 158#define MMU_GP_REG_BUS_ERR_BACK_EN 0x1
167 159
160#define get_cam_va_mask(pgsz) \
161 (((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \
162 ((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \
163 ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \
164 ((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0)
165
168/* 166/*
169 * utilities for super page(16MB, 1MB, 64KB and 4KB) 167 * utilities for super page(16MB, 1MB, 64KB and 4KB)
170 */ 168 */
@@ -192,27 +190,25 @@ static inline struct omap_iommu *dev_to_omap_iommu(struct device *dev)
192/* 190/*
193 * global functions 191 * global functions
194 */ 192 */
195extern u32 omap_iommu_arch_version(void); 193#ifdef CONFIG_OMAP_IOMMU_DEBUG
196
197extern void omap_iotlb_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e);
198
199extern int
200omap_iopgtable_store_entry(struct omap_iommu *obj, struct iotlb_entry *e);
201
202extern void omap_iommu_save_ctx(struct device *dev);
203extern void omap_iommu_restore_ctx(struct device *dev);
204
205extern int omap_foreach_iommu_device(void *data,
206 int (*fn)(struct device *, void *));
207
208extern int omap_install_iommu_arch(const struct iommu_functions *ops);
209extern void omap_uninstall_iommu_arch(const struct iommu_functions *ops);
210
211extern ssize_t 194extern ssize_t
212omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len); 195omap_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len);
213extern size_t 196extern size_t
214omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len); 197omap_dump_tlb_entries(struct omap_iommu *obj, char *buf, ssize_t len);
215 198
199void omap_iommu_debugfs_init(void);
200void omap_iommu_debugfs_exit(void);
201
202void omap_iommu_debugfs_add(struct omap_iommu *obj);
203void omap_iommu_debugfs_remove(struct omap_iommu *obj);
204#else
205static inline void omap_iommu_debugfs_init(void) { }
206static inline void omap_iommu_debugfs_exit(void) { }
207
208static inline void omap_iommu_debugfs_add(struct omap_iommu *obj) { }
209static inline void omap_iommu_debugfs_remove(struct omap_iommu *obj) { }
210#endif
211
216/* 212/*
217 * register accessors 213 * register accessors
218 */ 214 */
@@ -225,3 +221,5 @@ static inline void iommu_write_reg(struct omap_iommu *obj, u32 val, size_t offs)
225{ 221{
226 __raw_writel(val, obj->regbase + offs); 222 __raw_writel(val, obj->regbase + offs);
227} 223}
224
225#endif /* _OMAP_IOMMU_H */
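A short worked example of the get_cam_va_mask() table above, as used by the
alignment test in iotlb_alloc_cr() in omap-iommu.c:

	/*
	 * get_cam_va_mask(MMU_CAM_PGSZ_1M) == 0xfff00000, so a 1 MiB entry
	 * at da = 0x12345000 fails:
	 *
	 *	0x12345000 & ~0xfff00000 == 0x00045000   (non-zero -> -EINVAL)
	 *
	 * while da = 0x12300000 passes, its low 20 bits being clear.
	 */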
diff --git a/drivers/iommu/omap-iommu2.c b/drivers/iommu/omap-iommu2.c
deleted file mode 100644
index 5e1ea3b0bf16..000000000000
--- a/drivers/iommu/omap-iommu2.c
+++ /dev/null
@@ -1,337 +0,0 @@
1/*
2 * omap iommu: omap2/3 architecture specific functions
3 *
4 * Copyright (C) 2008-2009 Nokia Corporation
5 *
6 * Written by Hiroshi DOYU <Hiroshi.DOYU@nokia.com>,
7 * Paul Mundt and Toshihiro Kobayashi
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/err.h>
15#include <linux/device.h>
16#include <linux/io.h>
17#include <linux/jiffies.h>
18#include <linux/module.h>
19#include <linux/omap-iommu.h>
20#include <linux/slab.h>
21#include <linux/stringify.h>
22#include <linux/platform_data/iommu-omap.h>
23
24#include "omap-iommu.h"
25
26/*
27 * omap2 architecture specific register bit definitions
28 */
29#define IOMMU_ARCH_VERSION 0x00000011
30
31/* IRQSTATUS & IRQENABLE */
32#define MMU_IRQ_MULTIHITFAULT (1 << 4)
33#define MMU_IRQ_TABLEWALKFAULT (1 << 3)
34#define MMU_IRQ_EMUMISS (1 << 2)
35#define MMU_IRQ_TRANSLATIONFAULT (1 << 1)
36#define MMU_IRQ_TLBMISS (1 << 0)
37
38#define __MMU_IRQ_FAULT \
39 (MMU_IRQ_MULTIHITFAULT | MMU_IRQ_EMUMISS | MMU_IRQ_TRANSLATIONFAULT)
40#define MMU_IRQ_MASK \
41 (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT | MMU_IRQ_TLBMISS)
42#define MMU_IRQ_TWL_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TABLEWALKFAULT)
43#define MMU_IRQ_TLB_MISS_MASK (__MMU_IRQ_FAULT | MMU_IRQ_TLBMISS)
44
45/* MMU_CNTL */
46#define MMU_CNTL_SHIFT 1
47#define MMU_CNTL_MASK (7 << MMU_CNTL_SHIFT)
48#define MMU_CNTL_EML_TLB (1 << 3)
49#define MMU_CNTL_TWL_EN (1 << 2)
50#define MMU_CNTL_MMU_EN (1 << 1)
51
52#define get_cam_va_mask(pgsz) \
53 (((pgsz) == MMU_CAM_PGSZ_16M) ? 0xff000000 : \
54 ((pgsz) == MMU_CAM_PGSZ_1M) ? 0xfff00000 : \
55 ((pgsz) == MMU_CAM_PGSZ_64K) ? 0xffff0000 : \
56 ((pgsz) == MMU_CAM_PGSZ_4K) ? 0xfffff000 : 0)
57
58/* IOMMU errors */
59#define OMAP_IOMMU_ERR_TLB_MISS (1 << 0)
60#define OMAP_IOMMU_ERR_TRANS_FAULT (1 << 1)
61#define OMAP_IOMMU_ERR_EMU_MISS (1 << 2)
62#define OMAP_IOMMU_ERR_TBLWALK_FAULT (1 << 3)
63#define OMAP_IOMMU_ERR_MULTIHIT_FAULT (1 << 4)
64
65static void __iommu_set_twl(struct omap_iommu *obj, bool on)
66{
67 u32 l = iommu_read_reg(obj, MMU_CNTL);
68
69 if (on)
70 iommu_write_reg(obj, MMU_IRQ_TWL_MASK, MMU_IRQENABLE);
71 else
72 iommu_write_reg(obj, MMU_IRQ_TLB_MISS_MASK, MMU_IRQENABLE);
73
74 l &= ~MMU_CNTL_MASK;
75 if (on)
76 l |= (MMU_CNTL_MMU_EN | MMU_CNTL_TWL_EN);
77 else
78 l |= (MMU_CNTL_MMU_EN);
79
80 iommu_write_reg(obj, l, MMU_CNTL);
81}
82
83
84static int omap2_iommu_enable(struct omap_iommu *obj)
85{
86 u32 l, pa;
87
88 if (!obj->iopgd || !IS_ALIGNED((u32)obj->iopgd, SZ_16K))
89 return -EINVAL;
90
91 pa = virt_to_phys(obj->iopgd);
92 if (!IS_ALIGNED(pa, SZ_16K))
93 return -EINVAL;
94
95 l = iommu_read_reg(obj, MMU_REVISION);
96 dev_info(obj->dev, "%s: version %d.%d\n", obj->name,
97 (l >> 4) & 0xf, l & 0xf);
98
99 iommu_write_reg(obj, pa, MMU_TTB);
100
101 if (obj->has_bus_err_back)
102 iommu_write_reg(obj, MMU_GP_REG_BUS_ERR_BACK_EN, MMU_GP_REG);
103
104 __iommu_set_twl(obj, true);
105
106 return 0;
107}
108
109static void omap2_iommu_disable(struct omap_iommu *obj)
110{
111 u32 l = iommu_read_reg(obj, MMU_CNTL);
112
113 l &= ~MMU_CNTL_MASK;
114 iommu_write_reg(obj, l, MMU_CNTL);
115
116 dev_dbg(obj->dev, "%s is shutting down\n", obj->name);
117}
118
119static void omap2_iommu_set_twl(struct omap_iommu *obj, bool on)
120{
121 __iommu_set_twl(obj, false);
122}
123
124static u32 omap2_iommu_fault_isr(struct omap_iommu *obj, u32 *ra)
125{
126 u32 stat, da;
127 u32 errs = 0;
128
129 stat = iommu_read_reg(obj, MMU_IRQSTATUS);
130 stat &= MMU_IRQ_MASK;
131 if (!stat) {
132 *ra = 0;
133 return 0;
134 }
135
136 da = iommu_read_reg(obj, MMU_FAULT_AD);
137 *ra = da;
138
139 if (stat & MMU_IRQ_TLBMISS)
140 errs |= OMAP_IOMMU_ERR_TLB_MISS;
141 if (stat & MMU_IRQ_TRANSLATIONFAULT)
142 errs |= OMAP_IOMMU_ERR_TRANS_FAULT;
143 if (stat & MMU_IRQ_EMUMISS)
144 errs |= OMAP_IOMMU_ERR_EMU_MISS;
145 if (stat & MMU_IRQ_TABLEWALKFAULT)
146 errs |= OMAP_IOMMU_ERR_TBLWALK_FAULT;
147 if (stat & MMU_IRQ_MULTIHITFAULT)
148 errs |= OMAP_IOMMU_ERR_MULTIHIT_FAULT;
149 iommu_write_reg(obj, stat, MMU_IRQSTATUS);
150
151 return errs;
152}
153
154static void omap2_tlb_read_cr(struct omap_iommu *obj, struct cr_regs *cr)
155{
156 cr->cam = iommu_read_reg(obj, MMU_READ_CAM);
157 cr->ram = iommu_read_reg(obj, MMU_READ_RAM);
158}
159
160static void omap2_tlb_load_cr(struct omap_iommu *obj, struct cr_regs *cr)
161{
162 iommu_write_reg(obj, cr->cam | MMU_CAM_V, MMU_CAM);
163 iommu_write_reg(obj, cr->ram, MMU_RAM);
164}
165
166static u32 omap2_cr_to_virt(struct cr_regs *cr)
167{
168 u32 page_size = cr->cam & MMU_CAM_PGSZ_MASK;
169 u32 mask = get_cam_va_mask(cr->cam & page_size);
170
171 return cr->cam & mask;
172}
173
174static struct cr_regs *omap2_alloc_cr(struct omap_iommu *obj,
175 struct iotlb_entry *e)
176{
177 struct cr_regs *cr;
178
179 if (e->da & ~(get_cam_va_mask(e->pgsz))) {
180 dev_err(obj->dev, "%s:\twrong alignment: %08x\n", __func__,
181 e->da);
182 return ERR_PTR(-EINVAL);
183 }
184
185 cr = kmalloc(sizeof(*cr), GFP_KERNEL);
186 if (!cr)
187 return ERR_PTR(-ENOMEM);
188
189 cr->cam = (e->da & MMU_CAM_VATAG_MASK) | e->prsvd | e->pgsz | e->valid;
190 cr->ram = e->pa | e->endian | e->elsz | e->mixed;
191
192 return cr;
193}
194
195static inline int omap2_cr_valid(struct cr_regs *cr)
196{
197 return cr->cam & MMU_CAM_V;
198}
199
200static u32 omap2_get_pte_attr(struct iotlb_entry *e)
201{
202 u32 attr;
203
204 attr = e->mixed << 5;
205 attr |= e->endian;
206 attr |= e->elsz >> 3;
207 attr <<= (((e->pgsz == MMU_CAM_PGSZ_4K) ||
208 (e->pgsz == MMU_CAM_PGSZ_64K)) ? 0 : 6);
209 return attr;
210}
211
212static ssize_t
213omap2_dump_cr(struct omap_iommu *obj, struct cr_regs *cr, char *buf)
214{
215 char *p = buf;
216
217 /* FIXME: Need more detailed analysis of cam/ram */
218 p += sprintf(p, "%08x %08x %01x\n", cr->cam, cr->ram,
219 (cr->cam & MMU_CAM_P) ? 1 : 0);
220
221 return p - buf;
222}
223
224#define pr_reg(name) \
225 do { \
226 ssize_t bytes; \
227 const char *str = "%20s: %08x\n"; \
228 const int maxcol = 32; \
229 bytes = snprintf(p, maxcol, str, __stringify(name), \
230 iommu_read_reg(obj, MMU_##name)); \
231 p += bytes; \
232 len -= bytes; \
233 if (len < maxcol) \
234 goto out; \
235 } while (0)
236
237static ssize_t
238omap2_iommu_dump_ctx(struct omap_iommu *obj, char *buf, ssize_t len)
239{
240 char *p = buf;
241
242 pr_reg(REVISION);
243 pr_reg(IRQSTATUS);
244 pr_reg(IRQENABLE);
245 pr_reg(WALKING_ST);
246 pr_reg(CNTL);
247 pr_reg(FAULT_AD);
248 pr_reg(TTB);
249 pr_reg(LOCK);
250 pr_reg(LD_TLB);
251 pr_reg(CAM);
252 pr_reg(RAM);
253 pr_reg(GFLUSH);
254 pr_reg(FLUSH_ENTRY);
255 pr_reg(READ_CAM);
256 pr_reg(READ_RAM);
257 pr_reg(EMU_FAULT_AD);
258out:
259 return p - buf;
260}
261
262static void omap2_iommu_save_ctx(struct omap_iommu *obj)
263{
264 int i;
265 u32 *p = obj->ctx;
266
267 for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
268 p[i] = iommu_read_reg(obj, i * sizeof(u32));
269 dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
270 }
271
272 BUG_ON(p[0] != IOMMU_ARCH_VERSION);
273}
274
275static void omap2_iommu_restore_ctx(struct omap_iommu *obj)
276{
277 int i;
278 u32 *p = obj->ctx;
279
280 for (i = 0; i < (MMU_REG_SIZE / sizeof(u32)); i++) {
281 iommu_write_reg(obj, p[i], i * sizeof(u32));
282 dev_dbg(obj->dev, "%s\t[%02d] %08x\n", __func__, i, p[i]);
283 }
284
285 BUG_ON(p[0] != IOMMU_ARCH_VERSION);
286}
287
288static void omap2_cr_to_e(struct cr_regs *cr, struct iotlb_entry *e)
289{
290 e->da = cr->cam & MMU_CAM_VATAG_MASK;
291 e->pa = cr->ram & MMU_RAM_PADDR_MASK;
292 e->valid = cr->cam & MMU_CAM_V;
293 e->pgsz = cr->cam & MMU_CAM_PGSZ_MASK;
294 e->endian = cr->ram & MMU_RAM_ENDIAN_MASK;
295 e->elsz = cr->ram & MMU_RAM_ELSZ_MASK;
296 e->mixed = cr->ram & MMU_RAM_MIXED;
297}
298
299static const struct iommu_functions omap2_iommu_ops = {
300 .version = IOMMU_ARCH_VERSION,
301
302 .enable = omap2_iommu_enable,
303 .disable = omap2_iommu_disable,
304 .set_twl = omap2_iommu_set_twl,
305 .fault_isr = omap2_iommu_fault_isr,
306
307 .tlb_read_cr = omap2_tlb_read_cr,
308 .tlb_load_cr = omap2_tlb_load_cr,
309
310 .cr_to_e = omap2_cr_to_e,
311 .cr_to_virt = omap2_cr_to_virt,
312 .alloc_cr = omap2_alloc_cr,
313 .cr_valid = omap2_cr_valid,
314 .dump_cr = omap2_dump_cr,
315
316 .get_pte_attr = omap2_get_pte_attr,
317
318 .save_ctx = omap2_iommu_save_ctx,
319 .restore_ctx = omap2_iommu_restore_ctx,
320 .dump_ctx = omap2_iommu_dump_ctx,
321};
322
323static int __init omap2_iommu_init(void)
324{
325 return omap_install_iommu_arch(&omap2_iommu_ops);
326}
327module_init(omap2_iommu_init);
328
329static void __exit omap2_iommu_exit(void)
330{
331 omap_uninstall_iommu_arch(&omap2_iommu_ops);
332}
333module_exit(omap2_iommu_exit);
334
335MODULE_AUTHOR("Hiroshi DOYU, Paul Mundt and Toshihiro Kobayashi");
336MODULE_DESCRIPTION("omap iommu: omap2/3 architecture specific functions");
337MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/rockchip-iommu.c b/drivers/iommu/rockchip-iommu.c
new file mode 100644
index 000000000000..b2023af384b9
--- /dev/null
+++ b/drivers/iommu/rockchip-iommu.c
@@ -0,0 +1,1038 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 */
6
7#include <asm/cacheflush.h>
8#include <asm/pgtable.h>
9#include <linux/compiler.h>
10#include <linux/delay.h>
11#include <linux/device.h>
12#include <linux/errno.h>
13#include <linux/interrupt.h>
14#include <linux/io.h>
15#include <linux/iommu.h>
16#include <linux/jiffies.h>
17#include <linux/list.h>
18#include <linux/mm.h>
19#include <linux/module.h>
20#include <linux/of.h>
21#include <linux/of_platform.h>
22#include <linux/platform_device.h>
23#include <linux/slab.h>
24#include <linux/spinlock.h>
25
26/** MMU register offsets */
27#define RK_MMU_DTE_ADDR 0x00 /* Directory table address */
28#define RK_MMU_STATUS 0x04
29#define RK_MMU_COMMAND 0x08
30#define RK_MMU_PAGE_FAULT_ADDR 0x0C /* IOVA of last page fault */
31#define RK_MMU_ZAP_ONE_LINE 0x10 /* Shoot down one IOTLB entry */
32#define RK_MMU_INT_RAWSTAT 0x14 /* IRQ status ignoring mask */
33#define RK_MMU_INT_CLEAR 0x18 /* Acknowledge and re-arm irq */
34#define RK_MMU_INT_MASK 0x1C /* IRQ enable */
35#define RK_MMU_INT_STATUS 0x20 /* IRQ status after masking */
36#define RK_MMU_AUTO_GATING 0x24
37
38#define DTE_ADDR_DUMMY 0xCAFEBABE
39#define FORCE_RESET_TIMEOUT 100 /* ms */
40
41/* RK_MMU_STATUS fields */
42#define RK_MMU_STATUS_PAGING_ENABLED BIT(0)
43#define RK_MMU_STATUS_PAGE_FAULT_ACTIVE BIT(1)
44#define RK_MMU_STATUS_STALL_ACTIVE BIT(2)
45#define RK_MMU_STATUS_IDLE BIT(3)
46#define RK_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4)
47#define RK_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5)
48#define RK_MMU_STATUS_STALL_NOT_ACTIVE BIT(31)
49
50/* RK_MMU_COMMAND command values */
51#define RK_MMU_CMD_ENABLE_PAGING 0 /* Enable memory translation */
52#define RK_MMU_CMD_DISABLE_PAGING 1 /* Disable memory translation */
53#define RK_MMU_CMD_ENABLE_STALL 2 /* Stall paging to allow other cmds */
54#define RK_MMU_CMD_DISABLE_STALL 3 /* Stop stalling; re-enables paging */
55#define RK_MMU_CMD_ZAP_CACHE 4 /* Shoot down entire IOTLB */
56#define RK_MMU_CMD_PAGE_FAULT_DONE 5 /* Clear page fault */
57#define RK_MMU_CMD_FORCE_RESET 6 /* Reset all registers */
58
59/* RK_MMU_INT_* register fields */
60#define RK_MMU_IRQ_PAGE_FAULT 0x01 /* page fault */
61#define RK_MMU_IRQ_BUS_ERROR 0x02 /* bus read error */
62#define RK_MMU_IRQ_MASK (RK_MMU_IRQ_PAGE_FAULT | RK_MMU_IRQ_BUS_ERROR)
63
64#define NUM_DT_ENTRIES 1024
65#define NUM_PT_ENTRIES 1024
66
67#define SPAGE_ORDER 12
68#define SPAGE_SIZE (1 << SPAGE_ORDER)
69
70 /*
71 * Support mapping any size that fits in one page table:
72 * 4 KiB to 4 MiB
73 */
74#define RK_IOMMU_PGSIZE_BITMAP 0x007ff000
75
76#define IOMMU_REG_POLL_COUNT_FAST 1000
77
78struct rk_iommu_domain {
79 struct list_head iommus;
80 u32 *dt; /* page directory table */
81 spinlock_t iommus_lock; /* lock for iommus list */
82 spinlock_t dt_lock; /* lock for modifying page directory table */
83};
84
85struct rk_iommu {
86 struct device *dev;
87 void __iomem *base;
88 int irq;
89 struct list_head node; /* entry in rk_iommu_domain.iommus */
90 struct iommu_domain *domain; /* domain to which iommu is attached */
91};
92
93static inline void rk_table_flush(u32 *va, unsigned int count)
94{
95 phys_addr_t pa_start = virt_to_phys(va);
96 phys_addr_t pa_end = virt_to_phys(va + count);
97 size_t size = pa_end - pa_start;
98
99 __cpuc_flush_dcache_area(va, size);
100 outer_flush_range(pa_start, pa_end);
101}
102
103/**
104 * Inspired by _wait_for in intel_drv.h
105 * This is NOT safe for use in interrupt context.
106 *
107 * Note that it's important that we check the condition again after having
108 * timed out, since the timeout could be due to preemption or similar and
109 * we've never had a chance to check the condition before the timeout.
110 */
111#define rk_wait_for(COND, MS) ({ \
112 unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
113 int ret__ = 0; \
114 while (!(COND)) { \
115 if (time_after(jiffies, timeout__)) { \
116 ret__ = (COND) ? 0 : -ETIMEDOUT; \
117 break; \
118 } \
119 usleep_range(50, 100); \
120 } \
121 ret__; \
122})
123
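/*
 * Usage sketch (hedged), mirroring rk_iommu_enable_stall() further down:
 * rk_wait_for() re-tests COND once after the deadline, so a delay caused
 * by preemption cannot turn a met condition into a spurious -ETIMEDOUT.
 *
 *	ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
 *	if (ret)
 *		dev_err(iommu->dev, "stall request timed out\n");
 */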
124/*
125 * The Rockchip rk3288 iommu uses a 2-level page table.
126 * The first level is the "Directory Table" (DT).
127 * The DT consists of 1024 4-byte Directory Table Entries (DTEs), each pointing
128 * to a "Page Table".
129 * The second level is the 1024 Page Tables (PT).
130 * Each PT consists of 1024 4-byte Page Table Entries (PTEs), each pointing to
131 * a 4 KB page of physical memory.
132 *
133 * The DT and each PT fits in a single 4 KB page (4-bytes * 1024 entries).
134 * Each iommu device has a MMU_DTE_ADDR register that contains the physical
135 * address of the start of the DT page.
136 *
137 * The structure of the page table is as follows:
138 *
139 * DT
140 * MMU_DTE_ADDR -> +-----+
141 * | |
142 * +-----+ PT
143 * | DTE | -> +-----+
144 * +-----+ | | Memory
145 * | | +-----+ Page
146 * | | | PTE | -> +-----+
147 * +-----+ +-----+ | |
148 * | | | |
149 * | | | |
150 * +-----+ | |
151 * | |
152 * | |
153 * +-----+
154 */
155
156/*
157 * Each DTE has a PT address and a valid bit:
158 * +---------------------+-----------+-+
159 * | PT address | Reserved |V|
160 * +---------------------+-----------+-+
161 * 31:12 - PT address (PTs always starts on a 4 KB boundary)
162 * 11: 1 - Reserved
163 * 0 - 1 if PT @ PT address is valid
164 */
165#define RK_DTE_PT_ADDRESS_MASK 0xfffff000
166#define RK_DTE_PT_VALID BIT(0)
167
168static inline phys_addr_t rk_dte_pt_address(u32 dte)
169{
170 return (phys_addr_t)dte & RK_DTE_PT_ADDRESS_MASK;
171}
172
173static inline bool rk_dte_is_pt_valid(u32 dte)
174{
175 return dte & RK_DTE_PT_VALID;
176}
177
178static u32 rk_mk_dte(u32 *pt)
179{
180 phys_addr_t pt_phys = virt_to_phys(pt);
181 return (pt_phys & RK_DTE_PT_ADDRESS_MASK) | RK_DTE_PT_VALID;
182}
183
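/*
 * Hedged sketch of installing a fresh page table with rk_mk_dte(); the
 * allocation flags are illustrative, and rk_iova_dte_index() is defined
 * a little further down in this file.
 *
 *	u32 *pt = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
 *
 *	if (pt) {
 *		rk_table_flush(pt, NUM_PT_ENTRIES);	// flush the zeroed PT
 *		dt[rk_iova_dte_index(iova)] = rk_mk_dte(pt);
 *		rk_table_flush(&dt[rk_iova_dte_index(iova)], 1);
 *	}
 */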
184/*
185 * Each PTE has a Page address, some flags and a valid bit:
186 * +---------------------+---+-------+-+
187 * | Page address |Rsv| Flags |V|
188 * +---------------------+---+-------+-+
189 * 31:12 - Page address (Pages always start on a 4 KB boundary)
190 * 11: 9 - Reserved
191 * 8: 1 - Flags
192 * 8 - Read allocate - allocate cache space on read misses
193 * 7 - Read cache - enable cache & prefetch of data
194 * 6 - Write buffer - enable delaying writes on their way to memory
195 * 5 - Write allocate - allocate cache space on write misses
196 * 4 - Write cache - different writes can be merged together
197 * 3 - Override cache attributes
198 * if 1, bits 4-8 control cache attributes
199 * if 0, the system bus defaults are used
200 * 2 - Writable
201 * 1 - Readable
202 * 0 - 1 if Page @ Page address is valid
203 */
204#define RK_PTE_PAGE_ADDRESS_MASK 0xfffff000
205#define RK_PTE_PAGE_FLAGS_MASK 0x000001fe
206#define RK_PTE_PAGE_WRITABLE BIT(2)
207#define RK_PTE_PAGE_READABLE BIT(1)
208#define RK_PTE_PAGE_VALID BIT(0)
209
210static inline phys_addr_t rk_pte_page_address(u32 pte)
211{
212 return (phys_addr_t)pte & RK_PTE_PAGE_ADDRESS_MASK;
213}
214
215static inline bool rk_pte_is_page_valid(u32 pte)
216{
217 return pte & RK_PTE_PAGE_VALID;
218}
219
220/* TODO: set cache flags per prot IOMMU_CACHE */
221static u32 rk_mk_pte(phys_addr_t page, int prot)
222{
223 u32 flags = 0;
224 flags |= (prot & IOMMU_READ) ? RK_PTE_PAGE_READABLE : 0;
225 flags |= (prot & IOMMU_WRITE) ? RK_PTE_PAGE_WRITABLE : 0;
226 page &= RK_PTE_PAGE_ADDRESS_MASK;
227 return page | flags | RK_PTE_PAGE_VALID;
228}
229
230static u32 rk_mk_pte_invalid(u32 pte)
231{
232 return pte & ~RK_PTE_PAGE_VALID;
233}
234
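/*
 * Worked example: rk_mk_pte(0x12345000, IOMMU_READ | IOMMU_WRITE)
 *
 *	flags = RK_PTE_PAGE_READABLE | RK_PTE_PAGE_WRITABLE = 0x2 | 0x4
 *	pte   = 0x12345000 | 0x6 | RK_PTE_PAGE_VALID        = 0x12345007
 *
 * and rk_mk_pte_invalid(0x12345007) clears only the valid bit -> 0x12345006.
 */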
235/*
236 * rk3288 iova (IOMMU Virtual Address) format
237 * 31 22.21 12.11 0
238 * +-----------+-----------+-------------+
239 * | DTE index | PTE index | Page offset |
240 * +-----------+-----------+-------------+
241 * 31:22 - DTE index - index of DTE in DT
242 * 21:12 - PTE index - index of PTE in PT @ DTE.pt_address
243 * 11: 0 - Page offset - offset into page @ PTE.page_address
244 */
245#define RK_IOVA_DTE_MASK 0xffc00000
246#define RK_IOVA_DTE_SHIFT 22
247#define RK_IOVA_PTE_MASK 0x003ff000
248#define RK_IOVA_PTE_SHIFT 12
249#define RK_IOVA_PAGE_MASK 0x00000fff
250#define RK_IOVA_PAGE_SHIFT 0
251
252static u32 rk_iova_dte_index(dma_addr_t iova)
253{
254 return (u32)(iova & RK_IOVA_DTE_MASK) >> RK_IOVA_DTE_SHIFT;
255}
256
257static u32 rk_iova_pte_index(dma_addr_t iova)
258{
259 return (u32)(iova & RK_IOVA_PTE_MASK) >> RK_IOVA_PTE_SHIFT;
260}
261
262static u32 rk_iova_page_offset(dma_addr_t iova)
263{
264 return (u32)(iova & RK_IOVA_PAGE_MASK) >> RK_IOVA_PAGE_SHIFT;
265}
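
/*
 * To make the split concrete, a hypothetical iova of 0x12345678 decomposes
 * through the helpers above as:
 *
 *   rk_iova_dte_index(0x12345678)   = (0x12345678 & 0xffc00000) >> 22 = 0x048
 *   rk_iova_pte_index(0x12345678)   = (0x12345678 & 0x003ff000) >> 12 = 0x345
 *   rk_iova_page_offset(0x12345678) =  0x12345678 & 0x00000fff        = 0x678
 *
 * i.e. DTE 0x48 in the DT, then PTE 0x345 in that DTE's page table, plus
 * byte offset 0x678 into the mapped 4 KB page.
 */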
266
267static u32 rk_iommu_read(struct rk_iommu *iommu, u32 offset)
268{
269 return readl(iommu->base + offset);
270}
271
272static void rk_iommu_write(struct rk_iommu *iommu, u32 offset, u32 value)
273{
274 writel(value, iommu->base + offset);
275}
276
277static void rk_iommu_command(struct rk_iommu *iommu, u32 command)
278{
279 writel(command, iommu->base + RK_MMU_COMMAND);
280}
281
282static void rk_iommu_zap_lines(struct rk_iommu *iommu, dma_addr_t iova,
283 size_t size)
284{
285 dma_addr_t iova_end = iova + size;
286 /*
287 * TODO(djkurtz): Figure out when it is more efficient to shootdown the
288 * entire iotlb rather than iterate over individual iovas.
289 */
290 for (; iova < iova_end; iova += SPAGE_SIZE)
291 rk_iommu_write(iommu, RK_MMU_ZAP_ONE_LINE, iova);
292}
293
294static bool rk_iommu_is_stall_active(struct rk_iommu *iommu)
295{
296 return rk_iommu_read(iommu, RK_MMU_STATUS) & RK_MMU_STATUS_STALL_ACTIVE;
297}
298
299static bool rk_iommu_is_paging_enabled(struct rk_iommu *iommu)
300{
301 return rk_iommu_read(iommu, RK_MMU_STATUS) &
302 RK_MMU_STATUS_PAGING_ENABLED;
303}
304
305static int rk_iommu_enable_stall(struct rk_iommu *iommu)
306{
307 int ret;
308
309 if (rk_iommu_is_stall_active(iommu))
310 return 0;
311
312 /* Stall can only be enabled if paging is enabled */
313 if (!rk_iommu_is_paging_enabled(iommu))
314 return 0;
315
316 rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_STALL);
317
318 ret = rk_wait_for(rk_iommu_is_stall_active(iommu), 1);
319 if (ret)
320 dev_err(iommu->dev, "Enable stall request timed out, status: %#08x\n",
321 rk_iommu_read(iommu, RK_MMU_STATUS));
322
323 return ret;
324}
325
326static int rk_iommu_disable_stall(struct rk_iommu *iommu)
327{
328 int ret;
329
330 if (!rk_iommu_is_stall_active(iommu))
331 return 0;
332
333 rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_STALL);
334
335 ret = rk_wait_for(!rk_iommu_is_stall_active(iommu), 1);
336 if (ret)
337 dev_err(iommu->dev, "Disable stall request timed out, status: %#08x\n",
338 rk_iommu_read(iommu, RK_MMU_STATUS));
339
340 return ret;
341}
342
343static int rk_iommu_enable_paging(struct rk_iommu *iommu)
344{
345 int ret;
346
347 if (rk_iommu_is_paging_enabled(iommu))
348 return 0;
349
350 rk_iommu_command(iommu, RK_MMU_CMD_ENABLE_PAGING);
351
352 ret = rk_wait_for(rk_iommu_is_paging_enabled(iommu), 1);
353 if (ret)
354 dev_err(iommu->dev, "Enable paging request timed out, status: %#08x\n",
355 rk_iommu_read(iommu, RK_MMU_STATUS));
356
357 return ret;
358}
359
360static int rk_iommu_disable_paging(struct rk_iommu *iommu)
361{
362 int ret;
363
364 if (!rk_iommu_is_paging_enabled(iommu))
365 return 0;
366
367 rk_iommu_command(iommu, RK_MMU_CMD_DISABLE_PAGING);
368
369 ret = rk_wait_for(!rk_iommu_is_paging_enabled(iommu), 1);
370 if (ret)
371 dev_err(iommu->dev, "Disable paging request timed out, status: %#08x\n",
372 rk_iommu_read(iommu, RK_MMU_STATUS));
373
374 return ret;
375}
376
377static int rk_iommu_force_reset(struct rk_iommu *iommu)
378{
379 int ret;
380 u32 dte_addr;
381
382 /*
383 * Check if register DTE_ADDR is working by writing DTE_ADDR_DUMMY
384 * and verifying that the upper 5 nybbles are read back.
385 */
386 rk_iommu_write(iommu, RK_MMU_DTE_ADDR, DTE_ADDR_DUMMY);
387
388 dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
389 if (dte_addr != (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK)) {
390 dev_err(iommu->dev, "Error during raw reset. MMU_DTE_ADDR is not functioning\n");
391 return -EFAULT;
392 }
393
394 rk_iommu_command(iommu, RK_MMU_CMD_FORCE_RESET);
395
396 ret = rk_wait_for(rk_iommu_read(iommu, RK_MMU_DTE_ADDR) == 0x00000000,
397 FORCE_RESET_TIMEOUT);
398 if (ret)
399 dev_err(iommu->dev, "FORCE_RESET command timed out\n");
400
401 return ret;
402}
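
/*
 * The dummy write works as a sanity check because only bits 31:12 of
 * MMU_DTE_ADDR are writable: whatever is written should read back as
 * (DTE_ADDR_DUMMY & RK_DTE_PT_ADDRESS_MASK), i.e. with the low 12 bits
 * cleared. If even that round trip fails, the register file is not
 * responding and issuing FORCE_RESET would be pointless.
 */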
403
404static void log_iova(struct rk_iommu *iommu, dma_addr_t iova)
405{
406 u32 dte_index, pte_index, page_offset;
407 u32 mmu_dte_addr;
408 phys_addr_t mmu_dte_addr_phys, dte_addr_phys;
409 u32 *dte_addr;
410 u32 dte;
411 phys_addr_t pte_addr_phys = 0;
412 u32 *pte_addr = NULL;
413 u32 pte = 0;
414 phys_addr_t page_addr_phys = 0;
415 u32 page_flags = 0;
416
417 dte_index = rk_iova_dte_index(iova);
418 pte_index = rk_iova_pte_index(iova);
419 page_offset = rk_iova_page_offset(iova);
420
421 mmu_dte_addr = rk_iommu_read(iommu, RK_MMU_DTE_ADDR);
422 mmu_dte_addr_phys = (phys_addr_t)mmu_dte_addr;
423
424 dte_addr_phys = mmu_dte_addr_phys + (4 * dte_index);
425 dte_addr = phys_to_virt(dte_addr_phys);
426 dte = *dte_addr;
427
428 if (!rk_dte_is_pt_valid(dte))
429 goto print_it;
430
431 pte_addr_phys = rk_dte_pt_address(dte) + (pte_index * 4);
432 pte_addr = phys_to_virt(pte_addr_phys);
433 pte = *pte_addr;
434
435 if (!rk_pte_is_page_valid(pte))
436 goto print_it;
437
438 page_addr_phys = rk_pte_page_address(pte) + page_offset;
439 page_flags = pte & RK_PTE_PAGE_FLAGS_MASK;
440
441print_it:
442 dev_err(iommu->dev, "iova = %pad: dte_index: %#03x pte_index: %#03x page_offset: %#03x\n",
443 &iova, dte_index, pte_index, page_offset);
444 dev_err(iommu->dev, "mmu_dte_addr: %pa dte@%pa: %#08x valid: %u pte@%pa: %#08x valid: %u page@%pa flags: %#03x\n",
445 &mmu_dte_addr_phys, &dte_addr_phys, dte,
446 rk_dte_is_pt_valid(dte), &pte_addr_phys, pte,
447 rk_pte_is_page_valid(pte), &page_addr_phys, page_flags);
448}
449
450static irqreturn_t rk_iommu_irq(int irq, void *dev_id)
451{
452 struct rk_iommu *iommu = dev_id;
453 u32 status;
454 u32 int_status;
455 dma_addr_t iova;
456
457 int_status = rk_iommu_read(iommu, RK_MMU_INT_STATUS);
458 if (int_status == 0)
459 return IRQ_NONE;
460
461 iova = rk_iommu_read(iommu, RK_MMU_PAGE_FAULT_ADDR);
462
463 if (int_status & RK_MMU_IRQ_PAGE_FAULT) {
464 int flags;
465
466 status = rk_iommu_read(iommu, RK_MMU_STATUS);
467 flags = (status & RK_MMU_STATUS_PAGE_FAULT_IS_WRITE) ?
468 IOMMU_FAULT_WRITE : IOMMU_FAULT_READ;
469
470 dev_err(iommu->dev, "Page fault at %pad of type %s\n",
471 &iova,
472 (flags == IOMMU_FAULT_WRITE) ? "write" : "read");
473
474 log_iova(iommu, iova);
475
476 /*
477 * Report page fault to any installed handlers.
478 * Ignore the return code, though, since we always zap cache
479 * and clear the page fault anyway.
480 */
481 if (iommu->domain)
482 report_iommu_fault(iommu->domain, iommu->dev, iova,
483 flags);
484 else
485 dev_err(iommu->dev, "Page fault while iommu not attached to domain?\n");
486
487 rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
488 rk_iommu_command(iommu, RK_MMU_CMD_PAGE_FAULT_DONE);
489 }
490
491 if (int_status & RK_MMU_IRQ_BUS_ERROR)
492 dev_err(iommu->dev, "BUS_ERROR occurred at %pad\n", &iova);
493
494 if (int_status & ~RK_MMU_IRQ_MASK)
495 dev_err(iommu->dev, "unexpected int_status: %#08x\n",
496 int_status);
497
498 rk_iommu_write(iommu, RK_MMU_INT_CLEAR, int_status);
499
500 return IRQ_HANDLED;
501}
502
503static phys_addr_t rk_iommu_iova_to_phys(struct iommu_domain *domain,
504 dma_addr_t iova)
505{
506 struct rk_iommu_domain *rk_domain = domain->priv;
507 unsigned long flags;
508 phys_addr_t pt_phys, phys = 0;
509 u32 dte, pte;
510 u32 *page_table;
511
512 spin_lock_irqsave(&rk_domain->dt_lock, flags);
513
514 dte = rk_domain->dt[rk_iova_dte_index(iova)];
515 if (!rk_dte_is_pt_valid(dte))
516 goto out;
517
518 pt_phys = rk_dte_pt_address(dte);
519 page_table = (u32 *)phys_to_virt(pt_phys);
520 pte = page_table[rk_iova_pte_index(iova)];
521 if (!rk_pte_is_page_valid(pte))
522 goto out;
523
524 phys = rk_pte_page_address(pte) + rk_iova_page_offset(iova);
525out:
526 spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
527
528 return phys;
529}
530
531static void rk_iommu_zap_iova(struct rk_iommu_domain *rk_domain,
532 dma_addr_t iova, size_t size)
533{
534 struct list_head *pos;
535 unsigned long flags;
536
537 /* shootdown these iova from all iommus using this domain */
538 spin_lock_irqsave(&rk_domain->iommus_lock, flags);
539 list_for_each(pos, &rk_domain->iommus) {
540 struct rk_iommu *iommu;
541 iommu = list_entry(pos, struct rk_iommu, node);
542 rk_iommu_zap_lines(iommu, iova, size);
543 }
544 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
545}
546
547static u32 *rk_dte_get_page_table(struct rk_iommu_domain *rk_domain,
548 dma_addr_t iova)
549{
550 u32 *page_table, *dte_addr;
551 u32 dte;
552 phys_addr_t pt_phys;
553
554 assert_spin_locked(&rk_domain->dt_lock);
555
556 dte_addr = &rk_domain->dt[rk_iova_dte_index(iova)];
557 dte = *dte_addr;
558 if (rk_dte_is_pt_valid(dte))
559 goto done;
560
561 page_table = (u32 *)get_zeroed_page(GFP_ATOMIC | GFP_DMA32);
562 if (!page_table)
563 return ERR_PTR(-ENOMEM);
564
565 dte = rk_mk_dte(page_table);
566 *dte_addr = dte;
567
568 rk_table_flush(page_table, NUM_PT_ENTRIES);
569 rk_table_flush(dte_addr, 1);
570
571 /*
572 * Zap the first iova of newly allocated page table so iommu evicts
573 * old cached value of new dte from the iotlb.
574 */
575 rk_iommu_zap_iova(rk_domain, iova, SPAGE_SIZE);
576
577done:
578 pt_phys = rk_dte_pt_address(dte);
579 return (u32 *)phys_to_virt(pt_phys);
580}
581
582static size_t rk_iommu_unmap_iova(struct rk_iommu_domain *rk_domain,
583 u32 *pte_addr, dma_addr_t iova, size_t size)
584{
585 unsigned int pte_count;
586 unsigned int pte_total = size / SPAGE_SIZE;
587
588 assert_spin_locked(&rk_domain->dt_lock);
589
590 for (pte_count = 0; pte_count < pte_total; pte_count++) {
591 u32 pte = pte_addr[pte_count];
592 if (!rk_pte_is_page_valid(pte))
593 break;
594
595 pte_addr[pte_count] = rk_mk_pte_invalid(pte);
596 }
597
598 rk_table_flush(pte_addr, pte_count);
599
600 return pte_count * SPAGE_SIZE;
601}
602
603static int rk_iommu_map_iova(struct rk_iommu_domain *rk_domain, u32 *pte_addr,
604 dma_addr_t iova, phys_addr_t paddr, size_t size,
605 int prot)
606{
607 unsigned int pte_count;
608 unsigned int pte_total = size / SPAGE_SIZE;
609 phys_addr_t page_phys;
610
611 assert_spin_locked(&rk_domain->dt_lock);
612
613 for (pte_count = 0; pte_count < pte_total; pte_count++) {
614 u32 pte = pte_addr[pte_count];
615
616 if (rk_pte_is_page_valid(pte))
617 goto unwind;
618
619 pte_addr[pte_count] = rk_mk_pte(paddr, prot);
620
621 paddr += SPAGE_SIZE;
622 }
623
624 rk_table_flush(pte_addr, pte_count);
625
626 return 0;
627unwind:
628 /* Unmap the range of iovas that we just mapped */
629 rk_iommu_unmap_iova(rk_domain, pte_addr, iova, pte_count * SPAGE_SIZE);
630
631 iova += pte_count * SPAGE_SIZE;
632 page_phys = rk_pte_page_address(pte_addr[pte_count]);
633 pr_err("iova: %pad already mapped to %pa cannot remap to phys: %pa prot: %#x\n",
634 &iova, &page_phys, &paddr, prot);
635
636 return -EADDRINUSE;
637}
638
639static int rk_iommu_map(struct iommu_domain *domain, unsigned long _iova,
640 phys_addr_t paddr, size_t size, int prot)
641{
642 struct rk_iommu_domain *rk_domain = domain->priv;
643 unsigned long flags;
644 dma_addr_t iova = (dma_addr_t)_iova;
645 u32 *page_table, *pte_addr;
646 int ret;
647
648 spin_lock_irqsave(&rk_domain->dt_lock, flags);
649
650 /*
651 * pgsize_bitmap specifies iova sizes that fit in one page table
652 * (1024 4-KiB pages = 4 MiB).
653 * So, size will always be 4096 <= size <= 4194304.
654 * Since iommu_map() guarantees that both iova and size will be
655 * aligned, we will always only be mapping from a single dte here.
656 */
657 page_table = rk_dte_get_page_table(rk_domain, iova);
658 if (IS_ERR(page_table)) {
659 spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
660 return PTR_ERR(page_table);
661 }
662
663 pte_addr = &page_table[rk_iova_pte_index(iova)];
664 ret = rk_iommu_map_iova(rk_domain, pte_addr, iova, paddr, size, prot);
665 spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
666
667 return ret;
668}
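
/*
 * A minimal sketch of how a client might reach this path through the
 * generic IOMMU API (the device, iova, and page below are assumptions for
 * illustration, not taken from this driver):
 *
 *	struct iommu_domain *domain = iommu_domain_alloc(&platform_bus_type);
 *	int ret = iommu_attach_device(domain, dev);
 *
 *	if (!ret)
 *		ret = iommu_map(domain, 0x10000000, page_to_phys(page),
 *				SZ_4K, IOMMU_READ | IOMMU_WRITE);
 *
 * iommu_map() splits larger requests according to pgsize_bitmap, which is
 * why rk_iommu_map() only ever sees aligned sizes between 4 KiB and 4 MiB
 * that stay within a single dte, as the comment above explains.
 */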
669
670static size_t rk_iommu_unmap(struct iommu_domain *domain, unsigned long _iova,
671 size_t size)
672{
673 struct rk_iommu_domain *rk_domain = domain->priv;
674 unsigned long flags;
675 dma_addr_t iova = (dma_addr_t)_iova;
676 phys_addr_t pt_phys;
677 u32 dte;
678 u32 *pte_addr;
679 size_t unmap_size;
680
681 spin_lock_irqsave(&rk_domain->dt_lock, flags);
682
683 /*
684 * pgsize_bitmap specifies iova sizes that fit in one page table
685 * (1024 4-KiB pages = 4 MiB).
686 * So, size will always be 4096 <= size <= 4194304.
687 * Since iommu_unmap() guarantees that both iova and size will be
688 * aligned, we will always only be unmapping from a single dte here.
689 */
690 dte = rk_domain->dt[rk_iova_dte_index(iova)];
691 /* Just return 0 if iova is unmapped */
692 if (!rk_dte_is_pt_valid(dte)) {
693 spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
694 return 0;
695 }
696
697 pt_phys = rk_dte_pt_address(dte);
698 pte_addr = (u32 *)phys_to_virt(pt_phys) + rk_iova_pte_index(iova);
699 unmap_size = rk_iommu_unmap_iova(rk_domain, pte_addr, iova, size);
700
701 spin_unlock_irqrestore(&rk_domain->dt_lock, flags);
702
703 /* Shootdown iotlb entries for iova range that was just unmapped */
704 rk_iommu_zap_iova(rk_domain, iova, unmap_size);
705
706 return unmap_size;
707}
708
709static struct rk_iommu *rk_iommu_from_dev(struct device *dev)
710{
711 struct iommu_group *group;
712 struct device *iommu_dev;
713 struct rk_iommu *rk_iommu;
714
715 group = iommu_group_get(dev);
716 if (!group)
717 return NULL;
718 iommu_dev = iommu_group_get_iommudata(group);
719 rk_iommu = dev_get_drvdata(iommu_dev);
720 iommu_group_put(group);
721
722 return rk_iommu;
723}
724
725static int rk_iommu_attach_device(struct iommu_domain *domain,
726 struct device *dev)
727{
728 struct rk_iommu *iommu;
729 struct rk_iommu_domain *rk_domain = domain->priv;
730 unsigned long flags;
731 int ret;
732 phys_addr_t dte_addr;
733
734 /*
735 * Allow 'virtual devices' (e.g., drm) to attach to domain.
736 * Such a device does not belong to an iommu group.
737 */
738 iommu = rk_iommu_from_dev(dev);
739 if (!iommu)
740 return 0;
741
742 ret = rk_iommu_enable_stall(iommu);
743 if (ret)
744 return ret;
745
746 ret = rk_iommu_force_reset(iommu);
747 if (ret)
748 return ret;
749
750 iommu->domain = domain;
751
752 ret = devm_request_irq(dev, iommu->irq, rk_iommu_irq,
753 IRQF_SHARED, dev_name(dev), iommu);
754 if (ret)
755 return ret;
756
757 dte_addr = virt_to_phys(rk_domain->dt);
758 rk_iommu_write(iommu, RK_MMU_DTE_ADDR, dte_addr);
759 rk_iommu_command(iommu, RK_MMU_CMD_ZAP_CACHE);
760 rk_iommu_write(iommu, RK_MMU_INT_MASK, RK_MMU_IRQ_MASK);
761
762 ret = rk_iommu_enable_paging(iommu);
763 if (ret)
764 return ret;
765
766 spin_lock_irqsave(&rk_domain->iommus_lock, flags);
767 list_add_tail(&iommu->node, &rk_domain->iommus);
768 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
769
770 dev_info(dev, "Attached to iommu domain\n");
771
772 rk_iommu_disable_stall(iommu);
773
774 return 0;
775}
776
777static void rk_iommu_detach_device(struct iommu_domain *domain,
778 struct device *dev)
779{
780 struct rk_iommu *iommu;
781 struct rk_iommu_domain *rk_domain = domain->priv;
782 unsigned long flags;
783
784 /* Allow 'virtual devices' (e.g., drm) to detach from domain */
785 iommu = rk_iommu_from_dev(dev);
786 if (!iommu)
787 return;
788
789 spin_lock_irqsave(&rk_domain->iommus_lock, flags);
790 list_del_init(&iommu->node);
791 spin_unlock_irqrestore(&rk_domain->iommus_lock, flags);
792
793 /* Ignore error while disabling, just keep going */
794 rk_iommu_enable_stall(iommu);
795 rk_iommu_disable_paging(iommu);
796 rk_iommu_write(iommu, RK_MMU_INT_MASK, 0);
797 rk_iommu_write(iommu, RK_MMU_DTE_ADDR, 0);
798 rk_iommu_disable_stall(iommu);
799
800 devm_free_irq(dev, iommu->irq, iommu);
801
802 iommu->domain = NULL;
803
804 dev_info(dev, "Detached from iommu domain\n");
805}
806
807static int rk_iommu_domain_init(struct iommu_domain *domain)
808{
809 struct rk_iommu_domain *rk_domain;
810
811 rk_domain = kzalloc(sizeof(*rk_domain), GFP_KERNEL);
812 if (!rk_domain)
813 return -ENOMEM;
814
815 /*
816 * rk32xx iommus use a 2 level pagetable.
817 * Each level1 (dt) and level2 (pt) table has 1024 4-byte entries.
818 * Allocate one 4 KiB page for each table.
819 */
820 rk_domain->dt = (u32 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
821 if (!rk_domain->dt)
822 goto err_dt;
823
824 rk_table_flush(rk_domain->dt, NUM_DT_ENTRIES);
825
826 spin_lock_init(&rk_domain->iommus_lock);
827 spin_lock_init(&rk_domain->dt_lock);
828 INIT_LIST_HEAD(&rk_domain->iommus);
829
830 domain->priv = rk_domain;
831
832 return 0;
833err_dt:
834 kfree(rk_domain);
835 return -ENOMEM;
836}
837
838static void rk_iommu_domain_destroy(struct iommu_domain *domain)
839{
840 struct rk_iommu_domain *rk_domain = domain->priv;
841 int i;
842
843 WARN_ON(!list_empty(&rk_domain->iommus));
844
845 for (i = 0; i < NUM_DT_ENTRIES; i++) {
846 u32 dte = rk_domain->dt[i];
847 if (rk_dte_is_pt_valid(dte)) {
848 phys_addr_t pt_phys = rk_dte_pt_address(dte);
849 u32 *page_table = phys_to_virt(pt_phys);
850 free_page((unsigned long)page_table);
851 }
852 }
853
854 free_page((unsigned long)rk_domain->dt);
855 kfree(domain->priv);
856 domain->priv = NULL;
857}
858
859static bool rk_iommu_is_dev_iommu_master(struct device *dev)
860{
861 struct device_node *np = dev->of_node;
862 int ret;
863
864 /*
865 * An iommu master has an iommus property containing a list of phandles
866 * to iommu nodes, each with an #iommu-cells property with value 0.
867 */
868 ret = of_count_phandle_with_args(np, "iommus", "#iommu-cells");
869 return (ret > 0);
870}
871
872static int rk_iommu_group_set_iommudata(struct iommu_group *group,
873 struct device *dev)
874{
875 struct device_node *np = dev->of_node;
876 struct platform_device *pd;
877 int ret;
878 struct of_phandle_args args;
879
880 /*
881 * An iommu master has an iommus property containing a list of phandles
882 * to iommu nodes, each with an #iommu-cells property with value 0.
883 */
884 ret = of_parse_phandle_with_args(np, "iommus", "#iommu-cells", 0,
885 &args);
886 if (ret) {
887 dev_err(dev, "of_parse_phandle_with_args(%s) => %d\n",
888 np->full_name, ret);
889 return ret;
890 }
891 if (args.args_count != 0) {
892 dev_err(dev, "incorrect number of iommu params found for %s (found %d, expected 0)\n",
893 args.np->full_name, args.args_count);
894 return -EINVAL;
895 }
896
897 pd = of_find_device_by_node(args.np);
898 of_node_put(args.np);
899 if (!pd) {
900 dev_err(dev, "iommu %s not found\n", args.np->full_name);
901 return -EPROBE_DEFER;
902 }
903
904 /* TODO(djkurtz): handle multiple slave iommus for a single master */
905 iommu_group_set_iommudata(group, &pd->dev, NULL);
906
907 return 0;
908}
909
910static int rk_iommu_add_device(struct device *dev)
911{
912 struct iommu_group *group;
913 int ret;
914
915 if (!rk_iommu_is_dev_iommu_master(dev))
916 return -ENODEV;
917
918 group = iommu_group_get(dev);
919 if (!group) {
920 group = iommu_group_alloc();
921 if (IS_ERR(group)) {
922 dev_err(dev, "Failed to allocate IOMMU group\n");
923 return PTR_ERR(group);
924 }
925 }
926
927 ret = iommu_group_add_device(group, dev);
928 if (ret)
929 goto err_put_group;
930
931 ret = rk_iommu_group_set_iommudata(group, dev);
932 if (ret)
933 goto err_remove_device;
934
935 iommu_group_put(group);
936
937 return 0;
938
939err_remove_device:
940 iommu_group_remove_device(dev);
941err_put_group:
942 iommu_group_put(group);
943 return ret;
944}
945
946static void rk_iommu_remove_device(struct device *dev)
947{
948 if (!rk_iommu_is_dev_iommu_master(dev))
949 return;
950
951 iommu_group_remove_device(dev);
952}
953
954static const struct iommu_ops rk_iommu_ops = {
955 .domain_init = rk_iommu_domain_init,
956 .domain_destroy = rk_iommu_domain_destroy,
957 .attach_dev = rk_iommu_attach_device,
958 .detach_dev = rk_iommu_detach_device,
959 .map = rk_iommu_map,
960 .unmap = rk_iommu_unmap,
961 .add_device = rk_iommu_add_device,
962 .remove_device = rk_iommu_remove_device,
963 .iova_to_phys = rk_iommu_iova_to_phys,
964 .pgsize_bitmap = RK_IOMMU_PGSIZE_BITMAP,
965};
966
967static int rk_iommu_probe(struct platform_device *pdev)
968{
969 struct device *dev = &pdev->dev;
970 struct rk_iommu *iommu;
971 struct resource *res;
972
973 iommu = devm_kzalloc(dev, sizeof(*iommu), GFP_KERNEL);
974 if (!iommu)
975 return -ENOMEM;
976
977 platform_set_drvdata(pdev, iommu);
978 iommu->dev = dev;
979
980 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
981 iommu->base = devm_ioremap_resource(&pdev->dev, res);
982 if (IS_ERR(iommu->base))
983 return PTR_ERR(iommu->base);
984
985 iommu->irq = platform_get_irq(pdev, 0);
986 if (iommu->irq < 0) {
987 dev_err(dev, "Failed to get IRQ, %d\n", iommu->irq);
988 return -ENXIO;
989 }
990
991 return 0;
992}
993
994static int rk_iommu_remove(struct platform_device *pdev)
995{
996 return 0;
997}
998
999#ifdef CONFIG_OF
1000static const struct of_device_id rk_iommu_dt_ids[] = {
1001 { .compatible = "rockchip,iommu" },
1002 { /* sentinel */ }
1003};
1004MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);
1005#endif
1006
1007static struct platform_driver rk_iommu_driver = {
1008 .probe = rk_iommu_probe,
1009 .remove = rk_iommu_remove,
1010 .driver = {
1011 .name = "rk_iommu",
1012 .owner = THIS_MODULE,
1013 .of_match_table = of_match_ptr(rk_iommu_dt_ids),
1014 },
1015};
1016
1017static int __init rk_iommu_init(void)
1018{
1019 int ret;
1020
1021 ret = bus_set_iommu(&platform_bus_type, &rk_iommu_ops);
1022 if (ret)
1023 return ret;
1024
1025 return platform_driver_register(&rk_iommu_driver);
1026}
1027static void __exit rk_iommu_exit(void)
1028{
1029 platform_driver_unregister(&rk_iommu_driver);
1030}
1031
1032subsys_initcall(rk_iommu_init);
1033module_exit(rk_iommu_exit);
1034
1035MODULE_DESCRIPTION("IOMMU API for Rockchip");
1036MODULE_AUTHOR("Simon Xue <xxm@rock-chips.com> and Daniel Kurtz <djkurtz@chromium.org>");
1037MODULE_ALIAS("platform:rockchip-iommu");
1038MODULE_LICENSE("GPL v2");
diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig
index 9efe5f10f97b..e12cb23d786c 100644
--- a/drivers/irqchip/Kconfig
+++ b/drivers/irqchip/Kconfig
@@ -130,3 +130,7 @@ config KEYSTONE_IRQ
 	help
 	  Support for Texas Instruments Keystone 2 IRQ controller IP which
 	  is part of the Keystone 2 IPC mechanism
+
+config MIPS_GIC
+	bool
+	select MIPS_CM
diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile
index f0909d05eae3..4954a314c31e 100644
--- a/drivers/irqchip/Makefile
+++ b/drivers/irqchip/Makefile
@@ -38,3 +38,4 @@ obj-$(CONFIG_IRQ_CROSSBAR) += irq-crossbar.o
 obj-$(CONFIG_BCM7120_L2_IRQ)		+= irq-bcm7120-l2.o
 obj-$(CONFIG_BRCMSTB_L2_IRQ)		+= irq-brcmstb-l2.o
 obj-$(CONFIG_KEYSTONE_IRQ)		+= irq-keystone.o
+obj-$(CONFIG_MIPS_GIC)			+= irq-mips-gic.o
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
new file mode 100644
index 000000000000..2b0468e3df6a
--- /dev/null
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -0,0 +1,789 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
7 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
8 */
9#include <linux/bitmap.h>
10#include <linux/clocksource.h>
11#include <linux/init.h>
12#include <linux/interrupt.h>
13#include <linux/irq.h>
14#include <linux/irqchip/mips-gic.h>
15#include <linux/of_address.h>
16#include <linux/sched.h>
17#include <linux/smp.h>
18
19#include <asm/mips-cm.h>
20#include <asm/setup.h>
21#include <asm/traps.h>
22
23#include <dt-bindings/interrupt-controller/mips-gic.h>
24
25#include "irqchip.h"
26
27unsigned int gic_present;
28
29struct gic_pcpu_mask {
30 DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
31};
32
33static void __iomem *gic_base;
34static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
35static DEFINE_SPINLOCK(gic_lock);
36static struct irq_domain *gic_irq_domain;
37static int gic_shared_intrs;
38static int gic_vpes;
39static unsigned int gic_cpu_pin;
40static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
41
42static void __gic_irq_dispatch(void);
43
44static inline unsigned int gic_read(unsigned int reg)
45{
46 return __raw_readl(gic_base + reg);
47}
48
49static inline void gic_write(unsigned int reg, unsigned int val)
50{
51 __raw_writel(val, gic_base + reg);
52}
53
54static inline void gic_update_bits(unsigned int reg, unsigned int mask,
55 unsigned int val)
56{
57 unsigned int regval;
58
59 regval = gic_read(reg);
60 regval &= ~mask;
61 regval |= val;
62 gic_write(reg, regval);
63}
64
65static inline void gic_reset_mask(unsigned int intr)
66{
67 gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
68 1 << GIC_INTR_BIT(intr));
69}
70
71static inline void gic_set_mask(unsigned int intr)
72{
73 gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
74 1 << GIC_INTR_BIT(intr));
75}
76
77static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
78{
79 gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
80 GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
81 pol << GIC_INTR_BIT(intr));
82}
83
84static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
85{
86 gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
87 GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
88 trig << GIC_INTR_BIT(intr));
89}
90
91static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
92{
93 gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
94 1 << GIC_INTR_BIT(intr),
95 dual << GIC_INTR_BIT(intr));
96}
97
98static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
99{
100 gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
101 GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
102}
103
104static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
105{
106 gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
107 GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
108 GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
109}
110
111#ifdef CONFIG_CLKSRC_MIPS_GIC
112cycle_t gic_read_count(void)
113{
114 unsigned int hi, hi2, lo;
115
116 do {
117 hi = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
118 lo = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
119 hi2 = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
120 } while (hi2 != hi);
121
122 return (((cycle_t) hi) << 32) + lo;
123}
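
/*
 * The hi/lo/hi2 loop above is the usual way to read a 64-bit counter exposed
 * as two 32-bit registers: if the high word changed while the low word was
 * being read, the low word may have wrapped, so the read is retried. The
 * same idea in a generic sketch (register accessors assumed):
 *
 *	do {
 *		hi = read_hi();
 *		lo = read_lo();
 *	} while (read_hi() != hi);
 *	count = ((u64)hi << 32) | lo;
 */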
124
125unsigned int gic_get_count_width(void)
126{
127 unsigned int bits, config;
128
129 config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
130 bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
131 GIC_SH_CONFIG_COUNTBITS_SHF);
132
133 return bits;
134}
135
136void gic_write_compare(cycle_t cnt)
137{
138 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
139 (int)(cnt >> 32));
140 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
141 (int)(cnt & 0xffffffff));
142}
143
144void gic_write_cpu_compare(cycle_t cnt, int cpu)
145{
146 unsigned long flags;
147
148 local_irq_save(flags);
149
150 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
151 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
152 (int)(cnt >> 32));
153 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
154 (int)(cnt & 0xffffffff));
155
156 local_irq_restore(flags);
157}
158
159cycle_t gic_read_compare(void)
160{
161 unsigned int hi, lo;
162
163 hi = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
164 lo = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));
165
166 return (((cycle_t) hi) << 32) + lo;
167}
168#endif
169
170static bool gic_local_irq_is_routable(int intr)
171{
172 u32 vpe_ctl;
173
174 /* All local interrupts are routable in EIC mode. */
175 if (cpu_has_veic)
176 return true;
177
178 vpe_ctl = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
179 switch (intr) {
180 case GIC_LOCAL_INT_TIMER:
181 return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
182 case GIC_LOCAL_INT_PERFCTR:
183 return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
184 case GIC_LOCAL_INT_FDC:
185 return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
186 case GIC_LOCAL_INT_SWINT0:
187 case GIC_LOCAL_INT_SWINT1:
188 return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
189 default:
190 return true;
191 }
192}
193
194unsigned int gic_get_timer_pending(void)
195{
196 unsigned int vpe_pending;
197
198 vpe_pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
199 return vpe_pending & GIC_VPE_PEND_TIMER_MSK;
200}
201
202static void gic_bind_eic_interrupt(int irq, int set)
203{
204 /* Convert irq vector # to hw int # */
205 irq -= GIC_PIN_TO_VEC_OFFSET;
206
207 /* Set irq to use shadow set */
208 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
209 GIC_VPE_EIC_SS(irq), set);
210}
211
212void gic_send_ipi(unsigned int intr)
213{
214 gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(intr));
215}
216
217int gic_get_c0_compare_int(void)
218{
219 if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
220 return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
221 return irq_create_mapping(gic_irq_domain,
222 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
223}
224
225int gic_get_c0_perfcount_int(void)
226{
227 if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
228 /* Is the performance counter shared with the timer? */
229 if (cp0_perfcount_irq < 0)
230 return -1;
231 return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
232 }
233 return irq_create_mapping(gic_irq_domain,
234 GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
235}
236
237static unsigned int gic_get_int(void)
238{
239 unsigned int i;
240 unsigned long *pcpu_mask;
241 unsigned long pending_reg, intrmask_reg;
242 DECLARE_BITMAP(pending, GIC_MAX_INTRS);
243 DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);
244
245 /* Get per-cpu bitmaps */
246 pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;
247
248 pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
249 intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);
250
251 for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
252 pending[i] = gic_read(pending_reg);
253 intrmask[i] = gic_read(intrmask_reg);
254 pending_reg += 0x4;
255 intrmask_reg += 0x4;
256 }
257
258 bitmap_and(pending, pending, intrmask, gic_shared_intrs);
259 bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);
260
261 return find_first_bit(pending, gic_shared_intrs);
262}
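
/*
 * In other words, a shared interrupt is serviced here only if it is pending,
 * unmasked, and routed to this CPU. With hypothetical values pending = 0x14,
 * intrmask = 0x1c and pcpu_mask = 0x10, only bit 4 survives both ANDs, so
 * gic_get_int() returns 4.
 */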
263
264static void gic_mask_irq(struct irq_data *d)
265{
266 gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
267}
268
269static void gic_unmask_irq(struct irq_data *d)
270{
271 gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
272}
273
274static void gic_ack_irq(struct irq_data *d)
275{
276 unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
277
278 gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
279}
280
281static int gic_set_type(struct irq_data *d, unsigned int type)
282{
283 unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
284 unsigned long flags;
285 bool is_edge;
286
287 spin_lock_irqsave(&gic_lock, flags);
288 switch (type & IRQ_TYPE_SENSE_MASK) {
289 case IRQ_TYPE_EDGE_FALLING:
290 gic_set_polarity(irq, GIC_POL_NEG);
291 gic_set_trigger(irq, GIC_TRIG_EDGE);
292 gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
293 is_edge = true;
294 break;
295 case IRQ_TYPE_EDGE_RISING:
296 gic_set_polarity(irq, GIC_POL_POS);
297 gic_set_trigger(irq, GIC_TRIG_EDGE);
298 gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
299 is_edge = true;
300 break;
301 case IRQ_TYPE_EDGE_BOTH:
302 /* polarity is irrelevant in this case */
303 gic_set_trigger(irq, GIC_TRIG_EDGE);
304 gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
305 is_edge = true;
306 break;
307 case IRQ_TYPE_LEVEL_LOW:
308 gic_set_polarity(irq, GIC_POL_NEG);
309 gic_set_trigger(irq, GIC_TRIG_LEVEL);
310 gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
311 is_edge = false;
312 break;
313 case IRQ_TYPE_LEVEL_HIGH:
314 default:
315 gic_set_polarity(irq, GIC_POL_POS);
316 gic_set_trigger(irq, GIC_TRIG_LEVEL);
317 gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
318 is_edge = false;
319 break;
320 }
321
322 if (is_edge) {
323 __irq_set_chip_handler_name_locked(d->irq,
324 &gic_edge_irq_controller,
325 handle_edge_irq, NULL);
326 } else {
327 __irq_set_chip_handler_name_locked(d->irq,
328 &gic_level_irq_controller,
329 handle_level_irq, NULL);
330 }
331 spin_unlock_irqrestore(&gic_lock, flags);
332
333 return 0;
334}
335
336#ifdef CONFIG_SMP
337static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
338 bool force)
339{
340 unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
341 cpumask_t tmp = CPU_MASK_NONE;
342 unsigned long flags;
343 int i;
344
345 cpumask_and(&tmp, cpumask, cpu_online_mask);
346 if (cpus_empty(tmp))
347 return -EINVAL;
348
349 /* Assumption : cpumask refers to a single CPU */
350 spin_lock_irqsave(&gic_lock, flags);
351
352 /* Re-route this IRQ */
353 gic_map_to_vpe(irq, first_cpu(tmp));
354
355 /* Update the pcpu_masks */
356 for (i = 0; i < NR_CPUS; i++)
357 clear_bit(irq, pcpu_masks[i].pcpu_mask);
358 set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
359
360 cpumask_copy(d->affinity, cpumask);
361 spin_unlock_irqrestore(&gic_lock, flags);
362
363 return IRQ_SET_MASK_OK_NOCOPY;
364}
365#endif
366
367static struct irq_chip gic_level_irq_controller = {
368 .name = "MIPS GIC",
369 .irq_mask = gic_mask_irq,
370 .irq_unmask = gic_unmask_irq,
371 .irq_set_type = gic_set_type,
372#ifdef CONFIG_SMP
373 .irq_set_affinity = gic_set_affinity,
374#endif
375};
376
377static struct irq_chip gic_edge_irq_controller = {
378 .name = "MIPS GIC",
379 .irq_ack = gic_ack_irq,
380 .irq_mask = gic_mask_irq,
381 .irq_unmask = gic_unmask_irq,
382 .irq_set_type = gic_set_type,
383#ifdef CONFIG_SMP
384 .irq_set_affinity = gic_set_affinity,
385#endif
386};
387
388static unsigned int gic_get_local_int(void)
389{
390 unsigned long pending, masked;
391
392 pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
393 masked = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));
394
395 bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);
396
397 return find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
398}
399
400static void gic_mask_local_irq(struct irq_data *d)
401{
402 int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
403
404 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
405}
406
407static void gic_unmask_local_irq(struct irq_data *d)
408{
409 int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
410
411 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
412}
413
414static struct irq_chip gic_local_irq_controller = {
415 .name = "MIPS GIC Local",
416 .irq_mask = gic_mask_local_irq,
417 .irq_unmask = gic_unmask_local_irq,
418};
419
420static void gic_mask_local_irq_all_vpes(struct irq_data *d)
421{
422 int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
423 int i;
424 unsigned long flags;
425
426 spin_lock_irqsave(&gic_lock, flags);
427 for (i = 0; i < gic_vpes; i++) {
428 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
429 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
430 }
431 spin_unlock_irqrestore(&gic_lock, flags);
432}
433
434static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
435{
436 int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
437 int i;
438 unsigned long flags;
439
440 spin_lock_irqsave(&gic_lock, flags);
441 for (i = 0; i < gic_vpes; i++) {
442 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
443 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
444 }
445 spin_unlock_irqrestore(&gic_lock, flags);
446}
447
448static struct irq_chip gic_all_vpes_local_irq_controller = {
449 .name = "MIPS GIC Local",
450 .irq_mask = gic_mask_local_irq_all_vpes,
451 .irq_unmask = gic_unmask_local_irq_all_vpes,
452};
453
454static void __gic_irq_dispatch(void)
455{
456 unsigned int intr, virq;
457
458 while ((intr = gic_get_local_int()) != GIC_NUM_LOCAL_INTRS) {
459 virq = irq_linear_revmap(gic_irq_domain,
460 GIC_LOCAL_TO_HWIRQ(intr));
461 do_IRQ(virq);
462 }
463
464 while ((intr = gic_get_int()) != gic_shared_intrs) {
465 virq = irq_linear_revmap(gic_irq_domain,
466 GIC_SHARED_TO_HWIRQ(intr));
467 do_IRQ(virq);
468 }
469}
470
471static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc)
472{
473 __gic_irq_dispatch();
474}
475
476#ifdef CONFIG_MIPS_GIC_IPI
477static int gic_resched_int_base;
478static int gic_call_int_base;
479
480unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
481{
482 return gic_resched_int_base + cpu;
483}
484
485unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
486{
487 return gic_call_int_base + cpu;
488}
489
490static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
491{
492 scheduler_ipi();
493
494 return IRQ_HANDLED;
495}
496
497static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
498{
499 smp_call_function_interrupt();
500
501 return IRQ_HANDLED;
502}
503
504static struct irqaction irq_resched = {
505 .handler = ipi_resched_interrupt,
506 .flags = IRQF_PERCPU,
507 .name = "IPI resched"
508};
509
510static struct irqaction irq_call = {
511 .handler = ipi_call_interrupt,
512 .flags = IRQF_PERCPU,
513 .name = "IPI call"
514};
515
516static __init void gic_ipi_init_one(unsigned int intr, int cpu,
517 struct irqaction *action)
518{
519 int virq = irq_create_mapping(gic_irq_domain,
520 GIC_SHARED_TO_HWIRQ(intr));
521 int i;
522
523 gic_map_to_vpe(intr, cpu);
524 for (i = 0; i < NR_CPUS; i++)
525 clear_bit(intr, pcpu_masks[i].pcpu_mask);
526 set_bit(intr, pcpu_masks[cpu].pcpu_mask);
527
528 irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);
529
530 irq_set_handler(virq, handle_percpu_irq);
531 setup_irq(virq, action);
532}
533
534static __init void gic_ipi_init(void)
535{
536 int i;
537
538	/* Use the last 2 * nr_cpu_ids shared interrupts as IPIs */
539 gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
540 gic_call_int_base = gic_resched_int_base - nr_cpu_ids;
541
542 for (i = 0; i < nr_cpu_ids; i++) {
543 gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
544 gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
545 }
546}
547#else
548static inline void gic_ipi_init(void)
549{
550}
551#endif
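
/*
 * With the reservation scheme above, a hypothetical system with 256 shared
 * interrupts and nr_cpu_ids == 8 ends up with:
 *
 *   gic_resched_int_base = 256 - 8 = 248   (resched IPIs on 248..255)
 *   gic_call_int_base    = 248 - 8 = 240   (call IPIs on 240..247)
 *
 * i.e. the last 2 * nr_cpu_ids shared interrupts carry one resched and one
 * call IPI per CPU, each routed to a single VPE by gic_ipi_init_one().
 */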
552
553static void __init gic_basic_init(void)
554{
555 unsigned int i;
556
557 board_bind_eic_interrupt = &gic_bind_eic_interrupt;
558
559 /* Setup defaults */
560 for (i = 0; i < gic_shared_intrs; i++) {
561 gic_set_polarity(i, GIC_POL_POS);
562 gic_set_trigger(i, GIC_TRIG_LEVEL);
563 gic_reset_mask(i);
564 }
565
566 for (i = 0; i < gic_vpes; i++) {
567 unsigned int j;
568
569 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
570 for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
571 if (!gic_local_irq_is_routable(j))
572 continue;
573 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
574 }
575 }
576}
577
578static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
579 irq_hw_number_t hw)
580{
581 int intr = GIC_HWIRQ_TO_LOCAL(hw);
582 int ret = 0;
583 int i;
584 unsigned long flags;
585
586 if (!gic_local_irq_is_routable(intr))
587 return -EPERM;
588
589 /*
590 * HACK: These are all really percpu interrupts, but the rest
591 * of the MIPS kernel code does not use the percpu IRQ API for
592 * the CP0 timer and performance counter interrupts.
593 */
594 if (intr != GIC_LOCAL_INT_TIMER && intr != GIC_LOCAL_INT_PERFCTR) {
595 irq_set_chip_and_handler(virq,
596 &gic_local_irq_controller,
597 handle_percpu_devid_irq);
598 irq_set_percpu_devid(virq);
599 } else {
600 irq_set_chip_and_handler(virq,
601 &gic_all_vpes_local_irq_controller,
602 handle_percpu_irq);
603 }
604
605 spin_lock_irqsave(&gic_lock, flags);
606 for (i = 0; i < gic_vpes; i++) {
607 u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;
608
609 gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
610
611 switch (intr) {
612 case GIC_LOCAL_INT_WD:
613 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
614 break;
615 case GIC_LOCAL_INT_COMPARE:
616 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val);
617 break;
618 case GIC_LOCAL_INT_TIMER:
619 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val);
620 break;
621 case GIC_LOCAL_INT_PERFCTR:
622 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), val);
623 break;
624 case GIC_LOCAL_INT_SWINT0:
625 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP), val);
626 break;
627 case GIC_LOCAL_INT_SWINT1:
628 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP), val);
629 break;
630 case GIC_LOCAL_INT_FDC:
631 gic_write(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
632 break;
633 default:
634 pr_err("Invalid local IRQ %d\n", intr);
635 ret = -EINVAL;
636 break;
637 }
638 }
639 spin_unlock_irqrestore(&gic_lock, flags);
640
641 return ret;
642}
643
644static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
645 irq_hw_number_t hw)
646{
647 int intr = GIC_HWIRQ_TO_SHARED(hw);
648 unsigned long flags;
649
650 irq_set_chip_and_handler(virq, &gic_level_irq_controller,
651 handle_level_irq);
652
653 spin_lock_irqsave(&gic_lock, flags);
654 gic_map_to_pin(intr, gic_cpu_pin);
655 /* Map to VPE 0 by default */
656 gic_map_to_vpe(intr, 0);
657 set_bit(intr, pcpu_masks[0].pcpu_mask);
658 spin_unlock_irqrestore(&gic_lock, flags);
659
660 return 0;
661}
662
663static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
664 irq_hw_number_t hw)
665{
666 if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
667 return gic_local_irq_domain_map(d, virq, hw);
668 return gic_shared_irq_domain_map(d, virq, hw);
669}
670
671static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
672 const u32 *intspec, unsigned int intsize,
673 irq_hw_number_t *out_hwirq,
674 unsigned int *out_type)
675{
676 if (intsize != 3)
677 return -EINVAL;
678
679 if (intspec[0] == GIC_SHARED)
680 *out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
681 else if (intspec[0] == GIC_LOCAL)
682 *out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
683 else
684 return -EINVAL;
685 *out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
686
687 return 0;
688}
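
/*
 * As an example of the 3-cell specifier parsed above, a device-tree entry
 * such as <GIC_SHARED 3 IRQ_TYPE_LEVEL_HIGH> (values hypothetical) yields
 * *out_hwirq = GIC_SHARED_TO_HWIRQ(3) and *out_type = IRQ_TYPE_LEVEL_HIGH,
 * while <GIC_LOCAL 1 IRQ_TYPE_NONE> selects per-VPE local interrupt 1
 * instead.
 */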
689
690static struct irq_domain_ops gic_irq_domain_ops = {
691 .map = gic_irq_domain_map,
692 .xlate = gic_irq_domain_xlate,
693};
694
695static void __init __gic_init(unsigned long gic_base_addr,
696 unsigned long gic_addrspace_size,
697 unsigned int cpu_vec, unsigned int irqbase,
698 struct device_node *node)
699{
700 unsigned int gicconfig;
701
702 gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);
703
704 gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
705 gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
706 GIC_SH_CONFIG_NUMINTRS_SHF;
707 gic_shared_intrs = ((gic_shared_intrs + 1) * 8);
708
709 gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
710 GIC_SH_CONFIG_NUMVPES_SHF;
711 gic_vpes = gic_vpes + 1;
712
713 if (cpu_has_veic) {
714 /* Always use vector 1 in EIC mode */
715 gic_cpu_pin = 0;
716 set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
717 __gic_irq_dispatch);
718 } else {
719 gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
720 irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
721 gic_irq_dispatch);
722 }
723
724 gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
725 gic_shared_intrs, irqbase,
726 &gic_irq_domain_ops, NULL);
727 if (!gic_irq_domain)
728 panic("Failed to add GIC IRQ domain");
729
730 gic_basic_init();
731
732 gic_ipi_init();
733}
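
/*
 * To make the GIC_SH_CONFIG decoding above concrete: NUMINTRS encodes the
 * number of shared interrupts in units of 8, minus one, so a hypothetical
 * field value of 31 gives gic_shared_intrs = (31 + 1) * 8 = 256. NUMVPES
 * encodes the VPE count minus one, so a field value of 3 gives gic_vpes = 4.
 */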
734
735void __init gic_init(unsigned long gic_base_addr,
736 unsigned long gic_addrspace_size,
737 unsigned int cpu_vec, unsigned int irqbase)
738{
739 __gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
740}
741
742static int __init gic_of_init(struct device_node *node,
743 struct device_node *parent)
744{
745 struct resource res;
746 unsigned int cpu_vec, i = 0, reserved = 0;
747 phys_addr_t gic_base;
748 size_t gic_len;
749
750 /* Find the first available CPU vector. */
751 while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
752 i++, &cpu_vec))
753 reserved |= BIT(cpu_vec);
754 for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
755 if (!(reserved & BIT(cpu_vec)))
756 break;
757 }
758 if (cpu_vec == 8) {
759 pr_err("No CPU vectors available for GIC\n");
760 return -ENODEV;
761 }
762
763 if (of_address_to_resource(node, 0, &res)) {
764 /*
765 * Probe the CM for the GIC base address if not specified
766 * in the device-tree.
767 */
768 if (mips_cm_present()) {
769 gic_base = read_gcr_gic_base() &
770 ~CM_GCR_GIC_BASE_GICEN_MSK;
771 gic_len = 0x20000;
772 } else {
773 pr_err("Failed to get GIC memory range\n");
774 return -ENODEV;
775 }
776 } else {
777 gic_base = res.start;
778 gic_len = resource_size(&res);
779 }
780
781 if (mips_cm_present())
782 write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
783 gic_present = true;
784
785 __gic_init(gic_base, gic_len, cpu_vec, 0, node);
786
787 return 0;
788}
789IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);
diff --git a/drivers/isdn/mISDN/l1oip_core.c b/drivers/isdn/mISDN/l1oip_core.c
index 9f454d76cc06..67c21876c35f 100644
--- a/drivers/isdn/mISDN/l1oip_core.c
+++ b/drivers/isdn/mISDN/l1oip_core.c
@@ -1334,7 +1334,7 @@ init_card(struct l1oip *hc, int pri, int bundle)
 	if (id[l1oip_cnt] == 0) {
 		printk(KERN_WARNING "Warning: No 'id' value given or "
 			"0, this is highly unsecure. Please use 32 "
-			"bit randmom number 0x...\n");
+			"bit random number 0x...\n");
 	}
 	hc->id = id[l1oip_cnt];
 	if (debug & DEBUG_L1OIP_INIT)
diff --git a/drivers/media/pci/cx18/cx18-driver.h b/drivers/media/pci/cx18/cx18-driver.h
index dcfd7a1d317e..207d6e82403b 100644
--- a/drivers/media/pci/cx18/cx18-driver.h
+++ b/drivers/media/pci/cx18/cx18-driver.h
@@ -290,7 +290,7 @@ struct cx18_options {
  * list_entry_is_past_end - check if a previous loop cursor is off list end
  * @pos:	the type * previously used as a loop cursor.
  * @head:	the head for your list.
- * @member:	the name of the list_struct within the struct.
+ * @member:	the name of the list_head within the struct.
  *
  * Check if the entry's list_head is the head of the list, thus it's not a
  * real entry but was the loop cursor that walked past the end
diff --git a/drivers/media/pci/ttpci/budget-patch.c b/drivers/media/pci/ttpci/budget-patch.c
index 2cb35c23d2ac..a4d8867e1d7b 100644
--- a/drivers/media/pci/ttpci/budget-patch.c
+++ b/drivers/media/pci/ttpci/budget-patch.c
@@ -490,7 +490,7 @@ static int budget_patch_attach (struct saa7146_dev* dev, struct saa7146_pci_exte
 
 	if(detected == 0)
 		printk("budget-patch not detected or saa7146 in non-default state.\n"
-		       "try enabling ressetting of 7146 with MASK_31 in MC1 register\n");
+		       "try enabling resetting of 7146 with MASK_31 in MC1 register\n");
 
 	else
 		printk("BUDGET-PATCH DETECTED.\n");
diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h
index 3d2b8677ec8a..b5b6bda44a00 100644
--- a/drivers/misc/cxl/cxl.h
+++ b/drivers/misc/cxl/cxl.h
@@ -336,6 +336,8 @@ struct cxl_sste {
 struct cxl_afu {
 	irq_hw_number_t psl_hwirq;
 	irq_hw_number_t serr_hwirq;
+	char *err_irq_name;
+	char *psl_irq_name;
 	unsigned int serr_virq;
 	void __iomem *p1n_mmio;
 	void __iomem *p2n_mmio;
@@ -379,6 +381,12 @@ struct cxl_afu {
 	bool enabled;
 };
 
+
+struct cxl_irq_name {
+	struct list_head list;
+	char *name;
+};
+
 /*
  * This is a cxl context. If the PSL is in dedicated mode, there will be one
  * of these per AFU. If in AFU directed there can be lots of these.
@@ -403,6 +411,7 @@ struct cxl_context {
 
 	unsigned long *irq_bitmap; /* Accessed from IRQ context */
 	struct cxl_irq_ranges irqs;
+	struct list_head irq_names;
 	u64 fault_addr;
 	u64 fault_dsisr;
 	u64 afu_err;
@@ -444,6 +453,7 @@ struct cxl {
 	struct dentry *trace;
 	struct dentry *psl_err_chk;
 	struct dentry *debugfs;
+	char *irq_name;
 	struct bin_attribute cxl_attr;
 	int adapter_num;
 	int user_irqs;
@@ -563,9 +573,6 @@ int _cxl_afu_deactivate_mode(struct cxl_afu *afu, int mode);
 int cxl_afu_deactivate_mode(struct cxl_afu *afu);
 int cxl_afu_select_best_mode(struct cxl_afu *afu);
 
-unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
-			 irq_handler_t handler, void *cookie);
-void cxl_unmap_irq(unsigned int virq, void *cookie);
 int cxl_register_psl_irq(struct cxl_afu *afu);
 void cxl_release_psl_irq(struct cxl_afu *afu);
 int cxl_register_psl_err_irq(struct cxl *adapter);
@@ -612,7 +619,7 @@ int cxl_attach_process(struct cxl_context *ctx, bool kernel, u64 wed,
 		       u64 amr);
 int cxl_detach_process(struct cxl_context *ctx);
 
-int cxl_get_irq(struct cxl_context *ctx, struct cxl_irq_info *info);
+int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info);
 int cxl_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask);
 
 int cxl_check_error(struct cxl_afu *afu);
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index c99e896604ee..f8684bca2d79 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -133,7 +133,7 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
 {
 	unsigned flt = 0;
 	int result;
-	unsigned long access, flags;
+	unsigned long access, flags, inv_flags = 0;
 
 	if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
 		pr_devel("copro_handle_mm_fault failed: %#x\n", result);
@@ -149,8 +149,12 @@ static void cxl_handle_page_fault(struct cxl_context *ctx,
 		access |= _PAGE_RW;
 	if ((!ctx->kernel) || ~(dar & (1ULL << 63)))
 		access |= _PAGE_USER;
+
+	if (dsisr & DSISR_NOHPTE)
+		inv_flags |= HPTE_NOHPTE_UPDATE;
+
 	local_irq_save(flags);
-	hash_page_mm(mm, dar, access, 0x300);
+	hash_page_mm(mm, dar, access, 0x300, inv_flags);
 	local_irq_restore(flags);
 
 	pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
diff --git a/drivers/misc/cxl/irq.c b/drivers/misc/cxl/irq.c
index 336020c8e1af..c294925f73ee 100644
--- a/drivers/misc/cxl/irq.c
+++ b/drivers/misc/cxl/irq.c
@@ -92,20 +92,13 @@ static irqreturn_t schedule_cxl_fault(struct cxl_context *ctx, u64 dsisr, u64 da
 	return IRQ_HANDLED;
 }
 
-static irqreturn_t cxl_irq(int irq, void *data)
+static irqreturn_t cxl_irq(int irq, void *data, struct cxl_irq_info *irq_info)
 {
 	struct cxl_context *ctx = data;
-	struct cxl_irq_info irq_info;
 	u64 dsisr, dar;
-	int result;
-
-	if ((result = cxl_get_irq(ctx, &irq_info))) {
-		WARN(1, "Unable to get CXL IRQ Info: %i\n", result);
-		return IRQ_HANDLED;
-	}
 
-	dsisr = irq_info.dsisr;
-	dar = irq_info.dar;
+	dsisr = irq_info->dsisr;
+	dar = irq_info->dar;
 
 	pr_devel("CXL interrupt %i for afu pe: %i DSISR: %#llx DAR: %#llx\n", irq, ctx->pe, dsisr, dar);
 
@@ -149,9 +142,9 @@ static irqreturn_t cxl_irq(int irq, void *data)
 	if (dsisr & CXL_PSL_DSISR_An_UR)
 		pr_devel("CXL interrupt: AURP PTE not found\n");
 	if (dsisr & CXL_PSL_DSISR_An_PE)
-		return handle_psl_slice_error(ctx, dsisr, irq_info.errstat);
+		return handle_psl_slice_error(ctx, dsisr, irq_info->errstat);
 	if (dsisr & CXL_PSL_DSISR_An_AE) {
-		pr_devel("CXL interrupt: AFU Error %.llx\n", irq_info.afu_err);
+		pr_devel("CXL interrupt: AFU Error %.llx\n", irq_info->afu_err);
 
 		if (ctx->pending_afu_err) {
 			/*
@@ -163,10 +156,10 @@ static irqreturn_t cxl_irq(int irq, void *data)
 			 */
 			dev_err_ratelimited(&ctx->afu->dev, "CXL AFU Error "
 					    "undelivered to pe %i: %.llx\n",
-					    ctx->pe, irq_info.afu_err);
+					    ctx->pe, irq_info->afu_err);
 		} else {
 			spin_lock(&ctx->lock);
-			ctx->afu_err = irq_info.afu_err;
+			ctx->afu_err = irq_info->afu_err;
 			ctx->pending_afu_err = 1;
 			spin_unlock(&ctx->lock);
 
@@ -182,24 +175,43 @@ static irqreturn_t cxl_irq(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
+static irqreturn_t fail_psl_irq(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
+{
+	if (irq_info->dsisr & CXL_PSL_DSISR_TRANS)
+		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
+	else
+		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
+
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t cxl_irq_multiplexed(int irq, void *data)
 {
 	struct cxl_afu *afu = data;
 	struct cxl_context *ctx;
+	struct cxl_irq_info irq_info;
 	int ph = cxl_p2n_read(afu, CXL_PSL_PEHandle_An) & 0xffff;
 	int ret;
 
+	if ((ret = cxl_get_irq(afu, &irq_info))) {
+		WARN(1, "Unable to get CXL IRQ Info: %i\n", ret);
+		return fail_psl_irq(afu, &irq_info);
+	}
+
 	rcu_read_lock();
 	ctx = idr_find(&afu->contexts_idr, ph);
 	if (ctx) {
-		ret = cxl_irq(irq, ctx);
+		ret = cxl_irq(irq, ctx, &irq_info);
 		rcu_read_unlock();
 		return ret;
 	}
 	rcu_read_unlock();
 
-	WARN(1, "Unable to demultiplex CXL PSL IRQ\n");
-	return IRQ_HANDLED;
+	WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %.16llx DAR"
+	     " %.16llx\n(Possible AFU HW issue - was a term/remove acked"
+	     " with outstanding transactions?)\n", ph, irq_info.dsisr,
+	     irq_info.dar);
+	return fail_psl_irq(afu, &irq_info);
 }
 
 static irqreturn_t cxl_irq_afu(int irq, void *data)
@@ -243,7 +255,7 @@ static irqreturn_t cxl_irq_afu(int irq, void *data)
243} 255}
244 256
245unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq, 257unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
246 irq_handler_t handler, void *cookie) 258 irq_handler_t handler, void *cookie, const char *name)
247{ 259{
248 unsigned int virq; 260 unsigned int virq;
249 int result; 261 int result;
@@ -259,7 +271,7 @@ unsigned int cxl_map_irq(struct cxl *adapter, irq_hw_number_t hwirq,
259 271
260 pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq); 272 pr_devel("hwirq %#lx mapped to virq %u\n", hwirq, virq);
261 273
262 result = request_irq(virq, handler, 0, "cxl", cookie); 274 result = request_irq(virq, handler, 0, name, cookie);
263 if (result) { 275 if (result) {
264 dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result); 276 dev_warn(&adapter->dev, "cxl_map_irq: request_irq failed: %i\n", result);
265 return 0; 277 return 0;
@@ -278,14 +290,15 @@ static int cxl_register_one_irq(struct cxl *adapter,
278 irq_handler_t handler, 290 irq_handler_t handler,
279 void *cookie, 291 void *cookie,
280 irq_hw_number_t *dest_hwirq, 292 irq_hw_number_t *dest_hwirq,
281 unsigned int *dest_virq) 293 unsigned int *dest_virq,
294 const char *name)
282{ 295{
283 int hwirq, virq; 296 int hwirq, virq;
284 297
285 if ((hwirq = cxl_alloc_one_irq(adapter)) < 0) 298 if ((hwirq = cxl_alloc_one_irq(adapter)) < 0)
286 return hwirq; 299 return hwirq;
287 300
288 if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie))) 301 if (!(virq = cxl_map_irq(adapter, hwirq, handler, cookie, name)))
289 goto err; 302 goto err;
290 303
291 *dest_hwirq = hwirq; 304 *dest_hwirq = hwirq;
@@ -302,10 +315,19 @@ int cxl_register_psl_err_irq(struct cxl *adapter)
302{ 315{
303 int rc; 316 int rc;
304 317
318 adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
319 dev_name(&adapter->dev));
320 if (!adapter->irq_name)
321 return -ENOMEM;
322
305 if ((rc = cxl_register_one_irq(adapter, cxl_irq_err, adapter, 323 if ((rc = cxl_register_one_irq(adapter, cxl_irq_err, adapter,
306 &adapter->err_hwirq, 324 &adapter->err_hwirq,
307 &adapter->err_virq))) 325 &adapter->err_virq,
326 adapter->irq_name))) {
327 kfree(adapter->irq_name);
328 adapter->irq_name = NULL;
308 return rc; 329 return rc;
330 }
309 331
310 cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->err_hwirq & 0xffff); 332 cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->err_hwirq & 0xffff);
311 333
@@ -317,6 +339,7 @@ void cxl_release_psl_err_irq(struct cxl *adapter)
317 cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000); 339 cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
318 cxl_unmap_irq(adapter->err_virq, adapter); 340 cxl_unmap_irq(adapter->err_virq, adapter);
319 cxl_release_one_irq(adapter, adapter->err_hwirq); 341 cxl_release_one_irq(adapter, adapter->err_hwirq);
342 kfree(adapter->irq_name);
320} 343}
321 344
322int cxl_register_serr_irq(struct cxl_afu *afu) 345int cxl_register_serr_irq(struct cxl_afu *afu)
@@ -324,10 +347,18 @@ int cxl_register_serr_irq(struct cxl_afu *afu)
324 u64 serr; 347 u64 serr;
325 int rc; 348 int rc;
326 349
350 afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
351 dev_name(&afu->dev));
352 if (!afu->err_irq_name)
353 return -ENOMEM;
354
327 if ((rc = cxl_register_one_irq(afu->adapter, cxl_slice_irq_err, afu, 355 if ((rc = cxl_register_one_irq(afu->adapter, cxl_slice_irq_err, afu,
328 &afu->serr_hwirq, 356 &afu->serr_hwirq,
329 &afu->serr_virq))) 357 &afu->serr_virq, afu->err_irq_name))) {
358 kfree(afu->err_irq_name);
359 afu->err_irq_name = NULL;
330 return rc; 360 return rc;
361 }
331 362
332 serr = cxl_p1n_read(afu, CXL_PSL_SERR_An); 363 serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
333 serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff); 364 serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
@@ -341,24 +372,50 @@ void cxl_release_serr_irq(struct cxl_afu *afu)
341 cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000); 372 cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
342 cxl_unmap_irq(afu->serr_virq, afu); 373 cxl_unmap_irq(afu->serr_virq, afu);
343 cxl_release_one_irq(afu->adapter, afu->serr_hwirq); 374 cxl_release_one_irq(afu->adapter, afu->serr_hwirq);
375 kfree(afu->err_irq_name);
344} 376}
345 377
346int cxl_register_psl_irq(struct cxl_afu *afu) 378int cxl_register_psl_irq(struct cxl_afu *afu)
347{ 379{
348 return cxl_register_one_irq(afu->adapter, cxl_irq_multiplexed, afu, 380 int rc;
349 &afu->psl_hwirq, &afu->psl_virq); 381
382 afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
383 dev_name(&afu->dev));
384 if (!afu->psl_irq_name)
385 return -ENOMEM;
386
387 if ((rc = cxl_register_one_irq(afu->adapter, cxl_irq_multiplexed, afu,
388 &afu->psl_hwirq, &afu->psl_virq,
389 afu->psl_irq_name))) {
390 kfree(afu->psl_irq_name);
391 afu->psl_irq_name = NULL;
392 }
393 return rc;
350} 394}
351 395
352void cxl_release_psl_irq(struct cxl_afu *afu) 396void cxl_release_psl_irq(struct cxl_afu *afu)
353{ 397{
354 cxl_unmap_irq(afu->psl_virq, afu); 398 cxl_unmap_irq(afu->psl_virq, afu);
355 cxl_release_one_irq(afu->adapter, afu->psl_hwirq); 399 cxl_release_one_irq(afu->adapter, afu->psl_hwirq);
400 kfree(afu->psl_irq_name);
401}
402
403void afu_irq_name_free(struct cxl_context *ctx)
404{
405 struct cxl_irq_name *irq_name, *tmp;
406
407 list_for_each_entry_safe(irq_name, tmp, &ctx->irq_names, list) {
408 kfree(irq_name->name);
409 list_del(&irq_name->list);
410 kfree(irq_name);
411 }
356} 412}
357 413
358int afu_register_irqs(struct cxl_context *ctx, u32 count) 414int afu_register_irqs(struct cxl_context *ctx, u32 count)
359{ 415{
360 irq_hw_number_t hwirq; 416 irq_hw_number_t hwirq;
361 int rc, r, i; 417 int rc, r, i, j = 1;
418 struct cxl_irq_name *irq_name;
362 419
363 if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count))) 420 if ((rc = cxl_alloc_irq_ranges(&ctx->irqs, ctx->afu->adapter, count)))
364 return rc; 421 return rc;
@@ -372,15 +429,47 @@ int afu_register_irqs(struct cxl_context *ctx, u32 count)
372 sizeof(*ctx->irq_bitmap), GFP_KERNEL); 429 sizeof(*ctx->irq_bitmap), GFP_KERNEL);
373 if (!ctx->irq_bitmap) 430 if (!ctx->irq_bitmap)
374 return -ENOMEM; 431 return -ENOMEM;
432
433 /*
434 * Allocate names first. If any fail, bail out before allocating
435 * actual hardware IRQs.
436 */
437 INIT_LIST_HEAD(&ctx->irq_names);
438 for (r = 1; r < CXL_IRQ_RANGES; r++) {
439 for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
440 irq_name = kmalloc(sizeof(struct cxl_irq_name),
441 GFP_KERNEL);
442 if (!irq_name)
443 goto out;
444 irq_name->name = kasprintf(GFP_KERNEL, "cxl-%s-pe%i-%i",
445 dev_name(&ctx->afu->dev),
446 ctx->pe, j);
447 if (!irq_name->name) {
448 kfree(irq_name);
449 goto out;
450 }
 451 /* Add to tail so the next loop gets the correct order */
452 list_add_tail(&irq_name->list, &ctx->irq_names);
453 j++;
454 }
455 }
456
457 /* We've allocated all memory now, so let's do the irq allocations */
458 irq_name = list_first_entry(&ctx->irq_names, struct cxl_irq_name, list);
375 for (r = 1; r < CXL_IRQ_RANGES; r++) { 459 for (r = 1; r < CXL_IRQ_RANGES; r++) {
376 hwirq = ctx->irqs.offset[r]; 460 hwirq = ctx->irqs.offset[r];
377 for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) { 461 for (i = 0; i < ctx->irqs.range[r]; hwirq++, i++) {
378 cxl_map_irq(ctx->afu->adapter, hwirq, 462 cxl_map_irq(ctx->afu->adapter, hwirq,
379 cxl_irq_afu, ctx); 463 cxl_irq_afu, ctx, irq_name->name);
464 irq_name = list_next_entry(irq_name, list);
380 } 465 }
381 } 466 }
382 467
383 return 0; 468 return 0;
469
470out:
471 afu_irq_name_free(ctx);
472 return -ENOMEM;
384} 473}
385 474
386void afu_release_irqs(struct cxl_context *ctx) 475void afu_release_irqs(struct cxl_context *ctx)
@@ -398,5 +487,6 @@ void afu_release_irqs(struct cxl_context *ctx)
398 } 487 }
399 } 488 }
400 489
490 afu_irq_name_free(ctx);
401 cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter); 491 cxl_release_irq_ranges(&ctx->irqs, ctx->afu->adapter);
402} 492}
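
The irq.c hunks above all follow one pattern: build the interrupt name with kasprintf() before touching the hardware, keep it allocated for as long as the IRQ is requested (request_irq() stores the pointer rather than copying the string), and free it only after the IRQ is released. A minimal sketch of that lifecycle, using a hypothetical my_dev structure and helpers rather than the cxl ones:

#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/slab.h>

/* Hypothetical device; stands in for struct cxl/cxl_afu above. */
struct my_dev {
	struct device dev;
	unsigned int virq;
	char *irq_name;		/* must outlive the requested IRQ */
};

static int my_dev_register_irq(struct my_dev *d, irq_handler_t handler)
{
	int rc;

	/* Allocate the name first: a failure here leaves nothing to unwind. */
	d->irq_name = kasprintf(GFP_KERNEL, "mydev-%s", dev_name(&d->dev));
	if (!d->irq_name)
		return -ENOMEM;

	rc = request_irq(d->virq, handler, 0, d->irq_name, d);
	if (rc) {
		kfree(d->irq_name);
		d->irq_name = NULL;
	}
	return rc;
}

static void my_dev_release_irq(struct my_dev *d)
{
	free_irq(d->virq, d);
	kfree(d->irq_name);	/* only safe after free_irq() */
}
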
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index d47532e8f4f1..9a5a442269a8 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -637,18 +637,18 @@ int cxl_detach_process(struct cxl_context *ctx)
637 return detach_process_native_afu_directed(ctx); 637 return detach_process_native_afu_directed(ctx);
638} 638}
639 639
640int cxl_get_irq(struct cxl_context *ctx, struct cxl_irq_info *info) 640int cxl_get_irq(struct cxl_afu *afu, struct cxl_irq_info *info)
641{ 641{
642 u64 pidtid; 642 u64 pidtid;
643 643
644 info->dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An); 644 info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
645 info->dar = cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An); 645 info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
646 info->dsr = cxl_p2n_read(ctx->afu, CXL_PSL_DSR_An); 646 info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
647 pidtid = cxl_p2n_read(ctx->afu, CXL_PSL_PID_TID_An); 647 pidtid = cxl_p2n_read(afu, CXL_PSL_PID_TID_An);
648 info->pid = pidtid >> 32; 648 info->pid = pidtid >> 32;
649 info->tid = pidtid & 0xffffffff; 649 info->tid = pidtid & 0xffffffff;
650 info->afu_err = cxl_p2n_read(ctx->afu, CXL_AFU_ERR_An); 650 info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
651 info->errstat = cxl_p2n_read(ctx->afu, CXL_PSL_ErrStat_An); 651 info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
652 652
653 return 0; 653 return 0;
654} 654}
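
cxl_get_irq() now takes the AFU directly and unpacks the 64-bit CXL_PSL_PID_TID_An register into two 32-bit halves. The split itself is plain shift-and-mask; as a standalone sketch:

#include <linux/types.h>

/* Unpack a PID/TID pair packed into one 64-bit register value, as
 * cxl_get_irq() does above: PID in the upper word, TID in the lower. */
static void unpack_pidtid(u64 pidtid, u32 *pid, u32 *tid)
{
	*pid = pidtid >> 32;
	*tid = pidtid & 0xffffffff;
}
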
diff --git a/drivers/mtd/ubi/attach.c b/drivers/mtd/ubi/attach.c
index 6f27d9a1be3b..9d2e16f3150a 100644
--- a/drivers/mtd/ubi/attach.c
+++ b/drivers/mtd/ubi/attach.c
@@ -176,6 +176,7 @@ static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
176 176
177/** 177/**
178 * validate_vid_hdr - check volume identifier header. 178 * validate_vid_hdr - check volume identifier header.
179 * @ubi: UBI device description object
179 * @vid_hdr: the volume identifier header to check 180 * @vid_hdr: the volume identifier header to check
180 * @av: information about the volume this logical eraseblock belongs to 181 * @av: information about the volume this logical eraseblock belongs to
181 * @pnum: physical eraseblock number the VID header came from 182 * @pnum: physical eraseblock number the VID header came from
@@ -188,7 +189,8 @@ static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
188 * information in the VID header is consistent to the information in other VID 189 * information in the VID header is consistent to the information in other VID
189 * headers of the same volume. 190 * headers of the same volume.
190 */ 191 */
191static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr, 192static int validate_vid_hdr(const struct ubi_device *ubi,
193 const struct ubi_vid_hdr *vid_hdr,
192 const struct ubi_ainf_volume *av, int pnum) 194 const struct ubi_ainf_volume *av, int pnum)
193{ 195{
194 int vol_type = vid_hdr->vol_type; 196 int vol_type = vid_hdr->vol_type;
@@ -206,7 +208,7 @@ static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
206 */ 208 */
207 209
208 if (vol_id != av->vol_id) { 210 if (vol_id != av->vol_id) {
209 ubi_err("inconsistent vol_id"); 211 ubi_err(ubi, "inconsistent vol_id");
210 goto bad; 212 goto bad;
211 } 213 }
212 214
@@ -216,17 +218,17 @@ static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
216 av_vol_type = UBI_VID_DYNAMIC; 218 av_vol_type = UBI_VID_DYNAMIC;
217 219
218 if (vol_type != av_vol_type) { 220 if (vol_type != av_vol_type) {
219 ubi_err("inconsistent vol_type"); 221 ubi_err(ubi, "inconsistent vol_type");
220 goto bad; 222 goto bad;
221 } 223 }
222 224
223 if (used_ebs != av->used_ebs) { 225 if (used_ebs != av->used_ebs) {
224 ubi_err("inconsistent used_ebs"); 226 ubi_err(ubi, "inconsistent used_ebs");
225 goto bad; 227 goto bad;
226 } 228 }
227 229
228 if (data_pad != av->data_pad) { 230 if (data_pad != av->data_pad) {
229 ubi_err("inconsistent data_pad"); 231 ubi_err(ubi, "inconsistent data_pad");
230 goto bad; 232 goto bad;
231 } 233 }
232 } 234 }
@@ -234,7 +236,7 @@ static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
234 return 0; 236 return 0;
235 237
236bad: 238bad:
237 ubi_err("inconsistent VID header at PEB %d", pnum); 239 ubi_err(ubi, "inconsistent VID header at PEB %d", pnum);
238 ubi_dump_vid_hdr(vid_hdr); 240 ubi_dump_vid_hdr(vid_hdr);
239 ubi_dump_av(av); 241 ubi_dump_av(av);
240 return -EINVAL; 242 return -EINVAL;
@@ -336,7 +338,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
336 * support these images anymore. Well, those images still work, 338 * support these images anymore. Well, those images still work,
337 * but only if no unclean reboots happened. 339 * but only if no unclean reboots happened.
338 */ 340 */
339 ubi_err("unsupported on-flash UBI format"); 341 ubi_err(ubi, "unsupported on-flash UBI format");
340 return -EINVAL; 342 return -EINVAL;
341 } 343 }
342 344
@@ -377,7 +379,7 @@ int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
377 if (err == UBI_IO_BITFLIPS) 379 if (err == UBI_IO_BITFLIPS)
378 bitflips = 1; 380 bitflips = 1;
379 else { 381 else {
380 ubi_err("VID of PEB %d header is bad, but it was OK earlier, err %d", 382 ubi_err(ubi, "VID of PEB %d header is bad, but it was OK earlier, err %d",
381 pnum, err); 383 pnum, err);
382 if (err > 0) 384 if (err > 0)
383 err = -EIO; 385 err = -EIO;
@@ -507,7 +509,7 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
507 * logical eraseblocks because there was an unclean reboot. 509 * logical eraseblocks because there was an unclean reboot.
508 */ 510 */
509 if (aeb->sqnum == sqnum && sqnum != 0) { 511 if (aeb->sqnum == sqnum && sqnum != 0) {
510 ubi_err("two LEBs with same sequence number %llu", 512 ubi_err(ubi, "two LEBs with same sequence number %llu",
511 sqnum); 513 sqnum);
512 ubi_dump_aeb(aeb, 0); 514 ubi_dump_aeb(aeb, 0);
513 ubi_dump_vid_hdr(vid_hdr); 515 ubi_dump_vid_hdr(vid_hdr);
@@ -527,7 +529,7 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
527 * This logical eraseblock is newer than the one 529 * This logical eraseblock is newer than the one
528 * found earlier. 530 * found earlier.
529 */ 531 */
530 err = validate_vid_hdr(vid_hdr, av, pnum); 532 err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
531 if (err) 533 if (err)
532 return err; 534 return err;
533 535
@@ -565,7 +567,7 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
565 * attaching information. 567 * attaching information.
566 */ 568 */
567 569
568 err = validate_vid_hdr(vid_hdr, av, pnum); 570 err = validate_vid_hdr(ubi, vid_hdr, av, pnum);
569 if (err) 571 if (err)
570 return err; 572 return err;
571 573
@@ -668,7 +670,8 @@ static int early_erase_peb(struct ubi_device *ubi,
668 * Erase counter overflow. Upgrade UBI and use 64-bit 670 * Erase counter overflow. Upgrade UBI and use 64-bit
669 * erase counters internally. 671 * erase counters internally.
670 */ 672 */
671 ubi_err("erase counter overflow at PEB %d, EC %d", pnum, ec); 673 ubi_err(ubi, "erase counter overflow at PEB %d, EC %d",
674 pnum, ec);
672 return -EINVAL; 675 return -EINVAL;
673 } 676 }
674 677
@@ -736,7 +739,7 @@ struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
736 return aeb; 739 return aeb;
737 } 740 }
738 741
739 ubi_err("no free eraseblocks"); 742 ubi_err(ubi, "no free eraseblocks");
740 return ERR_PTR(-ENOSPC); 743 return ERR_PTR(-ENOSPC);
741} 744}
742 745
@@ -785,9 +788,9 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
785 if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size)) 788 if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
786 goto out_unlock; 789 goto out_unlock;
787 790
788 ubi_err("PEB %d contains corrupted VID header, and the data does not contain all 0xFF", 791 ubi_err(ubi, "PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
789 pnum); 792 pnum);
790 ubi_err("this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection"); 793 ubi_err(ubi, "this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
791 ubi_dump_vid_hdr(vid_hdr); 794 ubi_dump_vid_hdr(vid_hdr);
792 pr_err("hexdump of PEB %d offset %d, length %d", 795 pr_err("hexdump of PEB %d offset %d, length %d",
793 pnum, ubi->leb_start, ubi->leb_size); 796 pnum, ubi->leb_start, ubi->leb_size);
@@ -859,7 +862,8 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
859 bitflips = 1; 862 bitflips = 1;
860 break; 863 break;
861 default: 864 default:
862 ubi_err("'ubi_io_read_ec_hdr()' returned unknown code %d", err); 865 ubi_err(ubi, "'ubi_io_read_ec_hdr()' returned unknown code %d",
866 err);
863 return -EINVAL; 867 return -EINVAL;
864 } 868 }
865 869
@@ -868,7 +872,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
868 872
869 /* Make sure UBI version is OK */ 873 /* Make sure UBI version is OK */
870 if (ech->version != UBI_VERSION) { 874 if (ech->version != UBI_VERSION) {
871 ubi_err("this UBI version is %d, image version is %d", 875 ubi_err(ubi, "this UBI version is %d, image version is %d",
872 UBI_VERSION, (int)ech->version); 876 UBI_VERSION, (int)ech->version);
873 return -EINVAL; 877 return -EINVAL;
874 } 878 }
@@ -882,7 +886,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
882 * flash. Upgrade UBI and use 64-bit erase counters 886 * flash. Upgrade UBI and use 64-bit erase counters
883 * internally. 887 * internally.
884 */ 888 */
885 ubi_err("erase counter overflow, max is %d", 889 ubi_err(ubi, "erase counter overflow, max is %d",
886 UBI_MAX_ERASECOUNTER); 890 UBI_MAX_ERASECOUNTER);
887 ubi_dump_ec_hdr(ech); 891 ubi_dump_ec_hdr(ech);
888 return -EINVAL; 892 return -EINVAL;
@@ -903,7 +907,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
903 if (!ubi->image_seq) 907 if (!ubi->image_seq)
904 ubi->image_seq = image_seq; 908 ubi->image_seq = image_seq;
905 if (image_seq && ubi->image_seq != image_seq) { 909 if (image_seq && ubi->image_seq != image_seq) {
906 ubi_err("bad image sequence number %d in PEB %d, expected %d", 910 ubi_err(ubi, "bad image sequence number %d in PEB %d, expected %d",
907 image_seq, pnum, ubi->image_seq); 911 image_seq, pnum, ubi->image_seq);
908 ubi_dump_ec_hdr(ech); 912 ubi_dump_ec_hdr(ech);
909 return -EINVAL; 913 return -EINVAL;
@@ -981,7 +985,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
981 return err; 985 return err;
982 goto adjust_mean_ec; 986 goto adjust_mean_ec;
983 default: 987 default:
984 ubi_err("'ubi_io_read_vid_hdr()' returned unknown code %d", 988 ubi_err(ubi, "'ubi_io_read_vid_hdr()' returned unknown code %d",
985 err); 989 err);
986 return -EINVAL; 990 return -EINVAL;
987 } 991 }
@@ -999,7 +1003,7 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
999 case UBI_COMPAT_DELETE: 1003 case UBI_COMPAT_DELETE:
1000 if (vol_id != UBI_FM_SB_VOLUME_ID 1004 if (vol_id != UBI_FM_SB_VOLUME_ID
1001 && vol_id != UBI_FM_DATA_VOLUME_ID) { 1005 && vol_id != UBI_FM_DATA_VOLUME_ID) {
1002 ubi_msg("\"delete\" compatible internal volume %d:%d found, will remove it", 1006 ubi_msg(ubi, "\"delete\" compatible internal volume %d:%d found, will remove it",
1003 vol_id, lnum); 1007 vol_id, lnum);
1004 } 1008 }
1005 err = add_to_list(ai, pnum, vol_id, lnum, 1009 err = add_to_list(ai, pnum, vol_id, lnum,
@@ -1009,13 +1013,13 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
1009 return 0; 1013 return 0;
1010 1014
1011 case UBI_COMPAT_RO: 1015 case UBI_COMPAT_RO:
1012 ubi_msg("read-only compatible internal volume %d:%d found, switch to read-only mode", 1016 ubi_msg(ubi, "read-only compatible internal volume %d:%d found, switch to read-only mode",
1013 vol_id, lnum); 1017 vol_id, lnum);
1014 ubi->ro_mode = 1; 1018 ubi->ro_mode = 1;
1015 break; 1019 break;
1016 1020
1017 case UBI_COMPAT_PRESERVE: 1021 case UBI_COMPAT_PRESERVE:
1018 ubi_msg("\"preserve\" compatible internal volume %d:%d found", 1022 ubi_msg(ubi, "\"preserve\" compatible internal volume %d:%d found",
1019 vol_id, lnum); 1023 vol_id, lnum);
1020 err = add_to_list(ai, pnum, vol_id, lnum, 1024 err = add_to_list(ai, pnum, vol_id, lnum,
1021 ec, 0, &ai->alien); 1025 ec, 0, &ai->alien);
@@ -1024,14 +1028,14 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
1024 return 0; 1028 return 0;
1025 1029
1026 case UBI_COMPAT_REJECT: 1030 case UBI_COMPAT_REJECT:
1027 ubi_err("incompatible internal volume %d:%d found", 1031 ubi_err(ubi, "incompatible internal volume %d:%d found",
1028 vol_id, lnum); 1032 vol_id, lnum);
1029 return -EINVAL; 1033 return -EINVAL;
1030 } 1034 }
1031 } 1035 }
1032 1036
1033 if (ec_err) 1037 if (ec_err)
1034 ubi_warn("valid VID header but corrupted EC header at PEB %d", 1038 ubi_warn(ubi, "valid VID header but corrupted EC header at PEB %d",
1035 pnum); 1039 pnum);
1036 err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips); 1040 err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
1037 if (err) 1041 if (err)
@@ -1075,7 +1079,7 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
1075 * with the flash HW or driver. 1079 * with the flash HW or driver.
1076 */ 1080 */
1077 if (ai->corr_peb_count) { 1081 if (ai->corr_peb_count) {
1078 ubi_err("%d PEBs are corrupted and preserved", 1082 ubi_err(ubi, "%d PEBs are corrupted and preserved",
1079 ai->corr_peb_count); 1083 ai->corr_peb_count);
1080 pr_err("Corrupted PEBs are:"); 1084 pr_err("Corrupted PEBs are:");
1081 list_for_each_entry(aeb, &ai->corr, u.list) 1085 list_for_each_entry(aeb, &ai->corr, u.list)
@@ -1087,7 +1091,7 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
1087 * otherwise, only print a warning. 1091 * otherwise, only print a warning.
1088 */ 1092 */
1089 if (ai->corr_peb_count >= max_corr) { 1093 if (ai->corr_peb_count >= max_corr) {
1090 ubi_err("too many corrupted PEBs, refusing"); 1094 ubi_err(ubi, "too many corrupted PEBs, refusing");
1091 return -EINVAL; 1095 return -EINVAL;
1092 } 1096 }
1093 } 1097 }
@@ -1110,11 +1114,11 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
1110 */ 1114 */
1111 if (ai->maybe_bad_peb_count <= 2) { 1115 if (ai->maybe_bad_peb_count <= 2) {
1112 ai->is_empty = 1; 1116 ai->is_empty = 1;
1113 ubi_msg("empty MTD device detected"); 1117 ubi_msg(ubi, "empty MTD device detected");
1114 get_random_bytes(&ubi->image_seq, 1118 get_random_bytes(&ubi->image_seq,
1115 sizeof(ubi->image_seq)); 1119 sizeof(ubi->image_seq));
1116 } else { 1120 } else {
1117 ubi_err("MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it"); 1121 ubi_err(ubi, "MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
1118 return -EINVAL; 1122 return -EINVAL;
1119 } 1123 }
1120 1124
@@ -1248,7 +1252,7 @@ static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
1248 goto out_vidh; 1252 goto out_vidh;
1249 } 1253 }
1250 1254
1251 ubi_msg("scanning is finished"); 1255 ubi_msg(ubi, "scanning is finished");
1252 1256
1253 /* Calculate mean erase counter */ 1257 /* Calculate mean erase counter */
1254 if (ai->ec_count) 1258 if (ai->ec_count)
@@ -1515,37 +1519,37 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
1515 vols_found += 1; 1519 vols_found += 1;
1516 1520
1517 if (ai->is_empty) { 1521 if (ai->is_empty) {
1518 ubi_err("bad is_empty flag"); 1522 ubi_err(ubi, "bad is_empty flag");
1519 goto bad_av; 1523 goto bad_av;
1520 } 1524 }
1521 1525
1522 if (av->vol_id < 0 || av->highest_lnum < 0 || 1526 if (av->vol_id < 0 || av->highest_lnum < 0 ||
1523 av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 || 1527 av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 ||
1524 av->data_pad < 0 || av->last_data_size < 0) { 1528 av->data_pad < 0 || av->last_data_size < 0) {
1525 ubi_err("negative values"); 1529 ubi_err(ubi, "negative values");
1526 goto bad_av; 1530 goto bad_av;
1527 } 1531 }
1528 1532
1529 if (av->vol_id >= UBI_MAX_VOLUMES && 1533 if (av->vol_id >= UBI_MAX_VOLUMES &&
1530 av->vol_id < UBI_INTERNAL_VOL_START) { 1534 av->vol_id < UBI_INTERNAL_VOL_START) {
1531 ubi_err("bad vol_id"); 1535 ubi_err(ubi, "bad vol_id");
1532 goto bad_av; 1536 goto bad_av;
1533 } 1537 }
1534 1538
1535 if (av->vol_id > ai->highest_vol_id) { 1539 if (av->vol_id > ai->highest_vol_id) {
1536 ubi_err("highest_vol_id is %d, but vol_id %d is there", 1540 ubi_err(ubi, "highest_vol_id is %d, but vol_id %d is there",
1537 ai->highest_vol_id, av->vol_id); 1541 ai->highest_vol_id, av->vol_id);
1538 goto out; 1542 goto out;
1539 } 1543 }
1540 1544
1541 if (av->vol_type != UBI_DYNAMIC_VOLUME && 1545 if (av->vol_type != UBI_DYNAMIC_VOLUME &&
1542 av->vol_type != UBI_STATIC_VOLUME) { 1546 av->vol_type != UBI_STATIC_VOLUME) {
1543 ubi_err("bad vol_type"); 1547 ubi_err(ubi, "bad vol_type");
1544 goto bad_av; 1548 goto bad_av;
1545 } 1549 }
1546 1550
1547 if (av->data_pad > ubi->leb_size / 2) { 1551 if (av->data_pad > ubi->leb_size / 2) {
1548 ubi_err("bad data_pad"); 1552 ubi_err(ubi, "bad data_pad");
1549 goto bad_av; 1553 goto bad_av;
1550 } 1554 }
1551 1555
@@ -1557,48 +1561,48 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
1557 leb_count += 1; 1561 leb_count += 1;
1558 1562
1559 if (aeb->pnum < 0 || aeb->ec < 0) { 1563 if (aeb->pnum < 0 || aeb->ec < 0) {
1560 ubi_err("negative values"); 1564 ubi_err(ubi, "negative values");
1561 goto bad_aeb; 1565 goto bad_aeb;
1562 } 1566 }
1563 1567
1564 if (aeb->ec < ai->min_ec) { 1568 if (aeb->ec < ai->min_ec) {
1565 ubi_err("bad ai->min_ec (%d), %d found", 1569 ubi_err(ubi, "bad ai->min_ec (%d), %d found",
1566 ai->min_ec, aeb->ec); 1570 ai->min_ec, aeb->ec);
1567 goto bad_aeb; 1571 goto bad_aeb;
1568 } 1572 }
1569 1573
1570 if (aeb->ec > ai->max_ec) { 1574 if (aeb->ec > ai->max_ec) {
1571 ubi_err("bad ai->max_ec (%d), %d found", 1575 ubi_err(ubi, "bad ai->max_ec (%d), %d found",
1572 ai->max_ec, aeb->ec); 1576 ai->max_ec, aeb->ec);
1573 goto bad_aeb; 1577 goto bad_aeb;
1574 } 1578 }
1575 1579
1576 if (aeb->pnum >= ubi->peb_count) { 1580 if (aeb->pnum >= ubi->peb_count) {
1577 ubi_err("too high PEB number %d, total PEBs %d", 1581 ubi_err(ubi, "too high PEB number %d, total PEBs %d",
1578 aeb->pnum, ubi->peb_count); 1582 aeb->pnum, ubi->peb_count);
1579 goto bad_aeb; 1583 goto bad_aeb;
1580 } 1584 }
1581 1585
1582 if (av->vol_type == UBI_STATIC_VOLUME) { 1586 if (av->vol_type == UBI_STATIC_VOLUME) {
1583 if (aeb->lnum >= av->used_ebs) { 1587 if (aeb->lnum >= av->used_ebs) {
1584 ubi_err("bad lnum or used_ebs"); 1588 ubi_err(ubi, "bad lnum or used_ebs");
1585 goto bad_aeb; 1589 goto bad_aeb;
1586 } 1590 }
1587 } else { 1591 } else {
1588 if (av->used_ebs != 0) { 1592 if (av->used_ebs != 0) {
1589 ubi_err("non-zero used_ebs"); 1593 ubi_err(ubi, "non-zero used_ebs");
1590 goto bad_aeb; 1594 goto bad_aeb;
1591 } 1595 }
1592 } 1596 }
1593 1597
1594 if (aeb->lnum > av->highest_lnum) { 1598 if (aeb->lnum > av->highest_lnum) {
1595 ubi_err("incorrect highest_lnum or lnum"); 1599 ubi_err(ubi, "incorrect highest_lnum or lnum");
1596 goto bad_aeb; 1600 goto bad_aeb;
1597 } 1601 }
1598 } 1602 }
1599 1603
1600 if (av->leb_count != leb_count) { 1604 if (av->leb_count != leb_count) {
1601 ubi_err("bad leb_count, %d objects in the tree", 1605 ubi_err(ubi, "bad leb_count, %d objects in the tree",
1602 leb_count); 1606 leb_count);
1603 goto bad_av; 1607 goto bad_av;
1604 } 1608 }
@@ -1609,13 +1613,13 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
1609 aeb = last_aeb; 1613 aeb = last_aeb;
1610 1614
1611 if (aeb->lnum != av->highest_lnum) { 1615 if (aeb->lnum != av->highest_lnum) {
1612 ubi_err("bad highest_lnum"); 1616 ubi_err(ubi, "bad highest_lnum");
1613 goto bad_aeb; 1617 goto bad_aeb;
1614 } 1618 }
1615 } 1619 }
1616 1620
1617 if (vols_found != ai->vols_found) { 1621 if (vols_found != ai->vols_found) {
1618 ubi_err("bad ai->vols_found %d, should be %d", 1622 ubi_err(ubi, "bad ai->vols_found %d, should be %d",
1619 ai->vols_found, vols_found); 1623 ai->vols_found, vols_found);
1620 goto out; 1624 goto out;
1621 } 1625 }
@@ -1632,7 +1636,8 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
1632 1636
1633 err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1); 1637 err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1);
1634 if (err && err != UBI_IO_BITFLIPS) { 1638 if (err && err != UBI_IO_BITFLIPS) {
1635 ubi_err("VID header is not OK (%d)", err); 1639 ubi_err(ubi, "VID header is not OK (%d)",
1640 err);
1636 if (err > 0) 1641 if (err > 0)
1637 err = -EIO; 1642 err = -EIO;
1638 return err; 1643 return err;
@@ -1641,37 +1646,37 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
1641 vol_type = vidh->vol_type == UBI_VID_DYNAMIC ? 1646 vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
1642 UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME; 1647 UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
1643 if (av->vol_type != vol_type) { 1648 if (av->vol_type != vol_type) {
1644 ubi_err("bad vol_type"); 1649 ubi_err(ubi, "bad vol_type");
1645 goto bad_vid_hdr; 1650 goto bad_vid_hdr;
1646 } 1651 }
1647 1652
1648 if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) { 1653 if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) {
1649 ubi_err("bad sqnum %llu", aeb->sqnum); 1654 ubi_err(ubi, "bad sqnum %llu", aeb->sqnum);
1650 goto bad_vid_hdr; 1655 goto bad_vid_hdr;
1651 } 1656 }
1652 1657
1653 if (av->vol_id != be32_to_cpu(vidh->vol_id)) { 1658 if (av->vol_id != be32_to_cpu(vidh->vol_id)) {
1654 ubi_err("bad vol_id %d", av->vol_id); 1659 ubi_err(ubi, "bad vol_id %d", av->vol_id);
1655 goto bad_vid_hdr; 1660 goto bad_vid_hdr;
1656 } 1661 }
1657 1662
1658 if (av->compat != vidh->compat) { 1663 if (av->compat != vidh->compat) {
1659 ubi_err("bad compat %d", vidh->compat); 1664 ubi_err(ubi, "bad compat %d", vidh->compat);
1660 goto bad_vid_hdr; 1665 goto bad_vid_hdr;
1661 } 1666 }
1662 1667
1663 if (aeb->lnum != be32_to_cpu(vidh->lnum)) { 1668 if (aeb->lnum != be32_to_cpu(vidh->lnum)) {
1664 ubi_err("bad lnum %d", aeb->lnum); 1669 ubi_err(ubi, "bad lnum %d", aeb->lnum);
1665 goto bad_vid_hdr; 1670 goto bad_vid_hdr;
1666 } 1671 }
1667 1672
1668 if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) { 1673 if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) {
1669 ubi_err("bad used_ebs %d", av->used_ebs); 1674 ubi_err(ubi, "bad used_ebs %d", av->used_ebs);
1670 goto bad_vid_hdr; 1675 goto bad_vid_hdr;
1671 } 1676 }
1672 1677
1673 if (av->data_pad != be32_to_cpu(vidh->data_pad)) { 1678 if (av->data_pad != be32_to_cpu(vidh->data_pad)) {
1674 ubi_err("bad data_pad %d", av->data_pad); 1679 ubi_err(ubi, "bad data_pad %d", av->data_pad);
1675 goto bad_vid_hdr; 1680 goto bad_vid_hdr;
1676 } 1681 }
1677 } 1682 }
@@ -1680,12 +1685,13 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
1680 continue; 1685 continue;
1681 1686
1682 if (av->highest_lnum != be32_to_cpu(vidh->lnum)) { 1687 if (av->highest_lnum != be32_to_cpu(vidh->lnum)) {
1683 ubi_err("bad highest_lnum %d", av->highest_lnum); 1688 ubi_err(ubi, "bad highest_lnum %d", av->highest_lnum);
1684 goto bad_vid_hdr; 1689 goto bad_vid_hdr;
1685 } 1690 }
1686 1691
1687 if (av->last_data_size != be32_to_cpu(vidh->data_size)) { 1692 if (av->last_data_size != be32_to_cpu(vidh->data_size)) {
1688 ubi_err("bad last_data_size %d", av->last_data_size); 1693 ubi_err(ubi, "bad last_data_size %d",
1694 av->last_data_size);
1689 goto bad_vid_hdr; 1695 goto bad_vid_hdr;
1690 } 1696 }
1691 } 1697 }
@@ -1726,7 +1732,7 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
1726 err = 0; 1732 err = 0;
1727 for (pnum = 0; pnum < ubi->peb_count; pnum++) 1733 for (pnum = 0; pnum < ubi->peb_count; pnum++)
1728 if (!buf[pnum]) { 1734 if (!buf[pnum]) {
1729 ubi_err("PEB %d is not referred", pnum); 1735 ubi_err(ubi, "PEB %d is not referred", pnum);
1730 err = 1; 1736 err = 1;
1731 } 1737 }
1732 1738
@@ -1736,18 +1742,18 @@ static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
1736 return 0; 1742 return 0;
1737 1743
1738bad_aeb: 1744bad_aeb:
1739 ubi_err("bad attaching information about LEB %d", aeb->lnum); 1745 ubi_err(ubi, "bad attaching information about LEB %d", aeb->lnum);
1740 ubi_dump_aeb(aeb, 0); 1746 ubi_dump_aeb(aeb, 0);
1741 ubi_dump_av(av); 1747 ubi_dump_av(av);
1742 goto out; 1748 goto out;
1743 1749
1744bad_av: 1750bad_av:
1745 ubi_err("bad attaching information about volume %d", av->vol_id); 1751 ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
1746 ubi_dump_av(av); 1752 ubi_dump_av(av);
1747 goto out; 1753 goto out;
1748 1754
1749bad_vid_hdr: 1755bad_vid_hdr:
1750 ubi_err("bad attaching information about volume %d", av->vol_id); 1756 ubi_err(ubi, "bad attaching information about volume %d", av->vol_id);
1751 ubi_dump_av(av); 1757 ubi_dump_av(av);
1752 ubi_dump_vid_hdr(vidh); 1758 ubi_dump_vid_hdr(vidh);
1753 1759
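
Every attach.c hunk above threads the struct ubi_device pointer into ubi_err()/ubi_warn()/ubi_msg() so each message names the device it concerns. A plausible shape for such a macro — an illustrative assumption only; the real definition lives in drivers/mtd/ubi/ubi.h and may differ in detail:

/* Sketch of a per-device logging macro: ubi->ubi_num identifies the
 * device, so output reads "UBI-0 error: ..." instead of a bare
 * "UBI error: ..." that cannot be tied to one of several devices. */
#define ubi_err(ubi, fmt, ...) \
	pr_err("UBI-%d error: %s: " fmt "\n", \
	       (ubi)->ubi_num, __func__, ##__VA_ARGS__)
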
diff --git a/drivers/mtd/ubi/block.c b/drivers/mtd/ubi/block.c
index 8876c7d3d712..6b6bce28bd63 100644
--- a/drivers/mtd/ubi/block.c
+++ b/drivers/mtd/ubi/block.c
@@ -111,13 +111,13 @@ static int __init ubiblock_set_param(const char *val,
111 111
112 len = strnlen(val, UBIBLOCK_PARAM_LEN); 112 len = strnlen(val, UBIBLOCK_PARAM_LEN);
113 if (len == 0) { 113 if (len == 0) {
114 ubi_warn("block: empty 'block=' parameter - ignored\n"); 114 pr_warn("UBI: block: empty 'block=' parameter - ignored\n");
115 return 0; 115 return 0;
116 } 116 }
117 117
118 if (len == UBIBLOCK_PARAM_LEN) { 118 if (len == UBIBLOCK_PARAM_LEN) {
119 ubi_err("block: parameter \"%s\" is too long, max. is %d\n", 119 pr_err("UBI: block: parameter \"%s\" is too long, max. is %d\n",
120 val, UBIBLOCK_PARAM_LEN); 120 val, UBIBLOCK_PARAM_LEN);
121 return -EINVAL; 121 return -EINVAL;
122 } 122 }
123 123
@@ -188,9 +188,8 @@ static int ubiblock_read_to_buf(struct ubiblock *dev, char *buffer,
188 188
189 ret = ubi_read(dev->desc, leb, buffer, offset, len); 189 ret = ubi_read(dev->desc, leb, buffer, offset, len);
190 if (ret) { 190 if (ret) {
191 ubi_err("%s: error %d while reading from LEB %d (offset %d, " 191 dev_err(disk_to_dev(dev->gd), "%d while reading from LEB %d (offset %d, length %d)",
192 "length %d)", dev->gd->disk_name, ret, leb, offset, 192 ret, leb, offset, len);
193 len);
194 return ret; 193 return ret;
195 } 194 }
196 return 0; 195 return 0;
@@ -328,8 +327,8 @@ static int ubiblock_open(struct block_device *bdev, fmode_t mode)
328 327
329 dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY); 328 dev->desc = ubi_open_volume(dev->ubi_num, dev->vol_id, UBI_READONLY);
330 if (IS_ERR(dev->desc)) { 329 if (IS_ERR(dev->desc)) {
331 ubi_err("%s failed to open ubi volume %d_%d", 330 dev_err(disk_to_dev(dev->gd), "failed to open ubi volume %d_%d",
332 dev->gd->disk_name, dev->ubi_num, dev->vol_id); 331 dev->ubi_num, dev->vol_id);
333 ret = PTR_ERR(dev->desc); 332 ret = PTR_ERR(dev->desc);
334 dev->desc = NULL; 333 dev->desc = NULL;
335 goto out_unlock; 334 goto out_unlock;
@@ -405,7 +404,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
405 /* Initialize the gendisk of this ubiblock device */ 404 /* Initialize the gendisk of this ubiblock device */
406 gd = alloc_disk(1); 405 gd = alloc_disk(1);
407 if (!gd) { 406 if (!gd) {
408 ubi_err("block: alloc_disk failed"); 407 pr_err("UBI: block: alloc_disk failed");
409 ret = -ENODEV; 408 ret = -ENODEV;
410 goto out_free_dev; 409 goto out_free_dev;
411 } 410 }
@@ -421,7 +420,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
421 spin_lock_init(&dev->queue_lock); 420 spin_lock_init(&dev->queue_lock);
422 dev->rq = blk_init_queue(ubiblock_request, &dev->queue_lock); 421 dev->rq = blk_init_queue(ubiblock_request, &dev->queue_lock);
423 if (!dev->rq) { 422 if (!dev->rq) {
424 ubi_err("block: blk_init_queue failed"); 423 dev_err(disk_to_dev(gd), "blk_init_queue failed");
425 ret = -ENODEV; 424 ret = -ENODEV;
426 goto out_put_disk; 425 goto out_put_disk;
427 } 426 }
@@ -446,8 +445,8 @@ int ubiblock_create(struct ubi_volume_info *vi)
446 445
447 /* Must be the last step: anyone can call file ops from now on */ 446 /* Must be the last step: anyone can call file ops from now on */
448 add_disk(dev->gd); 447 add_disk(dev->gd);
449 ubi_msg("%s created from ubi%d:%d(%s)", 448 dev_info(disk_to_dev(dev->gd), "created from ubi%d:%d(%s)",
450 dev->gd->disk_name, dev->ubi_num, dev->vol_id, vi->name); 449 dev->ubi_num, dev->vol_id, vi->name);
451 return 0; 450 return 0;
452 451
453out_free_queue: 452out_free_queue:
@@ -464,7 +463,7 @@ static void ubiblock_cleanup(struct ubiblock *dev)
464{ 463{
465 del_gendisk(dev->gd); 464 del_gendisk(dev->gd);
466 blk_cleanup_queue(dev->rq); 465 blk_cleanup_queue(dev->rq);
467 ubi_msg("%s released", dev->gd->disk_name); 466 dev_info(disk_to_dev(dev->gd), "released");
468 put_disk(dev->gd); 467 put_disk(dev->gd);
469} 468}
470 469
@@ -518,8 +517,8 @@ static int ubiblock_resize(struct ubi_volume_info *vi)
518 } 517 }
519 if ((sector_t)disk_capacity != disk_capacity) { 518 if ((sector_t)disk_capacity != disk_capacity) {
520 mutex_unlock(&devices_mutex); 519 mutex_unlock(&devices_mutex);
521 ubi_warn("%s: the volume is too big (%d LEBs), cannot resize", 520 dev_warn(disk_to_dev(dev->gd), "the volume is too big (%d LEBs), cannot resize",
522 dev->gd->disk_name, vi->size); 521 vi->size);
523 return -EFBIG; 522 return -EFBIG;
524 } 523 }
525 524
@@ -527,8 +526,8 @@ static int ubiblock_resize(struct ubi_volume_info *vi)
527 526
528 if (get_capacity(dev->gd) != disk_capacity) { 527 if (get_capacity(dev->gd) != disk_capacity) {
529 set_capacity(dev->gd, disk_capacity); 528 set_capacity(dev->gd, disk_capacity);
530 ubi_msg("%s resized to %lld bytes", dev->gd->disk_name, 529 dev_info(disk_to_dev(dev->gd), "resized to %lld bytes",
531 vi->used_bytes); 530 vi->used_bytes);
532 } 531 }
533 mutex_unlock(&dev->dev_mutex); 532 mutex_unlock(&dev->dev_mutex);
534 mutex_unlock(&devices_mutex); 533 mutex_unlock(&devices_mutex);
@@ -596,8 +595,8 @@ static int __init ubiblock_create_from_param(void)
596 595
597 desc = open_volume_desc(p->name, p->ubi_num, p->vol_id); 596 desc = open_volume_desc(p->name, p->ubi_num, p->vol_id);
598 if (IS_ERR(desc)) { 597 if (IS_ERR(desc)) {
599 ubi_err("block: can't open volume, err=%ld\n", 598 pr_err("UBI: block: can't open volume, err=%ld\n",
600 PTR_ERR(desc)); 599 PTR_ERR(desc));
601 ret = PTR_ERR(desc); 600 ret = PTR_ERR(desc);
602 break; 601 break;
603 } 602 }
@@ -607,8 +606,8 @@ static int __init ubiblock_create_from_param(void)
607 606
608 ret = ubiblock_create(&vi); 607 ret = ubiblock_create(&vi);
609 if (ret) { 608 if (ret) {
610 ubi_err("block: can't add '%s' volume, err=%d\n", 609 pr_err("UBI: block: can't add '%s' volume, err=%d\n",
611 vi.name, ret); 610 vi.name, ret);
612 break; 611 break;
613 } 612 }
614 } 613 }
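
In block.c the per-disk messages switch from the ubi-wide helpers to dev_err()/dev_info()/dev_warn() on disk_to_dev(dev->gd), which lets the driver core prefix each line with the block device's own name instead of a hand-built "%s: ..." format. A hedged, self-contained sketch of the idiom (report_leb_read_error() is a hypothetical helper, not ubiblock API):

#include <linux/device.h>
#include <linux/genhd.h>

/* disk_to_dev() yields the struct device embedded in a gendisk, so
 * dev_err() prints e.g. "ubiblock0_1: error ..." automatically. */
static void report_leb_read_error(struct gendisk *gd, int err, int leb)
{
	dev_err(disk_to_dev(gd), "error %d while reading LEB %d", err, leb);
}
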
diff --git a/drivers/mtd/ubi/build.c b/drivers/mtd/ubi/build.c
index 6e30a3c280d0..3405be46ebe9 100644
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -166,7 +166,7 @@ int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
166 case UBI_VOLUME_RESIZED: 166 case UBI_VOLUME_RESIZED:
167 case UBI_VOLUME_RENAMED: 167 case UBI_VOLUME_RENAMED:
168 if (ubi_update_fastmap(ubi)) { 168 if (ubi_update_fastmap(ubi)) {
169 ubi_err("Unable to update fastmap!"); 169 ubi_err(ubi, "Unable to update fastmap!");
170 ubi_ro_mode(ubi); 170 ubi_ro_mode(ubi);
171 } 171 }
172 } 172 }
@@ -517,7 +517,7 @@ static int uif_init(struct ubi_device *ubi, int *ref)
517 */ 517 */
518 err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name); 518 err = alloc_chrdev_region(&dev, 0, ubi->vtbl_slots + 1, ubi->ubi_name);
519 if (err) { 519 if (err) {
520 ubi_err("cannot register UBI character devices"); 520 ubi_err(ubi, "cannot register UBI character devices");
521 return err; 521 return err;
522 } 522 }
523 523
@@ -528,7 +528,7 @@ static int uif_init(struct ubi_device *ubi, int *ref)
528 528
529 err = cdev_add(&ubi->cdev, dev, 1); 529 err = cdev_add(&ubi->cdev, dev, 1);
530 if (err) { 530 if (err) {
531 ubi_err("cannot add character device"); 531 ubi_err(ubi, "cannot add character device");
532 goto out_unreg; 532 goto out_unreg;
533 } 533 }
534 534
@@ -540,7 +540,7 @@ static int uif_init(struct ubi_device *ubi, int *ref)
540 if (ubi->volumes[i]) { 540 if (ubi->volumes[i]) {
541 err = ubi_add_volume(ubi, ubi->volumes[i]); 541 err = ubi_add_volume(ubi, ubi->volumes[i]);
542 if (err) { 542 if (err) {
543 ubi_err("cannot add volume %d", i); 543 ubi_err(ubi, "cannot add volume %d", i);
544 goto out_volumes; 544 goto out_volumes;
545 } 545 }
546 } 546 }
@@ -556,7 +556,8 @@ out_sysfs:
556 cdev_del(&ubi->cdev); 556 cdev_del(&ubi->cdev);
557out_unreg: 557out_unreg:
558 unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1); 558 unregister_chrdev_region(ubi->cdev.dev, ubi->vtbl_slots + 1);
559 ubi_err("cannot initialize UBI %s, error %d", ubi->ubi_name, err); 559 ubi_err(ubi, "cannot initialize UBI %s, error %d",
560 ubi->ubi_name, err);
560 return err; 561 return err;
561} 562}
562 563
@@ -650,7 +651,7 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
650 * guess we should just pick the largest region. But this is 651 * guess we should just pick the largest region. But this is
651 * not implemented. 652 * not implemented.
652 */ 653 */
653 ubi_err("multiple regions, not implemented"); 654 ubi_err(ubi, "multiple regions, not implemented");
654 return -EINVAL; 655 return -EINVAL;
655 } 656 }
656 657
@@ -685,7 +686,7 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
685 * which allows us to avoid costly division operations. 686 * which allows us to avoid costly division operations.
686 */ 687 */
687 if (!is_power_of_2(ubi->min_io_size)) { 688 if (!is_power_of_2(ubi->min_io_size)) {
688 ubi_err("min. I/O unit (%d) is not power of 2", 689 ubi_err(ubi, "min. I/O unit (%d) is not power of 2",
689 ubi->min_io_size); 690 ubi->min_io_size);
690 return -EINVAL; 691 return -EINVAL;
691 } 692 }
@@ -702,7 +703,7 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
702 if (ubi->max_write_size < ubi->min_io_size || 703 if (ubi->max_write_size < ubi->min_io_size ||
703 ubi->max_write_size % ubi->min_io_size || 704 ubi->max_write_size % ubi->min_io_size ||
704 !is_power_of_2(ubi->max_write_size)) { 705 !is_power_of_2(ubi->max_write_size)) {
705 ubi_err("bad write buffer size %d for %d min. I/O unit", 706 ubi_err(ubi, "bad write buffer size %d for %d min. I/O unit",
706 ubi->max_write_size, ubi->min_io_size); 707 ubi->max_write_size, ubi->min_io_size);
707 return -EINVAL; 708 return -EINVAL;
708 } 709 }
@@ -739,7 +740,7 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
739 740
740 /* The shift must be aligned to 32-bit boundary */ 741 /* The shift must be aligned to 32-bit boundary */
741 if (ubi->vid_hdr_shift % 4) { 742 if (ubi->vid_hdr_shift % 4) {
742 ubi_err("unaligned VID header shift %d", 743 ubi_err(ubi, "unaligned VID header shift %d",
743 ubi->vid_hdr_shift); 744 ubi->vid_hdr_shift);
744 return -EINVAL; 745 return -EINVAL;
745 } 746 }
@@ -749,7 +750,7 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
749 ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE || 750 ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
750 ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE || 751 ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
751 ubi->leb_start & (ubi->min_io_size - 1)) { 752 ubi->leb_start & (ubi->min_io_size - 1)) {
752 ubi_err("bad VID header (%d) or data offsets (%d)", 753 ubi_err(ubi, "bad VID header (%d) or data offsets (%d)",
753 ubi->vid_hdr_offset, ubi->leb_start); 754 ubi->vid_hdr_offset, ubi->leb_start);
754 return -EINVAL; 755 return -EINVAL;
755 } 756 }
@@ -769,14 +770,14 @@ static int io_init(struct ubi_device *ubi, int max_beb_per1024)
769 * read-only mode. 770 * read-only mode.
770 */ 771 */
771 if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) { 772 if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
772 ubi_warn("EC and VID headers are in the same minimal I/O unit, switch to read-only mode"); 773 ubi_warn(ubi, "EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
773 ubi->ro_mode = 1; 774 ubi->ro_mode = 1;
774 } 775 }
775 776
776 ubi->leb_size = ubi->peb_size - ubi->leb_start; 777 ubi->leb_size = ubi->peb_size - ubi->leb_start;
777 778
778 if (!(ubi->mtd->flags & MTD_WRITEABLE)) { 779 if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
779 ubi_msg("MTD device %d is write-protected, attach in read-only mode", 780 ubi_msg(ubi, "MTD device %d is write-protected, attach in read-only mode",
780 ubi->mtd->index); 781 ubi->mtd->index);
781 ubi->ro_mode = 1; 782 ubi->ro_mode = 1;
782 } 783 }
@@ -809,7 +810,7 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
809 int err, old_reserved_pebs = vol->reserved_pebs; 810 int err, old_reserved_pebs = vol->reserved_pebs;
810 811
811 if (ubi->ro_mode) { 812 if (ubi->ro_mode) {
812 ubi_warn("skip auto-resize because of R/O mode"); 813 ubi_warn(ubi, "skip auto-resize because of R/O mode");
813 return 0; 814 return 0;
814 } 815 }
815 816
@@ -830,21 +831,22 @@ static int autoresize(struct ubi_device *ubi, int vol_id)
830 vtbl_rec = ubi->vtbl[vol_id]; 831 vtbl_rec = ubi->vtbl[vol_id];
831 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec); 832 err = ubi_change_vtbl_record(ubi, vol_id, &vtbl_rec);
832 if (err) 833 if (err)
833 ubi_err("cannot clean auto-resize flag for volume %d", 834 ubi_err(ubi, "cannot clean auto-resize flag for volume %d",
834 vol_id); 835 vol_id);
835 } else { 836 } else {
836 desc.vol = vol; 837 desc.vol = vol;
837 err = ubi_resize_volume(&desc, 838 err = ubi_resize_volume(&desc,
838 old_reserved_pebs + ubi->avail_pebs); 839 old_reserved_pebs + ubi->avail_pebs);
839 if (err) 840 if (err)
840 ubi_err("cannot auto-resize volume %d", vol_id); 841 ubi_err(ubi, "cannot auto-resize volume %d",
842 vol_id);
841 } 843 }
842 844
843 if (err) 845 if (err)
844 return err; 846 return err;
845 847
846 ubi_msg("volume %d (\"%s\") re-sized from %d to %d LEBs", vol_id, 848 ubi_msg(ubi, "volume %d (\"%s\") re-sized from %d to %d LEBs",
847 vol->name, old_reserved_pebs, vol->reserved_pebs); 849 vol_id, vol->name, old_reserved_pebs, vol->reserved_pebs);
848 return 0; 850 return 0;
849} 851}
850 852
@@ -885,7 +887,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
885 for (i = 0; i < UBI_MAX_DEVICES; i++) { 887 for (i = 0; i < UBI_MAX_DEVICES; i++) {
886 ubi = ubi_devices[i]; 888 ubi = ubi_devices[i];
887 if (ubi && mtd->index == ubi->mtd->index) { 889 if (ubi && mtd->index == ubi->mtd->index) {
888 ubi_err("mtd%d is already attached to ubi%d", 890 ubi_err(ubi, "mtd%d is already attached to ubi%d",
889 mtd->index, i); 891 mtd->index, i);
890 return -EEXIST; 892 return -EEXIST;
891 } 893 }
@@ -900,7 +902,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
900 * no sense to attach emulated MTD devices, so we prohibit this. 902 * no sense to attach emulated MTD devices, so we prohibit this.
901 */ 903 */
902 if (mtd->type == MTD_UBIVOLUME) { 904 if (mtd->type == MTD_UBIVOLUME) {
903 ubi_err("refuse attaching mtd%d - it is already emulated on top of UBI", 905 ubi_err(ubi, "refuse attaching mtd%d - it is already emulated on top of UBI",
904 mtd->index); 906 mtd->index);
905 return -EINVAL; 907 return -EINVAL;
906 } 908 }
@@ -911,7 +913,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
911 if (!ubi_devices[ubi_num]) 913 if (!ubi_devices[ubi_num])
912 break; 914 break;
913 if (ubi_num == UBI_MAX_DEVICES) { 915 if (ubi_num == UBI_MAX_DEVICES) {
914 ubi_err("only %d UBI devices may be created", 916 ubi_err(ubi, "only %d UBI devices may be created",
915 UBI_MAX_DEVICES); 917 UBI_MAX_DEVICES);
916 return -ENFILE; 918 return -ENFILE;
917 } 919 }
@@ -921,7 +923,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
921 923
922 /* Make sure ubi_num is not busy */ 924 /* Make sure ubi_num is not busy */
923 if (ubi_devices[ubi_num]) { 925 if (ubi_devices[ubi_num]) {
924 ubi_err("ubi%d already exists", ubi_num); 926 ubi_err(ubi, "ubi%d already exists", ubi_num);
925 return -EEXIST; 927 return -EEXIST;
926 } 928 }
927 } 929 }
@@ -953,13 +955,14 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
953 955
954 if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) 956 if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
955 <= UBI_FM_MAX_START) { 957 <= UBI_FM_MAX_START) {
956 ubi_err("More than %i PEBs are needed for fastmap, sorry.", 958 ubi_err(ubi, "More than %i PEBs are needed for fastmap, sorry.",
957 UBI_FM_MAX_START); 959 UBI_FM_MAX_START);
958 ubi->fm_disabled = 1; 960 ubi->fm_disabled = 1;
959 } 961 }
960 962
961 ubi_msg("default fastmap pool size: %d", ubi->fm_pool.max_size); 963 ubi_msg(ubi, "default fastmap pool size: %d", ubi->fm_pool.max_size);
962 ubi_msg("default fastmap WL pool size: %d", ubi->fm_wl_pool.max_size); 964 ubi_msg(ubi, "default fastmap WL pool size: %d",
965 ubi->fm_wl_pool.max_size);
963#else 966#else
964 ubi->fm_disabled = 1; 967 ubi->fm_disabled = 1;
965#endif 968#endif
@@ -970,7 +973,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
970 mutex_init(&ubi->fm_mutex); 973 mutex_init(&ubi->fm_mutex);
971 init_rwsem(&ubi->fm_sem); 974 init_rwsem(&ubi->fm_sem);
972 975
973 ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num); 976 ubi_msg(ubi, "attaching mtd%d to ubi%d", mtd->index, ubi_num);
974 977
975 err = io_init(ubi, max_beb_per1024); 978 err = io_init(ubi, max_beb_per1024);
976 if (err) 979 if (err)
@@ -989,7 +992,8 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
989#endif 992#endif
990 err = ubi_attach(ubi, 0); 993 err = ubi_attach(ubi, 0);
991 if (err) { 994 if (err) {
992 ubi_err("failed to attach mtd%d, error %d", mtd->index, err); 995 ubi_err(ubi, "failed to attach mtd%d, error %d",
996 mtd->index, err);
993 goto out_free; 997 goto out_free;
994 } 998 }
995 999
@@ -1010,28 +1014,28 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
1010 ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name); 1014 ubi->bgt_thread = kthread_create(ubi_thread, ubi, "%s", ubi->bgt_name);
1011 if (IS_ERR(ubi->bgt_thread)) { 1015 if (IS_ERR(ubi->bgt_thread)) {
1012 err = PTR_ERR(ubi->bgt_thread); 1016 err = PTR_ERR(ubi->bgt_thread);
1013 ubi_err("cannot spawn \"%s\", error %d", ubi->bgt_name, 1017 ubi_err(ubi, "cannot spawn \"%s\", error %d",
1014 err); 1018 ubi->bgt_name, err);
1015 goto out_debugfs; 1019 goto out_debugfs;
1016 } 1020 }
1017 1021
1018 ubi_msg("attached mtd%d (name \"%s\", size %llu MiB) to ubi%d", 1022 ubi_msg(ubi, "attached mtd%d (name \"%s\", size %llu MiB)",
1019 mtd->index, mtd->name, ubi->flash_size >> 20, ubi_num); 1023 mtd->index, mtd->name, ubi->flash_size >> 20);
1020 ubi_msg("PEB size: %d bytes (%d KiB), LEB size: %d bytes", 1024 ubi_msg(ubi, "PEB size: %d bytes (%d KiB), LEB size: %d bytes",
1021 ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size); 1025 ubi->peb_size, ubi->peb_size >> 10, ubi->leb_size);
1022 ubi_msg("min./max. I/O unit sizes: %d/%d, sub-page size %d", 1026 ubi_msg(ubi, "min./max. I/O unit sizes: %d/%d, sub-page size %d",
1023 ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size); 1027 ubi->min_io_size, ubi->max_write_size, ubi->hdrs_min_io_size);
1024 ubi_msg("VID header offset: %d (aligned %d), data offset: %d", 1028 ubi_msg(ubi, "VID header offset: %d (aligned %d), data offset: %d",
1025 ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start); 1029 ubi->vid_hdr_offset, ubi->vid_hdr_aloffset, ubi->leb_start);
1026 ubi_msg("good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d", 1030 ubi_msg(ubi, "good PEBs: %d, bad PEBs: %d, corrupted PEBs: %d",
1027 ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count); 1031 ubi->good_peb_count, ubi->bad_peb_count, ubi->corr_peb_count);
1028 ubi_msg("user volume: %d, internal volumes: %d, max. volumes count: %d", 1032 ubi_msg(ubi, "user volume: %d, internal volumes: %d, max. volumes count: %d",
1029 ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT, 1033 ubi->vol_count - UBI_INT_VOL_COUNT, UBI_INT_VOL_COUNT,
1030 ubi->vtbl_slots); 1034 ubi->vtbl_slots);
1031 ubi_msg("max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u", 1035 ubi_msg(ubi, "max/mean erase counter: %d/%d, WL threshold: %d, image sequence number: %u",
1032 ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD, 1036 ubi->max_ec, ubi->mean_ec, CONFIG_MTD_UBI_WL_THRESHOLD,
1033 ubi->image_seq); 1037 ubi->image_seq);
1034 ubi_msg("available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d", 1038 ubi_msg(ubi, "available PEBs: %d, total reserved PEBs: %d, PEBs reserved for bad PEB handling: %d",
1035 ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs); 1039 ubi->avail_pebs, ubi->rsvd_pebs, ubi->beb_rsvd_pebs);
1036 1040
1037 /* 1041 /*
@@ -1100,7 +1104,7 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
1100 return -EBUSY; 1104 return -EBUSY;
1101 } 1105 }
1102 /* This may only happen if there is a bug */ 1106 /* This may only happen if there is a bug */
1103 ubi_err("%s reference count %d, destroy anyway", 1107 ubi_err(ubi, "%s reference count %d, destroy anyway",
1104 ubi->ubi_name, ubi->ref_count); 1108 ubi->ubi_name, ubi->ref_count);
1105 } 1109 }
1106 ubi_devices[ubi_num] = NULL; 1110 ubi_devices[ubi_num] = NULL;
@@ -1108,7 +1112,7 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
1108 1112
1109 ubi_assert(ubi_num == ubi->ubi_num); 1113 ubi_assert(ubi_num == ubi->ubi_num);
1110 ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL); 1114 ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
1111 ubi_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num); 1115 ubi_msg(ubi, "detaching mtd%d", ubi->mtd->index);
1112#ifdef CONFIG_MTD_UBI_FASTMAP 1116#ifdef CONFIG_MTD_UBI_FASTMAP
1113 /* If we don't write a new fastmap at detach time we lose all 1117 /* If we don't write a new fastmap at detach time we lose all
1114 * EC updates that have been made since the last written fastmap. */ 1118 * EC updates that have been made since the last written fastmap. */
@@ -1136,7 +1140,7 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
1136 put_mtd_device(ubi->mtd); 1140 put_mtd_device(ubi->mtd);
1137 vfree(ubi->peb_buf); 1141 vfree(ubi->peb_buf);
1138 vfree(ubi->fm_buf); 1142 vfree(ubi->fm_buf);
1139 ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num); 1143 ubi_msg(ubi, "mtd%d is detached", ubi->mtd->index);
1140 put_device(&ubi->dev); 1144 put_device(&ubi->dev);
1141 return 0; 1145 return 0;
1142} 1146}
@@ -1218,7 +1222,8 @@ static int __init ubi_init(void)
1218 BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64); 1222 BUILD_BUG_ON(sizeof(struct ubi_vid_hdr) != 64);
1219 1223
1220 if (mtd_devs > UBI_MAX_DEVICES) { 1224 if (mtd_devs > UBI_MAX_DEVICES) {
1221 ubi_err("too many MTD devices, maximum is %d", UBI_MAX_DEVICES); 1225 pr_err("UBI error: too many MTD devices, maximum is %d",
1226 UBI_MAX_DEVICES);
1222 return -EINVAL; 1227 return -EINVAL;
1223 } 1228 }
1224 1229
@@ -1226,19 +1231,19 @@ static int __init ubi_init(void)
1226 ubi_class = class_create(THIS_MODULE, UBI_NAME_STR); 1231 ubi_class = class_create(THIS_MODULE, UBI_NAME_STR);
1227 if (IS_ERR(ubi_class)) { 1232 if (IS_ERR(ubi_class)) {
1228 err = PTR_ERR(ubi_class); 1233 err = PTR_ERR(ubi_class);
1229 ubi_err("cannot create UBI class"); 1234 pr_err("UBI error: cannot create UBI class");
1230 goto out; 1235 goto out;
1231 } 1236 }
1232 1237
1233 err = class_create_file(ubi_class, &ubi_version); 1238 err = class_create_file(ubi_class, &ubi_version);
1234 if (err) { 1239 if (err) {
1235 ubi_err("cannot create sysfs file"); 1240 pr_err("UBI error: cannot create sysfs file");
1236 goto out_class; 1241 goto out_class;
1237 } 1242 }
1238 1243
1239 err = misc_register(&ubi_ctrl_cdev); 1244 err = misc_register(&ubi_ctrl_cdev);
1240 if (err) { 1245 if (err) {
1241 ubi_err("cannot register device"); 1246 pr_err("UBI error: cannot register device");
1242 goto out_version; 1247 goto out_version;
1243 } 1248 }
1244 1249
@@ -1265,7 +1270,8 @@ static int __init ubi_init(void)
1265 mtd = open_mtd_device(p->name); 1270 mtd = open_mtd_device(p->name);
1266 if (IS_ERR(mtd)) { 1271 if (IS_ERR(mtd)) {
1267 err = PTR_ERR(mtd); 1272 err = PTR_ERR(mtd);
1268 ubi_err("cannot open mtd %s, error %d", p->name, err); 1273 pr_err("UBI error: cannot open mtd %s, error %d",
1274 p->name, err);
1269 /* See comment below re-ubi_is_module(). */ 1275 /* See comment below re-ubi_is_module(). */
1270 if (ubi_is_module()) 1276 if (ubi_is_module())
1271 goto out_detach; 1277 goto out_detach;
@@ -1277,7 +1283,8 @@ static int __init ubi_init(void)
1277 p->vid_hdr_offs, p->max_beb_per1024); 1283 p->vid_hdr_offs, p->max_beb_per1024);
1278 mutex_unlock(&ubi_devices_mutex); 1284 mutex_unlock(&ubi_devices_mutex);
1279 if (err < 0) { 1285 if (err < 0) {
1280 ubi_err("cannot attach mtd%d", mtd->index); 1286 pr_err("UBI error: cannot attach mtd%d",
1287 mtd->index);
1281 put_mtd_device(mtd); 1288 put_mtd_device(mtd);
1282 1289
1283 /* 1290 /*
@@ -1300,7 +1307,7 @@ static int __init ubi_init(void)
1300 1307
1301 err = ubiblock_init(); 1308 err = ubiblock_init();
1302 if (err) { 1309 if (err) {
1303 ubi_err("block: cannot initialize, error %d", err); 1310 pr_err("UBI error: block: cannot initialize, error %d", err);
1304 1311
1305 /* See comment above re-ubi_is_module(). */ 1312 /* See comment above re-ubi_is_module(). */
1306 if (ubi_is_module()) 1313 if (ubi_is_module())
@@ -1326,7 +1333,7 @@ out_version:
1326out_class: 1333out_class:
1327 class_destroy(ubi_class); 1334 class_destroy(ubi_class);
1328out: 1335out:
1329 ubi_err("cannot initialize UBI, error %d", err); 1336 pr_err("UBI error: cannot initialize UBI, error %d", err);
1330 return err; 1337 return err;
1331} 1338}
1332late_initcall(ubi_init); 1339late_initcall(ubi_init);
@@ -1365,7 +1372,7 @@ static int __init bytes_str_to_int(const char *str)
1365 1372
1366 result = simple_strtoul(str, &endp, 0); 1373 result = simple_strtoul(str, &endp, 0);
1367 if (str == endp || result >= INT_MAX) { 1374 if (str == endp || result >= INT_MAX) {
1368 ubi_err("incorrect bytes count: \"%s\"\n", str); 1375 pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
1369 return -EINVAL; 1376 return -EINVAL;
1370 } 1377 }
1371 1378
@@ -1381,7 +1388,7 @@ static int __init bytes_str_to_int(const char *str)
1381 case '\0': 1388 case '\0':
1382 break; 1389 break;
1383 default: 1390 default:
1384 ubi_err("incorrect bytes count: \"%s\"\n", str); 1391 pr_err("UBI error: incorrect bytes count: \"%s\"\n", str);
1385 return -EINVAL; 1392 return -EINVAL;
1386 } 1393 }
1387 1394
@@ -1408,20 +1415,20 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
1408 return -EINVAL; 1415 return -EINVAL;
1409 1416
1410 if (mtd_devs == UBI_MAX_DEVICES) { 1417 if (mtd_devs == UBI_MAX_DEVICES) {
1411 ubi_err("too many parameters, max. is %d\n", 1418 pr_err("UBI error: too many parameters, max. is %d\n",
1412 UBI_MAX_DEVICES); 1419 UBI_MAX_DEVICES);
1413 return -EINVAL; 1420 return -EINVAL;
1414 } 1421 }
1415 1422
1416 len = strnlen(val, MTD_PARAM_LEN_MAX); 1423 len = strnlen(val, MTD_PARAM_LEN_MAX);
1417 if (len == MTD_PARAM_LEN_MAX) { 1424 if (len == MTD_PARAM_LEN_MAX) {
1418 ubi_err("parameter \"%s\" is too long, max. is %d\n", 1425 pr_err("UBI error: parameter \"%s\" is too long, max. is %d\n",
1419 val, MTD_PARAM_LEN_MAX); 1426 val, MTD_PARAM_LEN_MAX);
1420 return -EINVAL; 1427 return -EINVAL;
1421 } 1428 }
1422 1429
1423 if (len == 0) { 1430 if (len == 0) {
1424 pr_warn("UBI warning: empty 'mtd=' parameter - ignored\n"); 1431 pr_err("UBI warning: empty 'mtd=' parameter - ignored\n");
1425 return 0; 1432 return 0;
1426 } 1433 }
1427 1434
@@ -1435,7 +1442,7 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
1435 tokens[i] = strsep(&pbuf, ","); 1442 tokens[i] = strsep(&pbuf, ",");
1436 1443
1437 if (pbuf) { 1444 if (pbuf) {
1438 ubi_err("too many arguments at \"%s\"\n", val); 1445 pr_err("UBI error: too many arguments at \"%s\"\n", val);
1439 return -EINVAL; 1446 return -EINVAL;
1440 } 1447 }
1441 1448
@@ -1455,8 +1462,8 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
1455 int err = kstrtoint(token, 10, &p->max_beb_per1024); 1462 int err = kstrtoint(token, 10, &p->max_beb_per1024);
1456 1463
1457 if (err) { 1464 if (err) {
1458 ubi_err("bad value for max_beb_per1024 parameter: %s", 1465 pr_err("UBI error: bad value for max_beb_per1024 parameter: %s",
1459 token); 1466 token);
1460 return -EINVAL; 1467 return -EINVAL;
1461 } 1468 }
1462 } 1469 }
@@ -1466,7 +1473,8 @@ static int __init ubi_mtd_param_parse(const char *val, struct kernel_param *kp)
1466 int err = kstrtoint(token, 10, &p->ubi_num); 1473 int err = kstrtoint(token, 10, &p->ubi_num);
1467 1474
1468 if (err) { 1475 if (err) {
1469 ubi_err("bad value for ubi_num parameter: %s", token); 1476 pr_err("UBI error: bad value for ubi_num parameter: %s",
1477 token);
1470 return -EINVAL; 1478 return -EINVAL;
1471 } 1479 }
1472 } else 1480 } else
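
The build.c hunks above set the pattern for the whole series: every
ubi_msg()/ubi_warn()/ubi_err() call gains a struct ubi_device argument so
the printed message can identify the device it concerns, while call sites
that run before any UBI device exists (ubi_init(), the 'mtd=' parameter
parser) fall back to plain pr_err() with a literal "UBI error: " prefix.
The reworked macros live in ubi.h and are not part of these hunks; below
is a minimal sketch of the shape they presumably take, assuming
UBI_NAME_STR expands to "ubi" as elsewhere in the driver:

	/*
	 * Illustrative only -- the real definitions are in
	 * drivers/mtd/ubi/ubi.h and may differ. The point is that each
	 * macro now dereferences a ubi_device so it can prefix the
	 * message with the device number.
	 */
	#define ubi_msg(ubi, fmt, ...) \
		pr_notice(UBI_NAME_STR "%d: " fmt "\n", \
			  (ubi)->ubi_num, ##__VA_ARGS__)
	#define ubi_warn(ubi, fmt, ...) \
		pr_warn(UBI_NAME_STR "%d warning: " fmt "\n", \
			(ubi)->ubi_num, ##__VA_ARGS__)
	#define ubi_err(ubi, fmt, ...) \
		pr_err(UBI_NAME_STR "%d error: " fmt "\n", \
		       (ubi)->ubi_num, ##__VA_ARGS__)

With the device number in the prefix, messages such as "detaching mtd%d"
no longer need to repeat ubi_num in the format string, which is why
several hunks above also shorten the message text itself.
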
diff --git a/drivers/mtd/ubi/cdev.c b/drivers/mtd/ubi/cdev.c
index 59de69a24e40..3410ea8109f8 100644
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -48,13 +48,14 @@
48 48
49/** 49/**
50 * get_exclusive - get exclusive access to an UBI volume. 50 * get_exclusive - get exclusive access to an UBI volume.
51 * @ubi: UBI device description object
51 * @desc: volume descriptor 52 * @desc: volume descriptor
52 * 53 *
53 * This function changes UBI volume open mode to "exclusive". Returns previous 54 * This function changes UBI volume open mode to "exclusive". Returns previous
54 * mode value (positive integer) in case of success and a negative error code 55 * mode value (positive integer) in case of success and a negative error code
55 * in case of failure. 56 * in case of failure.
56 */ 57 */
57static int get_exclusive(struct ubi_volume_desc *desc) 58static int get_exclusive(struct ubi_device *ubi, struct ubi_volume_desc *desc)
58{ 59{
59 int users, err; 60 int users, err;
60 struct ubi_volume *vol = desc->vol; 61 struct ubi_volume *vol = desc->vol;
@@ -63,7 +64,7 @@ static int get_exclusive(struct ubi_volume_desc *desc)
63 users = vol->readers + vol->writers + vol->exclusive; 64 users = vol->readers + vol->writers + vol->exclusive;
64 ubi_assert(users > 0); 65 ubi_assert(users > 0);
65 if (users > 1) { 66 if (users > 1) {
66 ubi_err("%d users for volume %d", users, vol->vol_id); 67 ubi_err(ubi, "%d users for volume %d", users, vol->vol_id);
67 err = -EBUSY; 68 err = -EBUSY;
68 } else { 69 } else {
69 vol->readers = vol->writers = 0; 70 vol->readers = vol->writers = 0;
@@ -134,7 +135,7 @@ static int vol_cdev_release(struct inode *inode, struct file *file)
134 vol->ubi->ubi_num, vol->vol_id, desc->mode); 135 vol->ubi->ubi_num, vol->vol_id, desc->mode);
135 136
136 if (vol->updating) { 137 if (vol->updating) {
137 ubi_warn("update of volume %d not finished, volume is damaged", 138 ubi_warn(vol->ubi, "update of volume %d not finished, volume is damaged",
138 vol->vol_id); 139 vol->vol_id);
139 ubi_assert(!vol->changing_leb); 140 ubi_assert(!vol->changing_leb);
140 vol->updating = 0; 141 vol->updating = 0;
@@ -158,7 +159,7 @@ static loff_t vol_cdev_llseek(struct file *file, loff_t offset, int origin)
158 159
159 if (vol->updating) { 160 if (vol->updating) {
160 /* Update is in progress, seeking is prohibited */ 161 /* Update is in progress, seeking is prohibited */
161 ubi_err("updating"); 162 ubi_err(vol->ubi, "updating");
162 return -EBUSY; 163 return -EBUSY;
163 } 164 }
164 165
@@ -193,11 +194,11 @@ static ssize_t vol_cdev_read(struct file *file, __user char *buf, size_t count,
193 count, *offp, vol->vol_id); 194 count, *offp, vol->vol_id);
194 195
195 if (vol->updating) { 196 if (vol->updating) {
196 ubi_err("updating"); 197 ubi_err(vol->ubi, "updating");
197 return -EBUSY; 198 return -EBUSY;
198 } 199 }
199 if (vol->upd_marker) { 200 if (vol->upd_marker) {
200 ubi_err("damaged volume, update marker is set"); 201 ubi_err(vol->ubi, "damaged volume, update marker is set");
201 return -EBADF; 202 return -EBADF;
202 } 203 }
203 if (*offp == vol->used_bytes || count == 0) 204 if (*offp == vol->used_bytes || count == 0)
@@ -277,7 +278,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
277 278
278 lnum = div_u64_rem(*offp, vol->usable_leb_size, &off); 279 lnum = div_u64_rem(*offp, vol->usable_leb_size, &off);
279 if (off & (ubi->min_io_size - 1)) { 280 if (off & (ubi->min_io_size - 1)) {
280 ubi_err("unaligned position"); 281 ubi_err(ubi, "unaligned position");
281 return -EINVAL; 282 return -EINVAL;
282 } 283 }
283 284
@@ -286,7 +287,7 @@ static ssize_t vol_cdev_direct_write(struct file *file, const char __user *buf,
286 287
287 /* We can write only in fractions of the minimum I/O unit */ 288 /* We can write only in fractions of the minimum I/O unit */
288 if (count & (ubi->min_io_size - 1)) { 289 if (count & (ubi->min_io_size - 1)) {
289 ubi_err("unaligned write length"); 290 ubi_err(ubi, "unaligned write length");
290 return -EINVAL; 291 return -EINVAL;
291 } 292 }
292 293
@@ -348,7 +349,7 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
348 err = ubi_more_leb_change_data(ubi, vol, buf, count); 349 err = ubi_more_leb_change_data(ubi, vol, buf, count);
349 350
350 if (err < 0) { 351 if (err < 0) {
351 ubi_err("cannot accept more %zd bytes of data, error %d", 352 ubi_err(ubi, "cannot accept more %zd bytes of data, error %d",
352 count, err); 353 count, err);
353 return err; 354 return err;
354 } 355 }
@@ -370,7 +371,7 @@ static ssize_t vol_cdev_write(struct file *file, const char __user *buf,
370 return err; 371 return err;
371 372
372 if (err) { 373 if (err) {
373 ubi_warn("volume %d on UBI device %d is corrupted", 374 ubi_warn(ubi, "volume %d on UBI device %d is corrupted",
374 vol->vol_id, ubi->ubi_num); 375 vol->vol_id, ubi->ubi_num);
375 vol->corrupted = 1; 376 vol->corrupted = 1;
376 } 377 }
@@ -420,7 +421,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
420 break; 421 break;
421 } 422 }
422 423
423 err = get_exclusive(desc); 424 err = get_exclusive(ubi, desc);
424 if (err < 0) 425 if (err < 0)
425 break; 426 break;
426 427
@@ -456,7 +457,7 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
456 req.bytes < 0 || req.lnum >= vol->usable_leb_size) 457 req.bytes < 0 || req.lnum >= vol->usable_leb_size)
457 break; 458 break;
458 459
459 err = get_exclusive(desc); 460 err = get_exclusive(ubi, desc);
460 if (err < 0) 461 if (err < 0)
461 break; 462 break;
462 463
@@ -642,7 +643,7 @@ static int verify_mkvol_req(const struct ubi_device *ubi,
642 return 0; 643 return 0;
643 644
644bad: 645bad:
645 ubi_err("bad volume creation request"); 646 ubi_err(ubi, "bad volume creation request");
646 ubi_dump_mkvol_req(req); 647 ubi_dump_mkvol_req(req);
647 return err; 648 return err;
648} 649}
@@ -708,12 +709,12 @@ static int rename_volumes(struct ubi_device *ubi,
708 for (i = 0; i < req->count - 1; i++) { 709 for (i = 0; i < req->count - 1; i++) {
709 for (n = i + 1; n < req->count; n++) { 710 for (n = i + 1; n < req->count; n++) {
710 if (req->ents[i].vol_id == req->ents[n].vol_id) { 711 if (req->ents[i].vol_id == req->ents[n].vol_id) {
711 ubi_err("duplicated volume id %d", 712 ubi_err(ubi, "duplicated volume id %d",
712 req->ents[i].vol_id); 713 req->ents[i].vol_id);
713 return -EINVAL; 714 return -EINVAL;
714 } 715 }
715 if (!strcmp(req->ents[i].name, req->ents[n].name)) { 716 if (!strcmp(req->ents[i].name, req->ents[n].name)) {
716 ubi_err("duplicated volume name \"%s\"", 717 ubi_err(ubi, "duplicated volume name \"%s\"",
717 req->ents[i].name); 718 req->ents[i].name);
718 return -EINVAL; 719 return -EINVAL;
719 } 720 }
@@ -736,7 +737,8 @@ static int rename_volumes(struct ubi_device *ubi,
736 re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_READWRITE); 737 re->desc = ubi_open_volume(ubi->ubi_num, vol_id, UBI_READWRITE);
737 if (IS_ERR(re->desc)) { 738 if (IS_ERR(re->desc)) {
738 err = PTR_ERR(re->desc); 739 err = PTR_ERR(re->desc);
739 ubi_err("cannot open volume %d, error %d", vol_id, err); 740 ubi_err(ubi, "cannot open volume %d, error %d",
741 vol_id, err);
740 kfree(re); 742 kfree(re);
741 goto out_free; 743 goto out_free;
742 } 744 }
@@ -795,7 +797,7 @@ static int rename_volumes(struct ubi_device *ubi,
795 continue; 797 continue;
796 798
797 /* The volume exists but is busy, or an error occurred */ 799 /* The volume exists but is busy, or an error occurred */
798 ubi_err("cannot open volume \"%s\", error %d", 800 ubi_err(ubi, "cannot open volume \"%s\", error %d",
799 re->new_name, err); 801 re->new_name, err);
800 goto out_free; 802 goto out_free;
801 } 803 }
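
The cdev.c changes show the knock-on effect on static helpers:
get_exclusive() has no struct ubi_device of its own in scope, so one is
threaded through from the ioctl paths purely so its "%d users for volume
%d" error can name the device. A hedged sketch of the resulting calling
convention follows; the desc->vol->ubi lookup is an assumption about how
a caller reaches the device object, not code from this patch:

	/* Illustrative caller: fetch the device from the open volume,
	 * then pass it alongside the descriptor as the hunks above do. */
	static int try_get_exclusive(struct ubi_volume_desc *desc)
	{
		struct ubi_device *ubi = desc->vol->ubi;

		return get_exclusive(ubi, desc);
	}
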
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 63cb1d7236ce..7335c9ff9d99 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -43,12 +43,12 @@ void ubi_dump_flash(struct ubi_device *ubi, int pnum, int offset, int len)
43 return; 43 return;
44 err = mtd_read(ubi->mtd, addr, len, &read, buf); 44 err = mtd_read(ubi->mtd, addr, len, &read, buf);
45 if (err && err != -EUCLEAN) { 45 if (err && err != -EUCLEAN) {
46 ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes", 46 ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes",
47 err, len, pnum, offset, read); 47 err, len, pnum, offset, read);
48 goto out; 48 goto out;
49 } 49 }
50 50
51 ubi_msg("dumping %d bytes of data from PEB %d, offset %d", 51 ubi_msg(ubi, "dumping %d bytes of data from PEB %d, offset %d",
52 len, pnum, offset); 52 len, pnum, offset);
53 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1); 53 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
54out: 54out:
@@ -238,8 +238,8 @@ int ubi_debugfs_init(void)
238 if (IS_ERR_OR_NULL(dfs_rootdir)) { 238 if (IS_ERR_OR_NULL(dfs_rootdir)) {
239 int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir); 239 int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
240 240
241 ubi_err("cannot create \"ubi\" debugfs directory, error %d\n", 241 pr_err("UBI error: cannot create \"ubi\" debugfs directory, error %d\n",
242 err); 242 err);
243 return err; 243 return err;
244 } 244 }
245 245
@@ -433,7 +433,7 @@ out_remove:
433 debugfs_remove_recursive(d->dfs_dir); 433 debugfs_remove_recursive(d->dfs_dir);
434out: 434out:
435 err = dent ? PTR_ERR(dent) : -ENODEV; 435 err = dent ? PTR_ERR(dent) : -ENODEV;
436 ubi_err("cannot create \"%s\" debugfs file or directory, error %d\n", 436 ubi_err(ubi, "cannot create \"%s\" debugfs file or directory, error %d\n",
437 fname, err); 437 fname, err);
438 return err; 438 return err;
439} 439}
diff --git a/drivers/mtd/ubi/eba.c b/drivers/mtd/ubi/eba.c
index 2402d3b50171..a40020cf0923 100644
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -422,7 +422,7 @@ retry:
422 */ 422 */
423 if (err == UBI_IO_BAD_HDR_EBADMSG || 423 if (err == UBI_IO_BAD_HDR_EBADMSG ||
424 err == UBI_IO_BAD_HDR) { 424 err == UBI_IO_BAD_HDR) {
425 ubi_warn("corrupted VID header at PEB %d, LEB %d:%d", 425 ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
426 pnum, vol_id, lnum); 426 pnum, vol_id, lnum);
427 err = -EBADMSG; 427 err = -EBADMSG;
428 } else 428 } else
@@ -448,7 +448,7 @@ retry:
448 goto out_unlock; 448 goto out_unlock;
449 scrub = 1; 449 scrub = 1;
450 if (!check) { 450 if (!check) {
451 ubi_msg("force data checking"); 451 ubi_msg(ubi, "force data checking");
452 check = 1; 452 check = 1;
453 goto retry; 453 goto retry;
454 } 454 }
@@ -459,7 +459,7 @@ retry:
459 if (check) { 459 if (check) {
460 uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len); 460 uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
461 if (crc1 != crc) { 461 if (crc1 != crc) {
462 ubi_warn("CRC error: calculated %#08x, must be %#08x", 462 ubi_warn(ubi, "CRC error: calculated %#08x, must be %#08x",
463 crc1, crc); 463 crc1, crc);
464 err = -EBADMSG; 464 err = -EBADMSG;
465 goto out_unlock; 465 goto out_unlock;
@@ -513,7 +513,8 @@ retry:
513 return new_pnum; 513 return new_pnum;
514 } 514 }
515 515
516 ubi_msg("recover PEB %d, move data to PEB %d", pnum, new_pnum); 516 ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
517 pnum, new_pnum);
517 518
518 err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1); 519 err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
519 if (err && err != UBI_IO_BITFLIPS) { 520 if (err && err != UBI_IO_BITFLIPS) {
@@ -554,7 +555,7 @@ retry:
554 up_read(&ubi->fm_sem); 555 up_read(&ubi->fm_sem);
555 ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1); 556 ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
556 557
557 ubi_msg("data was successfully recovered"); 558 ubi_msg(ubi, "data was successfully recovered");
558 return 0; 559 return 0;
559 560
560out_unlock: 561out_unlock:
@@ -569,13 +570,13 @@ write_error:
569 * Bad luck? This physical eraseblock is bad too? Crud. Let's try to 570 * Bad luck? This physical eraseblock is bad too? Crud. Let's try to
570 * get another one. 571 * get another one.
571 */ 572 */
572 ubi_warn("failed to write to PEB %d", new_pnum); 573 ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
573 ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1); 574 ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
574 if (++tries > UBI_IO_RETRIES) { 575 if (++tries > UBI_IO_RETRIES) {
575 ubi_free_vid_hdr(ubi, vid_hdr); 576 ubi_free_vid_hdr(ubi, vid_hdr);
576 return err; 577 return err;
577 } 578 }
578 ubi_msg("try again"); 579 ubi_msg(ubi, "try again");
579 goto retry; 580 goto retry;
580} 581}
581 582
@@ -613,7 +614,7 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
613 614
614 err = ubi_io_write_data(ubi, buf, pnum, offset, len); 615 err = ubi_io_write_data(ubi, buf, pnum, offset, len);
615 if (err) { 616 if (err) {
616 ubi_warn("failed to write data to PEB %d", pnum); 617 ubi_warn(ubi, "failed to write data to PEB %d", pnum);
617 if (err == -EIO && ubi->bad_allowed) 618 if (err == -EIO && ubi->bad_allowed)
618 err = recover_peb(ubi, pnum, vol_id, lnum, buf, 619 err = recover_peb(ubi, pnum, vol_id, lnum, buf,
619 offset, len); 620 offset, len);
@@ -654,7 +655,7 @@ retry:
654 655
655 err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); 656 err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
656 if (err) { 657 if (err) {
657 ubi_warn("failed to write VID header to LEB %d:%d, PEB %d", 658 ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
658 vol_id, lnum, pnum); 659 vol_id, lnum, pnum);
659 goto write_error; 660 goto write_error;
660 } 661 }
@@ -662,7 +663,7 @@ retry:
662 if (len) { 663 if (len) {
663 err = ubi_io_write_data(ubi, buf, pnum, offset, len); 664 err = ubi_io_write_data(ubi, buf, pnum, offset, len);
664 if (err) { 665 if (err) {
665 ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, PEB %d", 666 ubi_warn(ubi, "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
666 len, offset, vol_id, lnum, pnum); 667 len, offset, vol_id, lnum, pnum);
667 goto write_error; 668 goto write_error;
668 } 669 }
@@ -698,7 +699,7 @@ write_error:
698 } 699 }
699 700
700 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 701 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
701 ubi_msg("try another PEB"); 702 ubi_msg(ubi, "try another PEB");
702 goto retry; 703 goto retry;
703} 704}
704 705
@@ -775,14 +776,14 @@ retry:
775 776
776 err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); 777 err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
777 if (err) { 778 if (err) {
778 ubi_warn("failed to write VID header to LEB %d:%d, PEB %d", 779 ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
779 vol_id, lnum, pnum); 780 vol_id, lnum, pnum);
780 goto write_error; 781 goto write_error;
781 } 782 }
782 783
783 err = ubi_io_write_data(ubi, buf, pnum, 0, len); 784 err = ubi_io_write_data(ubi, buf, pnum, 0, len);
784 if (err) { 785 if (err) {
785 ubi_warn("failed to write %d bytes of data to PEB %d", 786 ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
786 len, pnum); 787 len, pnum);
787 goto write_error; 788 goto write_error;
788 } 789 }
@@ -818,7 +819,7 @@ write_error:
818 } 819 }
819 820
820 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 821 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
821 ubi_msg("try another PEB"); 822 ubi_msg(ubi, "try another PEB");
822 goto retry; 823 goto retry;
823} 824}
824 825
@@ -893,14 +894,14 @@ retry:
893 894
894 err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr); 895 err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
895 if (err) { 896 if (err) {
896 ubi_warn("failed to write VID header to LEB %d:%d, PEB %d", 897 ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
897 vol_id, lnum, pnum); 898 vol_id, lnum, pnum);
898 goto write_error; 899 goto write_error;
899 } 900 }
900 901
901 err = ubi_io_write_data(ubi, buf, pnum, 0, len); 902 err = ubi_io_write_data(ubi, buf, pnum, 0, len);
902 if (err) { 903 if (err) {
903 ubi_warn("failed to write %d bytes of data to PEB %d", 904 ubi_warn(ubi, "failed to write %d bytes of data to PEB %d",
904 len, pnum); 905 len, pnum);
905 goto write_error; 906 goto write_error;
906 } 907 }
@@ -940,7 +941,7 @@ write_error:
940 } 941 }
941 942
942 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi)); 943 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
943 ubi_msg("try another PEB"); 944 ubi_msg(ubi, "try another PEB");
944 goto retry; 945 goto retry;
945} 946}
946 947
@@ -1063,7 +1064,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1063 dbg_wl("read %d bytes of data", aldata_size); 1064 dbg_wl("read %d bytes of data", aldata_size);
1064 err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size); 1065 err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
1065 if (err && err != UBI_IO_BITFLIPS) { 1066 if (err && err != UBI_IO_BITFLIPS) {
1066 ubi_warn("error %d while reading data from PEB %d", 1067 ubi_warn(ubi, "error %d while reading data from PEB %d",
1067 err, from); 1068 err, from);
1068 err = MOVE_SOURCE_RD_ERR; 1069 err = MOVE_SOURCE_RD_ERR;
1069 goto out_unlock_buf; 1070 goto out_unlock_buf;
@@ -1113,7 +1114,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1113 err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1); 1114 err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
1114 if (err) { 1115 if (err) {
1115 if (err != UBI_IO_BITFLIPS) { 1116 if (err != UBI_IO_BITFLIPS) {
1116 ubi_warn("error %d while reading VID header back from PEB %d", 1117 ubi_warn(ubi, "error %d while reading VID header back from PEB %d",
1117 err, to); 1118 err, to);
1118 if (is_error_sane(err)) 1119 if (is_error_sane(err))
1119 err = MOVE_TARGET_RD_ERR; 1120 err = MOVE_TARGET_RD_ERR;
@@ -1140,7 +1141,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1140 err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size); 1141 err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
1141 if (err) { 1142 if (err) {
1142 if (err != UBI_IO_BITFLIPS) { 1143 if (err != UBI_IO_BITFLIPS) {
1143 ubi_warn("error %d while reading data back from PEB %d", 1144 ubi_warn(ubi, "error %d while reading data back from PEB %d",
1144 err, to); 1145 err, to);
1145 if (is_error_sane(err)) 1146 if (is_error_sane(err))
1146 err = MOVE_TARGET_RD_ERR; 1147 err = MOVE_TARGET_RD_ERR;
@@ -1152,7 +1153,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1152 cond_resched(); 1153 cond_resched();
1153 1154
1154 if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) { 1155 if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
1155 ubi_warn("read data back from PEB %d and it is different", 1156 ubi_warn(ubi, "read data back from PEB %d and it is different",
1156 to); 1157 to);
1157 err = -EINVAL; 1158 err = -EINVAL;
1158 goto out_unlock_buf; 1159 goto out_unlock_buf;
@@ -1205,10 +1206,10 @@ static void print_rsvd_warning(struct ubi_device *ubi,
1205 return; 1206 return;
1206 } 1207 }
1207 1208
1208 ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d", 1209 ubi_warn(ubi, "cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
1209 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level); 1210 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
1210 if (ubi->corr_peb_count) 1211 if (ubi->corr_peb_count)
1211 ubi_warn("%d PEBs are corrupted and not used", 1212 ubi_warn(ubi, "%d PEBs are corrupted and not used",
1212 ubi->corr_peb_count); 1213 ubi->corr_peb_count);
1213} 1214}
1214 1215
@@ -1286,7 +1287,7 @@ int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
1286 fm_eba[i][j] == UBI_LEB_UNMAPPED) 1287 fm_eba[i][j] == UBI_LEB_UNMAPPED)
1287 continue; 1288 continue;
1288 1289
1289 ubi_err("LEB:%i:%i is PEB:%i instead of %i!", 1290 ubi_err(ubi, "LEB:%i:%i is PEB:%i instead of %i!",
1290 vol->vol_id, i, fm_eba[i][j], 1291 vol->vol_id, i, fm_eba[i][j],
1291 scan_eba[i][j]); 1292 scan_eba[i][j]);
1292 ubi_assert(0); 1293 ubi_assert(0);
@@ -1366,10 +1367,10 @@ int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1366 } 1367 }
1367 1368
1368 if (ubi->avail_pebs < EBA_RESERVED_PEBS) { 1369 if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
1369 ubi_err("not enough physical eraseblocks (%d, need %d)", 1370 ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
1370 ubi->avail_pebs, EBA_RESERVED_PEBS); 1371 ubi->avail_pebs, EBA_RESERVED_PEBS);
1371 if (ubi->corr_peb_count) 1372 if (ubi->corr_peb_count)
1372 ubi_err("%d PEBs are corrupted and not used", 1373 ubi_err(ubi, "%d PEBs are corrupted and not used",
1373 ubi->corr_peb_count); 1374 ubi->corr_peb_count);
1374 err = -ENOSPC; 1375 err = -ENOSPC;
1375 goto out_free; 1376 goto out_free;
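
All three eba.c write paths touched above share the same recovery idiom:
on a write failure, warn, hand the suspect PEB back to the wear-levelling
layer for torture testing, bump the sequence number, and retry on a fresh
PEB a bounded number of times. Condensed into one hypothetical helper
(real symbols: ubi_wl_get_peb, ubi_wl_put_peb, ubi_io_write_vid_hdr,
ubi_next_sqnum, UBI_IO_RETRIES; the error-class checks of the real code
are omitted for brevity):

	/* Sketch of the "try another PEB" loop, not the actual function. */
	static int write_vid_with_retries(struct ubi_device *ubi,
					  struct ubi_vid_hdr *vid_hdr,
					  int vol_id, int lnum)
	{
		int err, pnum, tries = 0;

	retry:
		pnum = ubi_wl_get_peb(ubi);
		if (pnum < 0)
			return pnum;

		err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
		if (!err)
			return pnum;

		ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
			 vol_id, lnum, pnum);
		/* Give the PEB back and ask for it to be tortured. */
		ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
		if (++tries > UBI_IO_RETRIES)
			return err;

		vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
		ubi_msg(ubi, "try another PEB");
		goto retry;
	}
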
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index cfd5b5e90156..b56672bf3294 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -330,7 +330,7 @@ static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
330 if (found) 330 if (found)
331 av = tmp_av; 331 av = tmp_av;
332 else { 332 else {
333 ubi_err("orphaned volume in fastmap pool!"); 333 ubi_err(ubi, "orphaned volume in fastmap pool!");
334 kmem_cache_free(ai->aeb_slab_cache, new_aeb); 334 kmem_cache_free(ai->aeb_slab_cache, new_aeb);
335 return UBI_BAD_FASTMAP; 335 return UBI_BAD_FASTMAP;
336 } 336 }
@@ -414,14 +414,14 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
414 pnum = be32_to_cpu(pebs[i]); 414 pnum = be32_to_cpu(pebs[i]);
415 415
416 if (ubi_io_is_bad(ubi, pnum)) { 416 if (ubi_io_is_bad(ubi, pnum)) {
417 ubi_err("bad PEB in fastmap pool!"); 417 ubi_err(ubi, "bad PEB in fastmap pool!");
418 ret = UBI_BAD_FASTMAP; 418 ret = UBI_BAD_FASTMAP;
419 goto out; 419 goto out;
420 } 420 }
421 421
422 err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); 422 err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
423 if (err && err != UBI_IO_BITFLIPS) { 423 if (err && err != UBI_IO_BITFLIPS) {
424 ubi_err("unable to read EC header! PEB:%i err:%i", 424 ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
425 pnum, err); 425 pnum, err);
426 ret = err > 0 ? UBI_BAD_FASTMAP : err; 426 ret = err > 0 ? UBI_BAD_FASTMAP : err;
427 goto out; 427 goto out;
@@ -435,7 +435,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
435 image_seq = be32_to_cpu(ech->image_seq); 435 image_seq = be32_to_cpu(ech->image_seq);
436 436
437 if (image_seq && (image_seq != ubi->image_seq)) { 437 if (image_seq && (image_seq != ubi->image_seq)) {
438 ubi_err("bad image seq: 0x%x, expected: 0x%x", 438 ubi_err(ubi, "bad image seq: 0x%x, expected: 0x%x",
439 be32_to_cpu(ech->image_seq), ubi->image_seq); 439 be32_to_cpu(ech->image_seq), ubi->image_seq);
440 ret = UBI_BAD_FASTMAP; 440 ret = UBI_BAD_FASTMAP;
441 goto out; 441 goto out;
@@ -493,7 +493,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
493 } 493 }
494 } else { 494 } else {
495 /* We are paranoid and fall back to scanning mode */ 495 /* We are paranoid and fall back to scanning mode */
496 ubi_err("fastmap pool PEBs contain damaged PEBs!"); 496 ubi_err(ubi, "fastmap pool PEBs contain damaged PEBs!");
497 ret = err > 0 ? UBI_BAD_FASTMAP : err; 497 ret = err > 0 ? UBI_BAD_FASTMAP : err;
498 goto out; 498 goto out;
499 } 499 }
@@ -588,7 +588,7 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
588 goto fail_bad; 588 goto fail_bad;
589 589
590 if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) { 590 if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
591 ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x", 591 ubi_err(ubi, "bad fastmap header magic: 0x%x, expected: 0x%x",
592 be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC); 592 be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
593 goto fail_bad; 593 goto fail_bad;
594 } 594 }
@@ -598,7 +598,7 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
598 if (fm_pos >= fm_size) 598 if (fm_pos >= fm_size)
599 goto fail_bad; 599 goto fail_bad;
600 if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) { 600 if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
601 ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x", 601 ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
602 be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC); 602 be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
603 goto fail_bad; 603 goto fail_bad;
604 } 604 }
@@ -608,7 +608,7 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
608 if (fm_pos >= fm_size) 608 if (fm_pos >= fm_size)
609 goto fail_bad; 609 goto fail_bad;
610 if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) { 610 if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
611 ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x", 611 ubi_err(ubi, "bad fastmap pool magic: 0x%x, expected: 0x%x",
612 be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC); 612 be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
613 goto fail_bad; 613 goto fail_bad;
614 } 614 }
@@ -619,25 +619,26 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
619 fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size); 619 fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);
620 620
621 if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) { 621 if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
622 ubi_err("bad pool size: %i", pool_size); 622 ubi_err(ubi, "bad pool size: %i", pool_size);
623 goto fail_bad; 623 goto fail_bad;
624 } 624 }
625 625
626 if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) { 626 if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
627 ubi_err("bad WL pool size: %i", wl_pool_size); 627 ubi_err(ubi, "bad WL pool size: %i", wl_pool_size);
628 goto fail_bad; 628 goto fail_bad;
629 } 629 }
630 630
631 631
632 if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE || 632 if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
633 fm->max_pool_size < 0) { 633 fm->max_pool_size < 0) {
634 ubi_err("bad maximal pool size: %i", fm->max_pool_size); 634 ubi_err(ubi, "bad maximal pool size: %i", fm->max_pool_size);
635 goto fail_bad; 635 goto fail_bad;
636 } 636 }
637 637
638 if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE || 638 if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
639 fm->max_wl_pool_size < 0) { 639 fm->max_wl_pool_size < 0) {
640 ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size); 640 ubi_err(ubi, "bad maximal WL pool size: %i",
641 fm->max_wl_pool_size);
641 goto fail_bad; 642 goto fail_bad;
642 } 643 }
643 644
@@ -696,8 +697,7 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
696 goto fail_bad; 697 goto fail_bad;
697 698
698 if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) { 699 if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
699 ubi_err("bad fastmap vol header magic: 0x%x, " \ 700 ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
700 "expected: 0x%x",
701 be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC); 701 be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
702 goto fail_bad; 702 goto fail_bad;
703 } 703 }
@@ -722,8 +722,7 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
722 goto fail_bad; 722 goto fail_bad;
723 723
724 if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) { 724 if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
725 ubi_err("bad fastmap EBA header magic: 0x%x, " \ 725 ubi_err(ubi, "bad fastmap EBA header magic: 0x%x, expected: 0x%x",
726 "expected: 0x%x",
727 be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC); 726 be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
728 goto fail_bad; 727 goto fail_bad;
729 } 728 }
@@ -788,7 +787,7 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
788 int err; 787 int err;
789 788
790 if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) { 789 if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) {
791 ubi_err("bad PEB in fastmap EBA orphan list"); 790 ubi_err(ubi, "bad PEB in fastmap EBA orphan list");
792 ret = UBI_BAD_FASTMAP; 791 ret = UBI_BAD_FASTMAP;
793 kfree(ech); 792 kfree(ech);
794 goto fail; 793 goto fail;
@@ -796,8 +795,8 @@ static int ubi_attach_fastmap(struct ubi_device *ubi,
796 795
797 err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0); 796 err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0);
798 if (err && err != UBI_IO_BITFLIPS) { 797 if (err && err != UBI_IO_BITFLIPS) {
799 ubi_err("unable to read EC header! PEB:%i " \ 798 ubi_err(ubi, "unable to read EC header! PEB:%i err:%i",
800 "err:%i", tmp_aeb->pnum, err); 799 tmp_aeb->pnum, err);
801 ret = err > 0 ? UBI_BAD_FASTMAP : err; 800 ret = err > 0 ? UBI_BAD_FASTMAP : err;
802 kfree(ech); 801 kfree(ech);
803 802
@@ -908,14 +907,14 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
908 fm->to_be_tortured[0] = 1; 907 fm->to_be_tortured[0] = 1;
909 908
910 if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) { 909 if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
911 ubi_err("bad super block magic: 0x%x, expected: 0x%x", 910 ubi_err(ubi, "bad super block magic: 0x%x, expected: 0x%x",
912 be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC); 911 be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
913 ret = UBI_BAD_FASTMAP; 912 ret = UBI_BAD_FASTMAP;
914 goto free_fm_sb; 913 goto free_fm_sb;
915 } 914 }
916 915
917 if (fmsb->version != UBI_FM_FMT_VERSION) { 916 if (fmsb->version != UBI_FM_FMT_VERSION) {
918 ubi_err("bad fastmap version: %i, expected: %i", 917 ubi_err(ubi, "bad fastmap version: %i, expected: %i",
919 fmsb->version, UBI_FM_FMT_VERSION); 918 fmsb->version, UBI_FM_FMT_VERSION);
920 ret = UBI_BAD_FASTMAP; 919 ret = UBI_BAD_FASTMAP;
921 goto free_fm_sb; 920 goto free_fm_sb;
@@ -923,15 +922,16 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
923 922
924 used_blocks = be32_to_cpu(fmsb->used_blocks); 923 used_blocks = be32_to_cpu(fmsb->used_blocks);
925 if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) { 924 if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
926 ubi_err("number of fastmap blocks is invalid: %i", used_blocks); 925 ubi_err(ubi, "number of fastmap blocks is invalid: %i",
926 used_blocks);
927 ret = UBI_BAD_FASTMAP; 927 ret = UBI_BAD_FASTMAP;
928 goto free_fm_sb; 928 goto free_fm_sb;
929 } 929 }
930 930
931 fm_size = ubi->leb_size * used_blocks; 931 fm_size = ubi->leb_size * used_blocks;
932 if (fm_size != ubi->fm_size) { 932 if (fm_size != ubi->fm_size) {
933 ubi_err("bad fastmap size: %zi, expected: %zi", fm_size, 933 ubi_err(ubi, "bad fastmap size: %zi, expected: %zi",
934 ubi->fm_size); 934 fm_size, ubi->fm_size);
935 ret = UBI_BAD_FASTMAP; 935 ret = UBI_BAD_FASTMAP;
936 goto free_fm_sb; 936 goto free_fm_sb;
937 } 937 }
@@ -960,7 +960,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
960 960
961 ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0); 961 ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
962 if (ret && ret != UBI_IO_BITFLIPS) { 962 if (ret && ret != UBI_IO_BITFLIPS) {
963 ubi_err("unable to read fastmap block# %i EC (PEB: %i)", 963 ubi_err(ubi, "unable to read fastmap block# %i EC (PEB: %i)",
964 i, pnum); 964 i, pnum);
965 if (ret > 0) 965 if (ret > 0)
966 ret = UBI_BAD_FASTMAP; 966 ret = UBI_BAD_FASTMAP;
@@ -977,7 +977,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
977 * we shouldn't fail if image_seq == 0. 977 * we shouldn't fail if image_seq == 0.
978 */ 978 */
979 if (image_seq && (image_seq != ubi->image_seq)) { 979 if (image_seq && (image_seq != ubi->image_seq)) {
980 ubi_err("wrong image seq:%d instead of %d", 980 ubi_err(ubi, "wrong image seq:%d instead of %d",
981 be32_to_cpu(ech->image_seq), ubi->image_seq); 981 be32_to_cpu(ech->image_seq), ubi->image_seq);
982 ret = UBI_BAD_FASTMAP; 982 ret = UBI_BAD_FASTMAP;
983 goto free_hdr; 983 goto free_hdr;
@@ -985,15 +985,14 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
985 985
986 ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0); 986 ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
987 if (ret && ret != UBI_IO_BITFLIPS) { 987 if (ret && ret != UBI_IO_BITFLIPS) {
988 ubi_err("unable to read fastmap block# %i (PEB: %i)", 988 ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i)",
989 i, pnum); 989 i, pnum);
990 goto free_hdr; 990 goto free_hdr;
991 } 991 }
992 992
993 if (i == 0) { 993 if (i == 0) {
994 if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) { 994 if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
995 ubi_err("bad fastmap anchor vol_id: 0x%x," \ 995 ubi_err(ubi, "bad fastmap anchor vol_id: 0x%x, expected: 0x%x",
996 " expected: 0x%x",
997 be32_to_cpu(vh->vol_id), 996 be32_to_cpu(vh->vol_id),
998 UBI_FM_SB_VOLUME_ID); 997 UBI_FM_SB_VOLUME_ID);
999 ret = UBI_BAD_FASTMAP; 998 ret = UBI_BAD_FASTMAP;
@@ -1001,8 +1000,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
1001 } 1000 }
1002 } else { 1001 } else {
1003 if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) { 1002 if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
1004 ubi_err("bad fastmap data vol_id: 0x%x," \ 1003 ubi_err(ubi, "bad fastmap data vol_id: 0x%x, expected: 0x%x",
1005 " expected: 0x%x",
1006 be32_to_cpu(vh->vol_id), 1004 be32_to_cpu(vh->vol_id),
1007 UBI_FM_DATA_VOLUME_ID); 1005 UBI_FM_DATA_VOLUME_ID);
1008 ret = UBI_BAD_FASTMAP; 1006 ret = UBI_BAD_FASTMAP;
@@ -1016,7 +1014,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
1016 ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum, 1014 ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
1017 ubi->leb_start, ubi->leb_size); 1015 ubi->leb_start, ubi->leb_size);
1018 if (ret && ret != UBI_IO_BITFLIPS) { 1016 if (ret && ret != UBI_IO_BITFLIPS) {
1019 ubi_err("unable to read fastmap block# %i (PEB: %i, " \ 1017 ubi_err(ubi, "unable to read fastmap block# %i (PEB: %i, "
1020 "err: %i)", i, pnum, ret); 1018 "err: %i)", i, pnum, ret);
1021 goto free_hdr; 1019 goto free_hdr;
1022 } 1020 }
@@ -1030,8 +1028,9 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
1030 fmsb2->data_crc = 0; 1028 fmsb2->data_crc = 0;
1031 crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size); 1029 crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
1032 if (crc != tmp_crc) { 1030 if (crc != tmp_crc) {
1033 ubi_err("fastmap data CRC is invalid"); 1031 ubi_err(ubi, "fastmap data CRC is invalid");
1034 ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc); 1032 ubi_err(ubi, "CRC should be: 0x%x, calc: 0x%x",
1033 tmp_crc, crc);
1035 ret = UBI_BAD_FASTMAP; 1034 ret = UBI_BAD_FASTMAP;
1036 goto free_hdr; 1035 goto free_hdr;
1037 } 1036 }
@@ -1067,9 +1066,10 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
1067 ubi->fm = fm; 1066 ubi->fm = fm;
1068 ubi->fm_pool.max_size = ubi->fm->max_pool_size; 1067 ubi->fm_pool.max_size = ubi->fm->max_pool_size;
1069 ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size; 1068 ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
1070 ubi_msg("attached by fastmap"); 1069 ubi_msg(ubi, "attached by fastmap");
1071 ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size); 1070 ubi_msg(ubi, "fastmap pool size: %d", ubi->fm_pool.max_size);
1072 ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size); 1071 ubi_msg(ubi, "fastmap WL pool size: %d",
1072 ubi->fm_wl_pool.max_size);
1073 ubi->fm_disabled = 0; 1073 ubi->fm_disabled = 0;
1074 1074
1075 ubi_free_vid_hdr(ubi, vh); 1075 ubi_free_vid_hdr(ubi, vh);
@@ -1077,7 +1077,7 @@ int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
1077out: 1077out:
1078 mutex_unlock(&ubi->fm_mutex); 1078 mutex_unlock(&ubi->fm_mutex);
1079 if (ret == UBI_BAD_FASTMAP) 1079 if (ret == UBI_BAD_FASTMAP)
1080 ubi_err("Attach by fastmap failed, doing a full scan!"); 1080 ubi_err(ubi, "Attach by fastmap failed, doing a full scan!");
1081 return ret; 1081 return ret;
1082 1082
1083free_hdr: 1083free_hdr:
@@ -1273,7 +1273,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
1273 dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum); 1273 dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
1274 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr); 1274 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
1275 if (ret) { 1275 if (ret) {
1276 ubi_err("unable to write vid_hdr to fastmap SB!"); 1276 ubi_err(ubi, "unable to write vid_hdr to fastmap SB!");
1277 goto out_kfree; 1277 goto out_kfree;
1278 } 1278 }
1279 1279
@@ -1293,7 +1293,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
1293 new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum)); 1293 new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
1294 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr); 1294 ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
1295 if (ret) { 1295 if (ret) {
1296 ubi_err("unable to write vid_hdr to PEB %i!", 1296 ubi_err(ubi, "unable to write vid_hdr to PEB %i!",
1297 new_fm->e[i]->pnum); 1297 new_fm->e[i]->pnum);
1298 goto out_kfree; 1298 goto out_kfree;
1299 } 1299 }
@@ -1303,7 +1303,7 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
1303 ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size), 1303 ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
1304 new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size); 1304 new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
1305 if (ret) { 1305 if (ret) {
1306 ubi_err("unable to write fastmap to PEB %i!", 1306 ubi_err(ubi, "unable to write fastmap to PEB %i!",
1307 new_fm->e[i]->pnum); 1307 new_fm->e[i]->pnum);
1308 goto out_kfree; 1308 goto out_kfree;
1309 } 1309 }
@@ -1450,7 +1450,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
1450 ubi->fm = NULL; 1450 ubi->fm = NULL;
1451 1451
1452 if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) { 1452 if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
1453 ubi_err("fastmap too large"); 1453 ubi_err(ubi, "fastmap too large");
1454 ret = -ENOSPC; 1454 ret = -ENOSPC;
1455 goto err; 1455 goto err;
1456 } 1456 }
@@ -1462,7 +1462,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
1462 1462
1463 if (!tmp_e && !old_fm) { 1463 if (!tmp_e && !old_fm) {
1464 int j; 1464 int j;
1465 ubi_err("could not get any free erase block"); 1465 ubi_err(ubi, "could not get any free erase block");
1466 1466
1467 for (j = 1; j < i; j++) 1467 for (j = 1; j < i; j++)
1468 ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0); 1468 ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
@@ -1478,7 +1478,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
1478 ubi_wl_put_fm_peb(ubi, new_fm->e[j], 1478 ubi_wl_put_fm_peb(ubi, new_fm->e[j],
1479 j, 0); 1479 j, 0);
1480 1480
1481 ubi_err("could not erase old fastmap PEB"); 1481 ubi_err(ubi, "could not erase old fastmap PEB");
1482 goto err; 1482 goto err;
1483 } 1483 }
1484 1484
@@ -1504,7 +1504,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
1504 ret = erase_block(ubi, old_fm->e[0]->pnum); 1504 ret = erase_block(ubi, old_fm->e[0]->pnum);
1505 if (ret < 0) { 1505 if (ret < 0) {
1506 int i; 1506 int i;
1507 ubi_err("could not erase old anchor PEB"); 1507 ubi_err(ubi, "could not erase old anchor PEB");
1508 1508
1509 for (i = 1; i < new_fm->used_blocks; i++) 1509 for (i = 1; i < new_fm->used_blocks; i++)
1510 ubi_wl_put_fm_peb(ubi, new_fm->e[i], 1510 ubi_wl_put_fm_peb(ubi, new_fm->e[i],
@@ -1525,7 +1525,7 @@ int ubi_update_fastmap(struct ubi_device *ubi)
1525 } else { 1525 } else {
1526 if (!tmp_e) { 1526 if (!tmp_e) {
1527 int i; 1527 int i;
1528 ubi_err("could not find any anchor PEB"); 1528 ubi_err(ubi, "could not find any anchor PEB");
1529 1529
1530 for (i = 1; i < new_fm->used_blocks; i++) 1530 for (i = 1; i < new_fm->used_blocks; i++)
1531 ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0); 1531 ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
@@ -1555,13 +1555,13 @@ out_unlock:
1555err: 1555err:
1556 kfree(new_fm); 1556 kfree(new_fm);
1557 1557
1558 ubi_warn("Unable to write new fastmap, err=%i", ret); 1558 ubi_warn(ubi, "Unable to write new fastmap, err=%i", ret);
1559 1559
1560 ret = 0; 1560 ret = 0;
1561 if (old_fm) { 1561 if (old_fm) {
1562 ret = invalidate_fastmap(ubi, old_fm); 1562 ret = invalidate_fastmap(ubi, old_fm);
1563 if (ret < 0) 1563 if (ret < 0)
1564 ubi_err("Unable to invalidate current fastmap!"); 1564 ubi_err(ubi, "Unable to invalidate current fastmap!");
1565 else if (ret) 1565 else if (ret)
1566 ret = 0; 1566 ret = 0;
1567 } 1567 }
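
Besides adding the device argument, the fastmap.c hunks also fold format
strings that had been split with backslash continuations back onto a
single line, per the kernel convention that user-visible strings stay
greppable even when they exceed 80 columns; only the argument list wraps.
Schematically (the magic variable here is a simplified stand-in for the
be32_to_cpu() expressions in the real hunks):

	/* Before: grepping for the full message text finds nothing. */
	ubi_err("bad fastmap vol header magic: 0x%x, " \
		"expected: 0x%x", magic, UBI_FM_VHDR_MAGIC);

	/* After: one literal, one grep hit. */
	ubi_err(ubi, "bad fastmap vol header magic: 0x%x, expected: 0x%x",
		magic, UBI_FM_VHDR_MAGIC);
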
diff --git a/drivers/mtd/ubi/io.c b/drivers/mtd/ubi/io.c
index d36134925d31..396aaa543362 100644
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -177,19 +177,20 @@ retry:
177 * enabled. A corresponding message will be printed 177 * enabled. A corresponding message will be printed
178 * later, when it has been scrubbed. 178 * later, when it has been scrubbed.
179 */ 179 */
180 ubi_msg("fixable bit-flip detected at PEB %d", pnum); 180 ubi_msg(ubi, "fixable bit-flip detected at PEB %d",
181 pnum);
181 ubi_assert(len == read); 182 ubi_assert(len == read);
182 return UBI_IO_BITFLIPS; 183 return UBI_IO_BITFLIPS;
183 } 184 }
184 185
185 if (retries++ < UBI_IO_RETRIES) { 186 if (retries++ < UBI_IO_RETRIES) {
186 ubi_warn("error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry", 187 ubi_warn(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
187 err, errstr, len, pnum, offset, read); 188 err, errstr, len, pnum, offset, read);
188 yield(); 189 yield();
189 goto retry; 190 goto retry;
190 } 191 }
191 192
192 ubi_err("error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes", 193 ubi_err(ubi, "error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
193 err, errstr, len, pnum, offset, read); 194 err, errstr, len, pnum, offset, read);
194 dump_stack(); 195 dump_stack();
195 196
@@ -246,7 +247,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
246 ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0); 247 ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0);
247 248
248 if (ubi->ro_mode) { 249 if (ubi->ro_mode) {
249 ubi_err("read-only mode"); 250 ubi_err(ubi, "read-only mode");
250 return -EROFS; 251 return -EROFS;
251 } 252 }
252 253
@@ -273,7 +274,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
273 } 274 }
274 275
275 if (ubi_dbg_is_write_failure(ubi)) { 276 if (ubi_dbg_is_write_failure(ubi)) {
276 ubi_err("cannot write %d bytes to PEB %d:%d (emulated)", 277 ubi_err(ubi, "cannot write %d bytes to PEB %d:%d (emulated)",
277 len, pnum, offset); 278 len, pnum, offset);
278 dump_stack(); 279 dump_stack();
279 return -EIO; 280 return -EIO;
@@ -282,7 +283,7 @@ int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
282 addr = (loff_t)pnum * ubi->peb_size + offset; 283 addr = (loff_t)pnum * ubi->peb_size + offset;
283 err = mtd_write(ubi->mtd, addr, len, &written, buf); 284 err = mtd_write(ubi->mtd, addr, len, &written, buf);
284 if (err) { 285 if (err) {
285 ubi_err("error %d while writing %d bytes to PEB %d:%d, written %zd bytes", 286 ubi_err(ubi, "error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
286 err, len, pnum, offset, written); 287 err, len, pnum, offset, written);
287 dump_stack(); 288 dump_stack();
288 ubi_dump_flash(ubi, pnum, offset, len); 289 ubi_dump_flash(ubi, pnum, offset, len);
@@ -338,7 +339,7 @@ static int do_sync_erase(struct ubi_device *ubi, int pnum)
338 ubi_assert(pnum >= 0 && pnum < ubi->peb_count); 339 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
339 340
340 if (ubi->ro_mode) { 341 if (ubi->ro_mode) {
341 ubi_err("read-only mode"); 342 ubi_err(ubi, "read-only mode");
342 return -EROFS; 343 return -EROFS;
343 } 344 }
344 345
@@ -355,12 +356,12 @@ retry:
355 err = mtd_erase(ubi->mtd, &ei); 356 err = mtd_erase(ubi->mtd, &ei);
356 if (err) { 357 if (err) {
357 if (retries++ < UBI_IO_RETRIES) { 358 if (retries++ < UBI_IO_RETRIES) {
358 ubi_warn("error %d while erasing PEB %d, retry", 359 ubi_warn(ubi, "error %d while erasing PEB %d, retry",
359 err, pnum); 360 err, pnum);
360 yield(); 361 yield();
361 goto retry; 362 goto retry;
362 } 363 }
363 ubi_err("cannot erase PEB %d, error %d", pnum, err); 364 ubi_err(ubi, "cannot erase PEB %d, error %d", pnum, err);
364 dump_stack(); 365 dump_stack();
365 return err; 366 return err;
366 } 367 }
@@ -368,17 +369,18 @@ retry:
368 err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE || 369 err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE ||
369 ei.state == MTD_ERASE_FAILED); 370 ei.state == MTD_ERASE_FAILED);
370 if (err) { 371 if (err) {
371 ubi_err("interrupted PEB %d erasure", pnum); 372 ubi_err(ubi, "interrupted PEB %d erasure", pnum);
372 return -EINTR; 373 return -EINTR;
373 } 374 }
374 375
375 if (ei.state == MTD_ERASE_FAILED) { 376 if (ei.state == MTD_ERASE_FAILED) {
376 if (retries++ < UBI_IO_RETRIES) { 377 if (retries++ < UBI_IO_RETRIES) {
377 ubi_warn("error while erasing PEB %d, retry", pnum); 378 ubi_warn(ubi, "error while erasing PEB %d, retry",
379 pnum);
378 yield(); 380 yield();
379 goto retry; 381 goto retry;
380 } 382 }
381 ubi_err("cannot erase PEB %d", pnum); 383 ubi_err(ubi, "cannot erase PEB %d", pnum);
382 dump_stack(); 384 dump_stack();
383 return -EIO; 385 return -EIO;
384 } 386 }
@@ -388,7 +390,7 @@ retry:
388 return err; 390 return err;
389 391
390 if (ubi_dbg_is_erase_failure(ubi)) { 392 if (ubi_dbg_is_erase_failure(ubi)) {
391 ubi_err("cannot erase PEB %d (emulated)", pnum); 393 ubi_err(ubi, "cannot erase PEB %d (emulated)", pnum);
392 return -EIO; 394 return -EIO;
393 } 395 }
394 396
@@ -411,7 +413,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
411{ 413{
412 int err, i, patt_count; 414 int err, i, patt_count;
413 415
414 ubi_msg("run torture test for PEB %d", pnum); 416 ubi_msg(ubi, "run torture test for PEB %d", pnum);
415 patt_count = ARRAY_SIZE(patterns); 417 patt_count = ARRAY_SIZE(patterns);
416 ubi_assert(patt_count > 0); 418 ubi_assert(patt_count > 0);
417 419
@@ -428,7 +430,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
428 430
429 err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size); 431 err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size);
430 if (err == 0) { 432 if (err == 0) {
431 ubi_err("erased PEB %d, but a non-0xFF byte found", 433 ubi_err(ubi, "erased PEB %d, but a non-0xFF byte found",
432 pnum); 434 pnum);
433 err = -EIO; 435 err = -EIO;
434 goto out; 436 goto out;
@@ -448,7 +450,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
448 err = ubi_check_pattern(ubi->peb_buf, patterns[i], 450 err = ubi_check_pattern(ubi->peb_buf, patterns[i],
449 ubi->peb_size); 451 ubi->peb_size);
450 if (err == 0) { 452 if (err == 0) {
451 ubi_err("pattern %x checking failed for PEB %d", 453 ubi_err(ubi, "pattern %x checking failed for PEB %d",
452 patterns[i], pnum); 454 patterns[i], pnum);
453 err = -EIO; 455 err = -EIO;
454 goto out; 456 goto out;
@@ -456,7 +458,7 @@ static int torture_peb(struct ubi_device *ubi, int pnum)
456 } 458 }
457 459
458 err = patt_count; 460 err = patt_count;
459 ubi_msg("PEB %d passed torture test, do not mark it as bad", pnum); 461 ubi_msg(ubi, "PEB %d passed torture test, do not mark it as bad", pnum);
460 462
461out: 463out:
462 mutex_unlock(&ubi->buf_mutex); 464 mutex_unlock(&ubi->buf_mutex);
@@ -466,7 +468,7 @@ out:
466 * has not passed because it happened on a freshly erased 468 * has not passed because it happened on a freshly erased
467 * physical eraseblock which means something is wrong with it. 469 * physical eraseblock which means something is wrong with it.
468 */ 470 */
469 ubi_err("read problems on freshly erased PEB %d, must be bad", 471 ubi_err(ubi, "read problems on freshly erased PEB %d, must be bad",
470 pnum); 472 pnum);
471 err = -EIO; 473 err = -EIO;
472 } 474 }
@@ -542,7 +544,7 @@ error:
542 * it. Supposedly the flash media or the driver is screwed up, so 544 * it. Supposedly the flash media or the driver is screwed up, so
543 * return an error. 545 * return an error.
544 */ 546 */
545 ubi_err("cannot invalidate PEB %d, write returned %d", pnum, err); 547 ubi_err(ubi, "cannot invalidate PEB %d, write returned %d", pnum, err);
546 ubi_dump_flash(ubi, pnum, 0, ubi->peb_size); 548 ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
547 return -EIO; 549 return -EIO;
548} 550}
@@ -574,7 +576,7 @@ int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
574 return err; 576 return err;
575 577
576 if (ubi->ro_mode) { 578 if (ubi->ro_mode) {
577 ubi_err("read-only mode"); 579 ubi_err(ubi, "read-only mode");
578 return -EROFS; 580 return -EROFS;
579 } 581 }
580 582
@@ -616,7 +618,7 @@ int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
616 618
617 ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size); 619 ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
618 if (ret < 0) 620 if (ret < 0)
619 ubi_err("error %d while checking if PEB %d is bad", 621 ubi_err(ubi, "error %d while checking if PEB %d is bad",
620 ret, pnum); 622 ret, pnum);
621 else if (ret) 623 else if (ret)
622 dbg_io("PEB %d is bad", pnum); 624 dbg_io("PEB %d is bad", pnum);
@@ -642,7 +644,7 @@ int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
642 ubi_assert(pnum >= 0 && pnum < ubi->peb_count); 644 ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
643 645
644 if (ubi->ro_mode) { 646 if (ubi->ro_mode) {
645 ubi_err("read-only mode"); 647 ubi_err(ubi, "read-only mode");
646 return -EROFS; 648 return -EROFS;
647 } 649 }
648 650
@@ -651,7 +653,7 @@ int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
651 653
652 err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size); 654 err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
653 if (err) 655 if (err)
654 ubi_err("cannot mark PEB %d bad, error %d", pnum, err); 656 ubi_err(ubi, "cannot mark PEB %d bad, error %d", pnum, err);
655 return err; 657 return err;
656} 658}
657 659
@@ -674,32 +676,32 @@ static int validate_ec_hdr(const struct ubi_device *ubi,
674 leb_start = be32_to_cpu(ec_hdr->data_offset); 676 leb_start = be32_to_cpu(ec_hdr->data_offset);
675 677
676 if (ec_hdr->version != UBI_VERSION) { 678 if (ec_hdr->version != UBI_VERSION) {
677 ubi_err("node with incompatible UBI version found: this UBI version is %d, image version is %d", 679 ubi_err(ubi, "node with incompatible UBI version found: this UBI version is %d, image version is %d",
678 UBI_VERSION, (int)ec_hdr->version); 680 UBI_VERSION, (int)ec_hdr->version);
679 goto bad; 681 goto bad;
680 } 682 }
681 683
682 if (vid_hdr_offset != ubi->vid_hdr_offset) { 684 if (vid_hdr_offset != ubi->vid_hdr_offset) {
683 ubi_err("bad VID header offset %d, expected %d", 685 ubi_err(ubi, "bad VID header offset %d, expected %d",
684 vid_hdr_offset, ubi->vid_hdr_offset); 686 vid_hdr_offset, ubi->vid_hdr_offset);
685 goto bad; 687 goto bad;
686 } 688 }
687 689
688 if (leb_start != ubi->leb_start) { 690 if (leb_start != ubi->leb_start) {
689 ubi_err("bad data offset %d, expected %d", 691 ubi_err(ubi, "bad data offset %d, expected %d",
690 leb_start, ubi->leb_start); 692 leb_start, ubi->leb_start);
691 goto bad; 693 goto bad;
692 } 694 }
693 695
694 if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) { 696 if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) {
695 ubi_err("bad erase counter %lld", ec); 697 ubi_err(ubi, "bad erase counter %lld", ec);
696 goto bad; 698 goto bad;
697 } 699 }
698 700
699 return 0; 701 return 0;
700 702
701bad: 703bad:
702 ubi_err("bad EC header"); 704 ubi_err(ubi, "bad EC header");
703 ubi_dump_ec_hdr(ec_hdr); 705 ubi_dump_ec_hdr(ec_hdr);
704 dump_stack(); 706 dump_stack();
705 return 1; 707 return 1;
@@ -765,7 +767,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
765 if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) { 767 if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
766 /* The physical eraseblock is supposedly empty */ 768 /* The physical eraseblock is supposedly empty */
767 if (verbose) 769 if (verbose)
768 ubi_warn("no EC header found at PEB %d, only 0xFF bytes", 770 ubi_warn(ubi, "no EC header found at PEB %d, only 0xFF bytes",
769 pnum); 771 pnum);
770 dbg_bld("no EC header found at PEB %d, only 0xFF bytes", 772 dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
771 pnum); 773 pnum);
@@ -780,7 +782,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
780 * 0xFF bytes. Report that the header is corrupted. 782 * 0xFF bytes. Report that the header is corrupted.
781 */ 783 */
782 if (verbose) { 784 if (verbose) {
783 ubi_warn("bad magic number at PEB %d: %08x instead of %08x", 785 ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
784 pnum, magic, UBI_EC_HDR_MAGIC); 786 pnum, magic, UBI_EC_HDR_MAGIC);
785 ubi_dump_ec_hdr(ec_hdr); 787 ubi_dump_ec_hdr(ec_hdr);
786 } 788 }
@@ -794,7 +796,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
794 796
795 if (hdr_crc != crc) { 797 if (hdr_crc != crc) {
796 if (verbose) { 798 if (verbose) {
797 ubi_warn("bad EC header CRC at PEB %d, calculated %#08x, read %#08x", 799 ubi_warn(ubi, "bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
798 pnum, crc, hdr_crc); 800 pnum, crc, hdr_crc);
799 ubi_dump_ec_hdr(ec_hdr); 801 ubi_dump_ec_hdr(ec_hdr);
800 } 802 }
@@ -810,7 +812,7 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
810 /* And of course validate what has just been read from the media */ 812 /* And of course validate what has just been read from the media */
811 err = validate_ec_hdr(ubi, ec_hdr); 813 err = validate_ec_hdr(ubi, ec_hdr);
812 if (err) { 814 if (err) {
813 ubi_err("validation failed for PEB %d", pnum); 815 ubi_err(ubi, "validation failed for PEB %d", pnum);
814 return -EINVAL; 816 return -EINVAL;
815 } 817 }
816 818
@@ -884,40 +886,40 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
884 int usable_leb_size = ubi->leb_size - data_pad; 886 int usable_leb_size = ubi->leb_size - data_pad;
885 887
886 if (copy_flag != 0 && copy_flag != 1) { 888 if (copy_flag != 0 && copy_flag != 1) {
887 ubi_err("bad copy_flag"); 889 ubi_err(ubi, "bad copy_flag");
888 goto bad; 890 goto bad;
889 } 891 }
890 892
891 if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 || 893 if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
892 data_pad < 0) { 894 data_pad < 0) {
893 ubi_err("negative values"); 895 ubi_err(ubi, "negative values");
894 goto bad; 896 goto bad;
895 } 897 }
896 898
897 if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) { 899 if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
898 ubi_err("bad vol_id"); 900 ubi_err(ubi, "bad vol_id");
899 goto bad; 901 goto bad;
900 } 902 }
901 903
902 if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) { 904 if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
903 ubi_err("bad compat"); 905 ubi_err(ubi, "bad compat");
904 goto bad; 906 goto bad;
905 } 907 }
906 908
907 if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE && 909 if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
908 compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE && 910 compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
909 compat != UBI_COMPAT_REJECT) { 911 compat != UBI_COMPAT_REJECT) {
910 ubi_err("bad compat"); 912 ubi_err(ubi, "bad compat");
911 goto bad; 913 goto bad;
912 } 914 }
913 915
914 if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) { 916 if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
915 ubi_err("bad vol_type"); 917 ubi_err(ubi, "bad vol_type");
916 goto bad; 918 goto bad;
917 } 919 }
918 920
919 if (data_pad >= ubi->leb_size / 2) { 921 if (data_pad >= ubi->leb_size / 2) {
920 ubi_err("bad data_pad"); 922 ubi_err(ubi, "bad data_pad");
921 goto bad; 923 goto bad;
922 } 924 }
923 925
@@ -929,45 +931,45 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
929 * mapped logical eraseblocks. 931 * mapped logical eraseblocks.
930 */ 932 */
931 if (used_ebs == 0) { 933 if (used_ebs == 0) {
932 ubi_err("zero used_ebs"); 934 ubi_err(ubi, "zero used_ebs");
933 goto bad; 935 goto bad;
934 } 936 }
935 if (data_size == 0) { 937 if (data_size == 0) {
936 ubi_err("zero data_size"); 938 ubi_err(ubi, "zero data_size");
937 goto bad; 939 goto bad;
938 } 940 }
939 if (lnum < used_ebs - 1) { 941 if (lnum < used_ebs - 1) {
940 if (data_size != usable_leb_size) { 942 if (data_size != usable_leb_size) {
941 ubi_err("bad data_size"); 943 ubi_err(ubi, "bad data_size");
942 goto bad; 944 goto bad;
943 } 945 }
944 } else if (lnum == used_ebs - 1) { 946 } else if (lnum == used_ebs - 1) {
945 if (data_size == 0) { 947 if (data_size == 0) {
946 ubi_err("bad data_size at last LEB"); 948 ubi_err(ubi, "bad data_size at last LEB");
947 goto bad; 949 goto bad;
948 } 950 }
949 } else { 951 } else {
950 ubi_err("too high lnum"); 952 ubi_err(ubi, "too high lnum");
951 goto bad; 953 goto bad;
952 } 954 }
953 } else { 955 } else {
954 if (copy_flag == 0) { 956 if (copy_flag == 0) {
955 if (data_crc != 0) { 957 if (data_crc != 0) {
956 ubi_err("non-zero data CRC"); 958 ubi_err(ubi, "non-zero data CRC");
957 goto bad; 959 goto bad;
958 } 960 }
959 if (data_size != 0) { 961 if (data_size != 0) {
960 ubi_err("non-zero data_size"); 962 ubi_err(ubi, "non-zero data_size");
961 goto bad; 963 goto bad;
962 } 964 }
963 } else { 965 } else {
964 if (data_size == 0) { 966 if (data_size == 0) {
965 ubi_err("zero data_size of copy"); 967 ubi_err(ubi, "zero data_size of copy");
966 goto bad; 968 goto bad;
967 } 969 }
968 } 970 }
969 if (used_ebs != 0) { 971 if (used_ebs != 0) {
970 ubi_err("bad used_ebs"); 972 ubi_err(ubi, "bad used_ebs");
971 goto bad; 973 goto bad;
972 } 974 }
973 } 975 }
@@ -975,7 +977,7 @@ static int validate_vid_hdr(const struct ubi_device *ubi,
975 return 0; 977 return 0;
976 978
977bad: 979bad:
978 ubi_err("bad VID header"); 980 ubi_err(ubi, "bad VID header");
979 ubi_dump_vid_hdr(vid_hdr); 981 ubi_dump_vid_hdr(vid_hdr);
980 dump_stack(); 982 dump_stack();
981 return 1; 983 return 1;
@@ -1020,7 +1022,7 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
1020 1022
1021 if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) { 1023 if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
1022 if (verbose) 1024 if (verbose)
1023 ubi_warn("no VID header found at PEB %d, only 0xFF bytes", 1025 ubi_warn(ubi, "no VID header found at PEB %d, only 0xFF bytes",
1024 pnum); 1026 pnum);
1025 dbg_bld("no VID header found at PEB %d, only 0xFF bytes", 1027 dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
1026 pnum); 1028 pnum);
@@ -1031,7 +1033,7 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
1031 } 1033 }
1032 1034
1033 if (verbose) { 1035 if (verbose) {
1034 ubi_warn("bad magic number at PEB %d: %08x instead of %08x", 1036 ubi_warn(ubi, "bad magic number at PEB %d: %08x instead of %08x",
1035 pnum, magic, UBI_VID_HDR_MAGIC); 1037 pnum, magic, UBI_VID_HDR_MAGIC);
1036 ubi_dump_vid_hdr(vid_hdr); 1038 ubi_dump_vid_hdr(vid_hdr);
1037 } 1039 }
@@ -1045,7 +1047,7 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
1045 1047
1046 if (hdr_crc != crc) { 1048 if (hdr_crc != crc) {
1047 if (verbose) { 1049 if (verbose) {
1048 ubi_warn("bad CRC at PEB %d, calculated %#08x, read %#08x", 1050 ubi_warn(ubi, "bad CRC at PEB %d, calculated %#08x, read %#08x",
1049 pnum, crc, hdr_crc); 1051 pnum, crc, hdr_crc);
1050 ubi_dump_vid_hdr(vid_hdr); 1052 ubi_dump_vid_hdr(vid_hdr);
1051 } 1053 }
@@ -1059,7 +1061,7 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
1059 1061
1060 err = validate_vid_hdr(ubi, vid_hdr); 1062 err = validate_vid_hdr(ubi, vid_hdr);
1061 if (err) { 1063 if (err) {
1062 ubi_err("validation failed for PEB %d", pnum); 1064 ubi_err(ubi, "validation failed for PEB %d", pnum);
1063 return -EINVAL; 1065 return -EINVAL;
1064 } 1066 }
1065 1067
@@ -1129,7 +1131,7 @@ static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
1129 if (!err) 1131 if (!err)
1130 return err; 1132 return err;
1131 1133
1132 ubi_err("self-check failed for PEB %d", pnum); 1134 ubi_err(ubi, "self-check failed for PEB %d", pnum);
1133 dump_stack(); 1135 dump_stack();
1134 return err > 0 ? -EINVAL : err; 1136 return err > 0 ? -EINVAL : err;
1135} 1137}
@@ -1154,14 +1156,14 @@ static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
1154 1156
1155 magic = be32_to_cpu(ec_hdr->magic); 1157 magic = be32_to_cpu(ec_hdr->magic);
1156 if (magic != UBI_EC_HDR_MAGIC) { 1158 if (magic != UBI_EC_HDR_MAGIC) {
1157 ubi_err("bad magic %#08x, must be %#08x", 1159 ubi_err(ubi, "bad magic %#08x, must be %#08x",
1158 magic, UBI_EC_HDR_MAGIC); 1160 magic, UBI_EC_HDR_MAGIC);
1159 goto fail; 1161 goto fail;
1160 } 1162 }
1161 1163
1162 err = validate_ec_hdr(ubi, ec_hdr); 1164 err = validate_ec_hdr(ubi, ec_hdr);
1163 if (err) { 1165 if (err) {
1164 ubi_err("self-check failed for PEB %d", pnum); 1166 ubi_err(ubi, "self-check failed for PEB %d", pnum);
1165 goto fail; 1167 goto fail;
1166 } 1168 }
1167 1169
@@ -1201,8 +1203,9 @@ static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
1201 crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC); 1203 crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
1202 hdr_crc = be32_to_cpu(ec_hdr->hdr_crc); 1204 hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
1203 if (hdr_crc != crc) { 1205 if (hdr_crc != crc) {
1204 ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc); 1206 ubi_err(ubi, "bad CRC, calculated %#08x, read %#08x",
1205 ubi_err("self-check failed for PEB %d", pnum); 1207 crc, hdr_crc);
1208 ubi_err(ubi, "self-check failed for PEB %d", pnum);
1206 ubi_dump_ec_hdr(ec_hdr); 1209 ubi_dump_ec_hdr(ec_hdr);
1207 dump_stack(); 1210 dump_stack();
1208 err = -EINVAL; 1211 err = -EINVAL;
@@ -1236,21 +1239,21 @@ static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
1236 1239
1237 magic = be32_to_cpu(vid_hdr->magic); 1240 magic = be32_to_cpu(vid_hdr->magic);
1238 if (magic != UBI_VID_HDR_MAGIC) { 1241 if (magic != UBI_VID_HDR_MAGIC) {
1239 ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x", 1242 ubi_err(ubi, "bad VID header magic %#08x at PEB %d, must be %#08x",
1240 magic, pnum, UBI_VID_HDR_MAGIC); 1243 magic, pnum, UBI_VID_HDR_MAGIC);
1241 goto fail; 1244 goto fail;
1242 } 1245 }
1243 1246
1244 err = validate_vid_hdr(ubi, vid_hdr); 1247 err = validate_vid_hdr(ubi, vid_hdr);
1245 if (err) { 1248 if (err) {
1246 ubi_err("self-check failed for PEB %d", pnum); 1249 ubi_err(ubi, "self-check failed for PEB %d", pnum);
1247 goto fail; 1250 goto fail;
1248 } 1251 }
1249 1252
1250 return err; 1253 return err;
1251 1254
1252fail: 1255fail:
1253 ubi_err("self-check failed for PEB %d", pnum); 1256 ubi_err(ubi, "self-check failed for PEB %d", pnum);
1254 ubi_dump_vid_hdr(vid_hdr); 1257 ubi_dump_vid_hdr(vid_hdr);
1255 dump_stack(); 1258 dump_stack();
1256 return -EINVAL; 1259 return -EINVAL;
@@ -1288,9 +1291,9 @@ static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
1288 crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC); 1291 crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_EC_HDR_SIZE_CRC);
1289 hdr_crc = be32_to_cpu(vid_hdr->hdr_crc); 1292 hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
1290 if (hdr_crc != crc) { 1293 if (hdr_crc != crc) {
1291 ubi_err("bad VID header CRC at PEB %d, calculated %#08x, read %#08x", 1294 ubi_err(ubi, "bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
1292 pnum, crc, hdr_crc); 1295 pnum, crc, hdr_crc);
1293 ubi_err("self-check failed for PEB %d", pnum); 1296 ubi_err(ubi, "self-check failed for PEB %d", pnum);
1294 ubi_dump_vid_hdr(vid_hdr); 1297 ubi_dump_vid_hdr(vid_hdr);
1295 dump_stack(); 1298 dump_stack();
1296 err = -EINVAL; 1299 err = -EINVAL;
@@ -1329,7 +1332,7 @@ static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
1329 1332
1330 buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL); 1333 buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
1331 if (!buf1) { 1334 if (!buf1) {
1332 ubi_err("cannot allocate memory to check writes"); 1335 ubi_err(ubi, "cannot allocate memory to check writes");
1333 return 0; 1336 return 0;
1334 } 1337 }
1335 1338
@@ -1345,15 +1348,15 @@ static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
1345 if (c == c1) 1348 if (c == c1)
1346 continue; 1349 continue;
1347 1350
1348 ubi_err("self-check failed for PEB %d:%d, len %d", 1351 ubi_err(ubi, "self-check failed for PEB %d:%d, len %d",
1349 pnum, offset, len); 1352 pnum, offset, len);
1350 ubi_msg("data differ at position %d", i); 1353 ubi_msg(ubi, "data differ at position %d", i);
1351 dump_len = max_t(int, 128, len - i); 1354 dump_len = max_t(int, 128, len - i);
1352 ubi_msg("hex dump of the original buffer from %d to %d", 1355 ubi_msg(ubi, "hex dump of the original buffer from %d to %d",
1353 i, i + dump_len); 1356 i, i + dump_len);
1354 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, 1357 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
1355 buf + i, dump_len, 1); 1358 buf + i, dump_len, 1);
1356 ubi_msg("hex dump of the read buffer from %d to %d", 1359 ubi_msg(ubi, "hex dump of the read buffer from %d to %d",
1357 i, i + dump_len); 1360 i, i + dump_len);
1358 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, 1361 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
1359 buf1 + i, dump_len, 1); 1362 buf1 + i, dump_len, 1);
@@ -1393,20 +1396,20 @@ int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
1393 1396
1394 buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL); 1397 buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
1395 if (!buf) { 1398 if (!buf) {
1396 ubi_err("cannot allocate memory to check for 0xFFs"); 1399 ubi_err(ubi, "cannot allocate memory to check for 0xFFs");
1397 return 0; 1400 return 0;
1398 } 1401 }
1399 1402
1400 err = mtd_read(ubi->mtd, addr, len, &read, buf); 1403 err = mtd_read(ubi->mtd, addr, len, &read, buf);
1401 if (err && !mtd_is_bitflip(err)) { 1404 if (err && !mtd_is_bitflip(err)) {
1402 ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes", 1405 ubi_err(ubi, "err %d while reading %d bytes from PEB %d:%d, read %zd bytes",
1403 err, len, pnum, offset, read); 1406 err, len, pnum, offset, read);
1404 goto error; 1407 goto error;
1405 } 1408 }
1406 1409
1407 err = ubi_check_pattern(buf, 0xFF, len); 1410 err = ubi_check_pattern(buf, 0xFF, len);
1408 if (err == 0) { 1411 if (err == 0) {
1409 ubi_err("flash region at PEB %d:%d, length %d does not contain all 0xFF bytes", 1412 ubi_err(ubi, "flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
1410 pnum, offset, len); 1413 pnum, offset, len);
1411 goto fail; 1414 goto fail;
1412 } 1415 }
@@ -1415,8 +1418,9 @@ int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
1415 return 0; 1418 return 0;
1416 1419
1417fail: 1420fail:
1418 ubi_err("self-check failed for PEB %d", pnum); 1421 ubi_err(ubi, "self-check failed for PEB %d", pnum);
1419 ubi_msg("hex dump of the %d-%d region", offset, offset + len); 1422 ubi_msg(ubi, "hex dump of the %d-%d region",
1423 offset, offset + len);
1420 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1); 1424 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
1421 err = -EINVAL; 1425 err = -EINVAL;
1422error: 1426error:
diff --git a/drivers/mtd/ubi/kapi.c b/drivers/mtd/ubi/kapi.c
index 3aac1acceeb4..f3bab669f6bb 100644
--- a/drivers/mtd/ubi/kapi.c
+++ b/drivers/mtd/ubi/kapi.c
@@ -204,7 +204,7 @@ struct ubi_volume_desc *ubi_open_volume(int ubi_num, int vol_id, int mode)
204 return ERR_PTR(err); 204 return ERR_PTR(err);
205 } 205 }
206 if (err == 1) { 206 if (err == 1) {
207 ubi_warn("volume %d on UBI device %d is corrupted", 207 ubi_warn(ubi, "volume %d on UBI device %d is corrupted",
208 vol_id, ubi->ubi_num); 208 vol_id, ubi->ubi_num);
209 vol->corrupted = 1; 209 vol->corrupted = 1;
210 } 210 }
@@ -221,7 +221,7 @@ out_free:
221 kfree(desc); 221 kfree(desc);
222out_put_ubi: 222out_put_ubi:
223 ubi_put_device(ubi); 223 ubi_put_device(ubi);
224 ubi_err("cannot open device %d, volume %d, error %d", 224 ubi_err(ubi, "cannot open device %d, volume %d, error %d",
225 ubi_num, vol_id, err); 225 ubi_num, vol_id, err);
226 return ERR_PTR(err); 226 return ERR_PTR(err);
227} 227}
@@ -411,7 +411,7 @@ int ubi_leb_read(struct ubi_volume_desc *desc, int lnum, char *buf, int offset,
411 411
412 err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check); 412 err = ubi_eba_read_leb(ubi, vol, lnum, buf, offset, len, check);
413 if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) { 413 if (err && mtd_is_eccerr(err) && vol->vol_type == UBI_STATIC_VOLUME) {
414 ubi_warn("mark volume %d as corrupted", vol_id); 414 ubi_warn(ubi, "mark volume %d as corrupted", vol_id);
415 vol->corrupted = 1; 415 vol->corrupted = 1;
416 } 416 }
417 417
diff --git a/drivers/mtd/ubi/misc.c b/drivers/mtd/ubi/misc.c
index f913d701a5b3..dbda77e556cb 100644
--- a/drivers/mtd/ubi/misc.c
+++ b/drivers/mtd/ubi/misc.c
@@ -111,7 +111,7 @@ void ubi_update_reserved(struct ubi_device *ubi)
111 ubi->avail_pebs -= need; 111 ubi->avail_pebs -= need;
112 ubi->rsvd_pebs += need; 112 ubi->rsvd_pebs += need;
113 ubi->beb_rsvd_pebs += need; 113 ubi->beb_rsvd_pebs += need;
114 ubi_msg("reserved more %d PEBs for bad PEB handling", need); 114 ubi_msg(ubi, "reserved more %d PEBs for bad PEB handling", need);
115} 115}
116 116
117/** 117/**
@@ -128,7 +128,7 @@ void ubi_calculate_reserved(struct ubi_device *ubi)
128 ubi->beb_rsvd_level = ubi->bad_peb_limit - ubi->bad_peb_count; 128 ubi->beb_rsvd_level = ubi->bad_peb_limit - ubi->bad_peb_count;
129 if (ubi->beb_rsvd_level < 0) { 129 if (ubi->beb_rsvd_level < 0) {
130 ubi->beb_rsvd_level = 0; 130 ubi->beb_rsvd_level = 0;
131 ubi_warn("number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)", 131 ubi_warn(ubi, "number of bad PEBs (%d) is above the expected limit (%d), not reserving any PEBs for bad PEB handling, will use available PEBs (if any)",
132 ubi->bad_peb_count, ubi->bad_peb_limit); 132 ubi->bad_peb_count, ubi->bad_peb_limit);
133 } 133 }
134} 134}
diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index 320fc38fa2a1..f80ffaba9058 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -50,13 +50,14 @@
50#define UBI_NAME_STR "ubi" 50#define UBI_NAME_STR "ubi"
51 51
52/* Normal UBI messages */ 52/* Normal UBI messages */
53#define ubi_msg(fmt, ...) pr_notice("UBI: " fmt "\n", ##__VA_ARGS__) 53#define ubi_msg(ubi, fmt, ...) pr_notice("UBI-%d: %s: " fmt "\n", \
54 ubi->ubi_num, __func__, ##__VA_ARGS__)
54/* UBI warning messages */ 55/* UBI warning messages */
55#define ubi_warn(fmt, ...) pr_warn("UBI warning: %s: " fmt "\n", \ 56#define ubi_warn(ubi, fmt, ...) pr_warn("UBI-%d warning: %s: " fmt "\n", \
56 __func__, ##__VA_ARGS__) 57 ubi->ubi_num, __func__, ##__VA_ARGS__)
57/* UBI error messages */ 58/* UBI error messages */
58#define ubi_err(fmt, ...) pr_err("UBI error: %s: " fmt "\n", \ 59#define ubi_err(ubi, fmt, ...) pr_err("UBI-%d error: %s: " fmt "\n", \
59 __func__, ##__VA_ARGS__) 60 ubi->ubi_num, __func__, ##__VA_ARGS__)
60 61
61/* Background thread name pattern */ 62/* Background thread name pattern */
62#define UBI_BGT_NAME_PATTERN "ubi_bgt%dd" 63#define UBI_BGT_NAME_PATTERN "ubi_bgt%dd"
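A minimal sketch of what the reworked macros mean at a call site. Assuming a hypothetical caller my_func() running against UBI device 0, the ubi pointer now supplies the device number, so every message identifies which device it came from:

	/* before: no device context in the message */
	ubi_err("cannot mark PEB %d bad, error %d", pnum, err);
	/* -> "UBI error: my_func: cannot mark PEB 1 bad, error -5" */

	/* after: the first argument is the struct ubi_device pointer */
	ubi_err(ubi, "cannot mark PEB %d bad, error %d", pnum, err);
	/* -> "UBI-0 error: my_func: cannot mark PEB 1 bad, error -5" */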
@@ -987,7 +988,7 @@ static inline void ubi_ro_mode(struct ubi_device *ubi)
987{ 988{
988 if (!ubi->ro_mode) { 989 if (!ubi->ro_mode) {
989 ubi->ro_mode = 1; 990 ubi->ro_mode = 1;
990 ubi_warn("switch to read-only mode"); 991 ubi_warn(ubi, "switch to read-only mode");
991 dump_stack(); 992 dump_stack();
992 } 993 }
993} 994}
diff --git a/drivers/mtd/ubi/upd.c b/drivers/mtd/ubi/upd.c
index ec2c2dc1c1ca..2a1b6e037e1a 100644
--- a/drivers/mtd/ubi/upd.c
+++ b/drivers/mtd/ubi/upd.c
@@ -133,6 +133,10 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
133 ubi_assert(!vol->updating && !vol->changing_leb); 133 ubi_assert(!vol->updating && !vol->changing_leb);
134 vol->updating = 1; 134 vol->updating = 1;
135 135
136 vol->upd_buf = vmalloc(ubi->leb_size);
137 if (!vol->upd_buf)
138 return -ENOMEM;
139
136 err = set_update_marker(ubi, vol); 140 err = set_update_marker(ubi, vol);
137 if (err) 141 if (err)
138 return err; 142 return err;
@@ -152,14 +156,12 @@ int ubi_start_update(struct ubi_device *ubi, struct ubi_volume *vol,
152 err = clear_update_marker(ubi, vol, 0); 156 err = clear_update_marker(ubi, vol, 0);
153 if (err) 157 if (err)
154 return err; 158 return err;
159
160 vfree(vol->upd_buf);
155 vol->updating = 0; 161 vol->updating = 0;
156 return 0; 162 return 0;
157 } 163 }
158 164
159 vol->upd_buf = vmalloc(ubi->leb_size);
160 if (!vol->upd_buf)
161 return -ENOMEM;
162
163 vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1, 165 vol->upd_ebs = div_u64(bytes + vol->usable_leb_size - 1,
164 vol->usable_leb_size); 166 vol->usable_leb_size);
165 vol->upd_bytes = bytes; 167 vol->upd_bytes = bytes;
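The reordering in this hunk is about failure atomicity: upd_buf is now allocated before set_update_marker(), so an -ENOMEM can no longer strike after the update marker has already been written to flash, and the zero-length-update path releases the buffer it turned out not to need. A condensed sketch of the resulting flow (paraphrased from the hunks above, not the whole function):

	vol->upd_buf = vmalloc(ubi->leb_size);
	if (!vol->upd_buf)
		return -ENOMEM;		/* fail before any on-flash state changes */

	err = set_update_marker(ubi, vol);
	if (err)
		return err;

	if (bytes == 0) {		/* zero-byte update just wipes the volume */
		err = clear_update_marker(ubi, vol, 0);
		if (err)
			return err;
		vfree(vol->upd_buf);	/* buffer not needed after all */
		vol->updating = 0;
		return 0;
	}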
diff --git a/drivers/mtd/ubi/vmt.c b/drivers/mtd/ubi/vmt.c
index 8330703c098f..ff4d97848d1c 100644
--- a/drivers/mtd/ubi/vmt.c
+++ b/drivers/mtd/ubi/vmt.c
@@ -223,7 +223,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
223 } 223 }
224 224
225 if (vol_id == UBI_VOL_NUM_AUTO) { 225 if (vol_id == UBI_VOL_NUM_AUTO) {
226 ubi_err("out of volume IDs"); 226 ubi_err(ubi, "out of volume IDs");
227 err = -ENFILE; 227 err = -ENFILE;
228 goto out_unlock; 228 goto out_unlock;
229 } 229 }
@@ -237,7 +237,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
237 /* Ensure that this volume does not exist */ 237 /* Ensure that this volume does not exist */
238 err = -EEXIST; 238 err = -EEXIST;
239 if (ubi->volumes[vol_id]) { 239 if (ubi->volumes[vol_id]) {
240 ubi_err("volume %d already exists", vol_id); 240 ubi_err(ubi, "volume %d already exists", vol_id);
241 goto out_unlock; 241 goto out_unlock;
242 } 242 }
243 243
@@ -246,7 +246,8 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
246 if (ubi->volumes[i] && 246 if (ubi->volumes[i] &&
247 ubi->volumes[i]->name_len == req->name_len && 247 ubi->volumes[i]->name_len == req->name_len &&
248 !strcmp(ubi->volumes[i]->name, req->name)) { 248 !strcmp(ubi->volumes[i]->name, req->name)) {
249 ubi_err("volume \"%s\" exists (ID %d)", req->name, i); 249 ubi_err(ubi, "volume \"%s\" exists (ID %d)",
250 req->name, i);
250 goto out_unlock; 251 goto out_unlock;
251 } 252 }
252 253
@@ -257,9 +258,10 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
257 258
258 /* Reserve physical eraseblocks */ 259 /* Reserve physical eraseblocks */
259 if (vol->reserved_pebs > ubi->avail_pebs) { 260 if (vol->reserved_pebs > ubi->avail_pebs) {
260 ubi_err("not enough PEBs, only %d available", ubi->avail_pebs); 261 ubi_err(ubi, "not enough PEBs, only %d available",
262 ubi->avail_pebs);
261 if (ubi->corr_peb_count) 263 if (ubi->corr_peb_count)
262 ubi_err("%d PEBs are corrupted and not used", 264 ubi_err(ubi, "%d PEBs are corrupted and not used",
263 ubi->corr_peb_count); 265 ubi->corr_peb_count);
264 err = -ENOSPC; 266 err = -ENOSPC;
265 goto out_unlock; 267 goto out_unlock;
@@ -314,7 +316,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
314 dev = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1); 316 dev = MKDEV(MAJOR(ubi->cdev.dev), vol_id + 1);
315 err = cdev_add(&vol->cdev, dev, 1); 317 err = cdev_add(&vol->cdev, dev, 1);
316 if (err) { 318 if (err) {
317 ubi_err("cannot add character device"); 319 ubi_err(ubi, "cannot add character device");
318 goto out_mapping; 320 goto out_mapping;
319 } 321 }
320 322
@@ -326,7 +328,7 @@ int ubi_create_volume(struct ubi_device *ubi, struct ubi_mkvol_req *req)
326 dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id); 328 dev_set_name(&vol->dev, "%s_%d", ubi->ubi_name, vol->vol_id);
327 err = device_register(&vol->dev); 329 err = device_register(&vol->dev);
328 if (err) { 330 if (err) {
329 ubi_err("cannot register device"); 331 ubi_err(ubi, "cannot register device");
330 goto out_cdev; 332 goto out_cdev;
331 } 333 }
332 334
@@ -386,7 +388,7 @@ out_unlock:
386 kfree(vol); 388 kfree(vol);
387 else 389 else
388 put_device(&vol->dev); 390 put_device(&vol->dev);
389 ubi_err("cannot create volume %d, error %d", vol_id, err); 391 ubi_err(ubi, "cannot create volume %d, error %d", vol_id, err);
390 return err; 392 return err;
391} 393}
392 394
@@ -454,7 +456,7 @@ int ubi_remove_volume(struct ubi_volume_desc *desc, int no_vtbl)
454 return err; 456 return err;
455 457
456out_err: 458out_err:
457 ubi_err("cannot remove volume %d, error %d", vol_id, err); 459 ubi_err(ubi, "cannot remove volume %d, error %d", vol_id, err);
458 spin_lock(&ubi->volumes_lock); 460 spin_lock(&ubi->volumes_lock);
459 ubi->volumes[vol_id] = vol; 461 ubi->volumes[vol_id] = vol;
460out_unlock: 462out_unlock:
@@ -487,7 +489,7 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
487 489
488 if (vol->vol_type == UBI_STATIC_VOLUME && 490 if (vol->vol_type == UBI_STATIC_VOLUME &&
489 reserved_pebs < vol->used_ebs) { 491 reserved_pebs < vol->used_ebs) {
490 ubi_err("too small size %d, %d LEBs contain data", 492 ubi_err(ubi, "too small size %d, %d LEBs contain data",
491 reserved_pebs, vol->used_ebs); 493 reserved_pebs, vol->used_ebs);
492 return -EINVAL; 494 return -EINVAL;
493 } 495 }
@@ -516,10 +518,10 @@ int ubi_resize_volume(struct ubi_volume_desc *desc, int reserved_pebs)
516 if (pebs > 0) { 518 if (pebs > 0) {
517 spin_lock(&ubi->volumes_lock); 519 spin_lock(&ubi->volumes_lock);
518 if (pebs > ubi->avail_pebs) { 520 if (pebs > ubi->avail_pebs) {
519 ubi_err("not enough PEBs: requested %d, available %d", 521 ubi_err(ubi, "not enough PEBs: requested %d, available %d",
520 pebs, ubi->avail_pebs); 522 pebs, ubi->avail_pebs);
521 if (ubi->corr_peb_count) 523 if (ubi->corr_peb_count)
522 ubi_err("%d PEBs are corrupted and not used", 524 ubi_err(ubi, "%d PEBs are corrupted and not used",
523 ubi->corr_peb_count); 525 ubi->corr_peb_count);
524 spin_unlock(&ubi->volumes_lock); 526 spin_unlock(&ubi->volumes_lock);
525 err = -ENOSPC; 527 err = -ENOSPC;
@@ -643,7 +645,7 @@ int ubi_add_volume(struct ubi_device *ubi, struct ubi_volume *vol)
643 dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1); 645 dev = MKDEV(MAJOR(ubi->cdev.dev), vol->vol_id + 1);
644 err = cdev_add(&vol->cdev, dev, 1); 646 err = cdev_add(&vol->cdev, dev, 1);
645 if (err) { 647 if (err) {
646 ubi_err("cannot add character device for volume %d, error %d", 648 ubi_err(ubi, "cannot add character device for volume %d, error %d",
647 vol_id, err); 649 vol_id, err);
648 return err; 650 return err;
649 } 651 }
@@ -710,7 +712,7 @@ static int self_check_volume(struct ubi_device *ubi, int vol_id)
710 712
711 if (!vol) { 713 if (!vol) {
712 if (reserved_pebs) { 714 if (reserved_pebs) {
713 ubi_err("no volume info, but volume exists"); 715 ubi_err(ubi, "no volume info, but volume exists");
714 goto fail; 716 goto fail;
715 } 717 }
716 spin_unlock(&ubi->volumes_lock); 718 spin_unlock(&ubi->volumes_lock);
@@ -719,90 +721,91 @@ static int self_check_volume(struct ubi_device *ubi, int vol_id)
719 721
720 if (vol->reserved_pebs < 0 || vol->alignment < 0 || vol->data_pad < 0 || 722 if (vol->reserved_pebs < 0 || vol->alignment < 0 || vol->data_pad < 0 ||
721 vol->name_len < 0) { 723 vol->name_len < 0) {
722 ubi_err("negative values"); 724 ubi_err(ubi, "negative values");
723 goto fail; 725 goto fail;
724 } 726 }
725 if (vol->alignment > ubi->leb_size || vol->alignment == 0) { 727 if (vol->alignment > ubi->leb_size || vol->alignment == 0) {
726 ubi_err("bad alignment"); 728 ubi_err(ubi, "bad alignment");
727 goto fail; 729 goto fail;
728 } 730 }
729 731
730 n = vol->alignment & (ubi->min_io_size - 1); 732 n = vol->alignment & (ubi->min_io_size - 1);
731 if (vol->alignment != 1 && n) { 733 if (vol->alignment != 1 && n) {
732 ubi_err("alignment is not multiple of min I/O unit"); 734 ubi_err(ubi, "alignment is not multiple of min I/O unit");
733 goto fail; 735 goto fail;
734 } 736 }
735 737
736 n = ubi->leb_size % vol->alignment; 738 n = ubi->leb_size % vol->alignment;
737 if (vol->data_pad != n) { 739 if (vol->data_pad != n) {
738 ubi_err("bad data_pad, has to be %lld", n); 740 ubi_err(ubi, "bad data_pad, has to be %lld", n);
739 goto fail; 741 goto fail;
740 } 742 }
741 743
742 if (vol->vol_type != UBI_DYNAMIC_VOLUME && 744 if (vol->vol_type != UBI_DYNAMIC_VOLUME &&
743 vol->vol_type != UBI_STATIC_VOLUME) { 745 vol->vol_type != UBI_STATIC_VOLUME) {
744 ubi_err("bad vol_type"); 746 ubi_err(ubi, "bad vol_type");
745 goto fail; 747 goto fail;
746 } 748 }
747 749
748 if (vol->upd_marker && vol->corrupted) { 750 if (vol->upd_marker && vol->corrupted) {
749 ubi_err("update marker and corrupted simultaneously"); 751 ubi_err(ubi, "update marker and corrupted simultaneously");
750 goto fail; 752 goto fail;
751 } 753 }
752 754
753 if (vol->reserved_pebs > ubi->good_peb_count) { 755 if (vol->reserved_pebs > ubi->good_peb_count) {
754 ubi_err("too large reserved_pebs"); 756 ubi_err(ubi, "too large reserved_pebs");
755 goto fail; 757 goto fail;
756 } 758 }
757 759
758 n = ubi->leb_size - vol->data_pad; 760 n = ubi->leb_size - vol->data_pad;
759 if (vol->usable_leb_size != ubi->leb_size - vol->data_pad) { 761 if (vol->usable_leb_size != ubi->leb_size - vol->data_pad) {
760 ubi_err("bad usable_leb_size, has to be %lld", n); 762 ubi_err(ubi, "bad usable_leb_size, has to be %lld", n);
761 goto fail; 763 goto fail;
762 } 764 }
763 765
764 if (vol->name_len > UBI_VOL_NAME_MAX) { 766 if (vol->name_len > UBI_VOL_NAME_MAX) {
765 ubi_err("too long volume name, max is %d", UBI_VOL_NAME_MAX); 767 ubi_err(ubi, "too long volume name, max is %d",
768 UBI_VOL_NAME_MAX);
766 goto fail; 769 goto fail;
767 } 770 }
768 771
769 n = strnlen(vol->name, vol->name_len + 1); 772 n = strnlen(vol->name, vol->name_len + 1);
770 if (n != vol->name_len) { 773 if (n != vol->name_len) {
771 ubi_err("bad name_len %lld", n); 774 ubi_err(ubi, "bad name_len %lld", n);
772 goto fail; 775 goto fail;
773 } 776 }
774 777
775 n = (long long)vol->used_ebs * vol->usable_leb_size; 778 n = (long long)vol->used_ebs * vol->usable_leb_size;
776 if (vol->vol_type == UBI_DYNAMIC_VOLUME) { 779 if (vol->vol_type == UBI_DYNAMIC_VOLUME) {
777 if (vol->corrupted) { 780 if (vol->corrupted) {
778 ubi_err("corrupted dynamic volume"); 781 ubi_err(ubi, "corrupted dynamic volume");
779 goto fail; 782 goto fail;
780 } 783 }
781 if (vol->used_ebs != vol->reserved_pebs) { 784 if (vol->used_ebs != vol->reserved_pebs) {
782 ubi_err("bad used_ebs"); 785 ubi_err(ubi, "bad used_ebs");
783 goto fail; 786 goto fail;
784 } 787 }
785 if (vol->last_eb_bytes != vol->usable_leb_size) { 788 if (vol->last_eb_bytes != vol->usable_leb_size) {
786 ubi_err("bad last_eb_bytes"); 789 ubi_err(ubi, "bad last_eb_bytes");
787 goto fail; 790 goto fail;
788 } 791 }
789 if (vol->used_bytes != n) { 792 if (vol->used_bytes != n) {
790 ubi_err("bad used_bytes"); 793 ubi_err(ubi, "bad used_bytes");
791 goto fail; 794 goto fail;
792 } 795 }
793 } else { 796 } else {
794 if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) { 797 if (vol->used_ebs < 0 || vol->used_ebs > vol->reserved_pebs) {
795 ubi_err("bad used_ebs"); 798 ubi_err(ubi, "bad used_ebs");
796 goto fail; 799 goto fail;
797 } 800 }
798 if (vol->last_eb_bytes < 0 || 801 if (vol->last_eb_bytes < 0 ||
799 vol->last_eb_bytes > vol->usable_leb_size) { 802 vol->last_eb_bytes > vol->usable_leb_size) {
800 ubi_err("bad last_eb_bytes"); 803 ubi_err(ubi, "bad last_eb_bytes");
801 goto fail; 804 goto fail;
802 } 805 }
803 if (vol->used_bytes < 0 || vol->used_bytes > n || 806 if (vol->used_bytes < 0 || vol->used_bytes > n ||
804 vol->used_bytes < n - vol->usable_leb_size) { 807 vol->used_bytes < n - vol->usable_leb_size) {
805 ubi_err("bad used_bytes"); 808 ubi_err(ubi, "bad used_bytes");
806 goto fail; 809 goto fail;
807 } 810 }
808 } 811 }
@@ -820,7 +823,7 @@ static int self_check_volume(struct ubi_device *ubi, int vol_id)
820 if (alignment != vol->alignment || data_pad != vol->data_pad || 823 if (alignment != vol->alignment || data_pad != vol->data_pad ||
821 upd_marker != vol->upd_marker || vol_type != vol->vol_type || 824 upd_marker != vol->upd_marker || vol_type != vol->vol_type ||
822 name_len != vol->name_len || strncmp(name, vol->name, name_len)) { 825 name_len != vol->name_len || strncmp(name, vol->name, name_len)) {
823 ubi_err("volume info is different"); 826 ubi_err(ubi, "volume info is different");
824 goto fail; 827 goto fail;
825 } 828 }
826 829
@@ -828,7 +831,7 @@ static int self_check_volume(struct ubi_device *ubi, int vol_id)
828 return 0; 831 return 0;
829 832
830fail: 833fail:
831 ubi_err("self-check failed for volume %d", vol_id); 834 ubi_err(ubi, "self-check failed for volume %d", vol_id);
832 if (vol) 835 if (vol)
833 ubi_dump_vol_info(vol); 836 ubi_dump_vol_info(vol);
834 ubi_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id); 837 ubi_dump_vtbl_record(&ubi->vtbl[vol_id], vol_id);
diff --git a/drivers/mtd/ubi/vtbl.c b/drivers/mtd/ubi/vtbl.c
index 07cac5f9ffb8..f8fc3081bbb4 100644
--- a/drivers/mtd/ubi/vtbl.c
+++ b/drivers/mtd/ubi/vtbl.c
@@ -30,9 +30,12 @@
30 * eraseblock stores one volume table copy, i.e. LEB 0 and LEB 1 duplicate each 30 * eraseblock stores one volume table copy, i.e. LEB 0 and LEB 1 duplicate each
31 * other. This redundancy guarantees robustness to unclean reboots. The volume 31 * other. This redundancy guarantees robustness to unclean reboots. The volume
32 * table is basically an array of volume table records. Each record contains 32 * table is basically an array of volume table records. Each record contains
33 * full information about the volume and protected by a CRC checksum. 33 * full information about the volume and protected by a CRC checksum. Note,
34 * nowadays we use the atomic LEB change operation when updating the volume
35 * table, so we do not really need 2 LEBs anymore, but we preserve the older
36 * design for backward compatibility reasons.
34 * 37 *
35 * The volume table is changed, it is first changed in RAM. Then LEB 0 is 38 * When the volume table is changed, it is first changed in RAM. Then LEB 0 is
36 * erased, and the updated volume table is written back to LEB 0. Then same for 39 * erased, and the updated volume table is written back to LEB 0. Then same for
37 * LEB 1. This scheme guarantees recoverability from unclean reboots. 40 * LEB 1. This scheme guarantees recoverability from unclean reboots.
38 * 41 *
@@ -96,12 +99,8 @@ int ubi_change_vtbl_record(struct ubi_device *ubi, int idx,
96 99
97 memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record)); 100 memcpy(&ubi->vtbl[idx], vtbl_rec, sizeof(struct ubi_vtbl_record));
98 for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { 101 for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
99 err = ubi_eba_unmap_leb(ubi, layout_vol, i); 102 err = ubi_eba_atomic_leb_change(ubi, layout_vol, i, ubi->vtbl,
100 if (err) 103 ubi->vtbl_size);
101 return err;
102
103 err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
104 ubi->vtbl_size);
105 if (err) 104 if (err)
106 return err; 105 return err;
107 } 106 }
@@ -148,12 +147,8 @@ int ubi_vtbl_rename_volumes(struct ubi_device *ubi,
148 147
149 layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)]; 148 layout_vol = ubi->volumes[vol_id2idx(ubi, UBI_LAYOUT_VOLUME_ID)];
150 for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) { 149 for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
151 err = ubi_eba_unmap_leb(ubi, layout_vol, i); 150 err = ubi_eba_atomic_leb_change(ubi, layout_vol, i, ubi->vtbl,
152 if (err) 151 ubi->vtbl_size);
153 return err;
154
155 err = ubi_eba_write_leb(ubi, layout_vol, i, ubi->vtbl, 0,
156 ubi->vtbl_size);
157 if (err) 152 if (err)
158 return err; 153 return err;
159 } 154 }
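Both writers of the layout volume now share one atomic primitive instead of the old unmap-then-write pair. The point is crash safety: unmapping first left a window during which LEB 0 or LEB 1 had no valid copy on flash, whereas ubi_eba_atomic_leb_change() writes the new table to a spare PEB and switches the mapping in a single step. The loop both call sites were reduced to:

	for (i = 0; i < UBI_LAYOUT_VOLUME_EBS; i++) {
		/* write the new table to a fresh PEB, then remap atomically */
		err = ubi_eba_atomic_leb_change(ubi, layout_vol, i,
						ubi->vtbl, ubi->vtbl_size);
		if (err)
			return err;
	}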
@@ -190,7 +185,7 @@ static int vtbl_check(const struct ubi_device *ubi,
190 185
191 crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC); 186 crc = crc32(UBI_CRC32_INIT, &vtbl[i], UBI_VTBL_RECORD_SIZE_CRC);
192 if (be32_to_cpu(vtbl[i].crc) != crc) { 187 if (be32_to_cpu(vtbl[i].crc) != crc) {
193 ubi_err("bad CRC at record %u: %#08x, not %#08x", 188 ubi_err(ubi, "bad CRC at record %u: %#08x, not %#08x",
194 i, crc, be32_to_cpu(vtbl[i].crc)); 189 i, crc, be32_to_cpu(vtbl[i].crc));
195 ubi_dump_vtbl_record(&vtbl[i], i); 190 ubi_dump_vtbl_record(&vtbl[i], i);
196 return 1; 191 return 1;
@@ -224,7 +219,7 @@ static int vtbl_check(const struct ubi_device *ubi,
224 219
225 n = ubi->leb_size % alignment; 220 n = ubi->leb_size % alignment;
226 if (data_pad != n) { 221 if (data_pad != n) {
227 ubi_err("bad data_pad, has to be %d", n); 222 ubi_err(ubi, "bad data_pad, has to be %d", n);
228 err = 6; 223 err = 6;
229 goto bad; 224 goto bad;
230 } 225 }
@@ -240,7 +235,7 @@ static int vtbl_check(const struct ubi_device *ubi,
240 } 235 }
241 236
242 if (reserved_pebs > ubi->good_peb_count) { 237 if (reserved_pebs > ubi->good_peb_count) {
243 ubi_err("too large reserved_pebs %d, good PEBs %d", 238 ubi_err(ubi, "too large reserved_pebs %d, good PEBs %d",
244 reserved_pebs, ubi->good_peb_count); 239 reserved_pebs, ubi->good_peb_count);
245 err = 9; 240 err = 9;
246 goto bad; 241 goto bad;
@@ -270,7 +265,7 @@ static int vtbl_check(const struct ubi_device *ubi,
270 265
271 if (len1 > 0 && len1 == len2 && 266 if (len1 > 0 && len1 == len2 &&
272 !strncmp(vtbl[i].name, vtbl[n].name, len1)) { 267 !strncmp(vtbl[i].name, vtbl[n].name, len1)) {
273 ubi_err("volumes %d and %d have the same name \"%s\"", 268 ubi_err(ubi, "volumes %d and %d have the same name \"%s\"",
274 i, n, vtbl[i].name); 269 i, n, vtbl[i].name);
275 ubi_dump_vtbl_record(&vtbl[i], i); 270 ubi_dump_vtbl_record(&vtbl[i], i);
276 ubi_dump_vtbl_record(&vtbl[n], n); 271 ubi_dump_vtbl_record(&vtbl[n], n);
@@ -282,7 +277,7 @@ static int vtbl_check(const struct ubi_device *ubi,
282 return 0; 277 return 0;
283 278
284bad: 279bad:
285 ubi_err("volume table check failed: record %d, error %d", i, err); 280 ubi_err(ubi, "volume table check failed: record %d, error %d", i, err);
286 ubi_dump_vtbl_record(&vtbl[i], i); 281 ubi_dump_vtbl_record(&vtbl[i], i);
287 return -EINVAL; 282 return -EINVAL;
288} 283}
@@ -446,11 +441,11 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
446 leb_corrupted[1] = memcmp(leb[0], leb[1], 441 leb_corrupted[1] = memcmp(leb[0], leb[1],
447 ubi->vtbl_size); 442 ubi->vtbl_size);
448 if (leb_corrupted[1]) { 443 if (leb_corrupted[1]) {
449 ubi_warn("volume table copy #2 is corrupted"); 444 ubi_warn(ubi, "volume table copy #2 is corrupted");
450 err = create_vtbl(ubi, ai, 1, leb[0]); 445 err = create_vtbl(ubi, ai, 1, leb[0]);
451 if (err) 446 if (err)
452 goto out_free; 447 goto out_free;
453 ubi_msg("volume table was restored"); 448 ubi_msg(ubi, "volume table was restored");
454 } 449 }
455 450
456 /* Both LEB 1 and LEB 2 are OK and consistent */ 451 /* Both LEB 1 and LEB 2 are OK and consistent */
@@ -465,15 +460,15 @@ static struct ubi_vtbl_record *process_lvol(struct ubi_device *ubi,
465 } 460 }
466 if (leb_corrupted[1]) { 461 if (leb_corrupted[1]) {
467 /* Both LEB 0 and LEB 1 are corrupted */ 462 /* Both LEB 0 and LEB 1 are corrupted */
468 ubi_err("both volume tables are corrupted"); 463 ubi_err(ubi, "both volume tables are corrupted");
469 goto out_free; 464 goto out_free;
470 } 465 }
471 466
472 ubi_warn("volume table copy #1 is corrupted"); 467 ubi_warn(ubi, "volume table copy #1 is corrupted");
473 err = create_vtbl(ubi, ai, 0, leb[1]); 468 err = create_vtbl(ubi, ai, 0, leb[1]);
474 if (err) 469 if (err)
475 goto out_free; 470 goto out_free;
476 ubi_msg("volume table was restored"); 471 ubi_msg(ubi, "volume table was restored");
477 472
478 vfree(leb[0]); 473 vfree(leb[0]);
479 return leb[1]; 474 return leb[1];
@@ -562,7 +557,7 @@ static int init_volumes(struct ubi_device *ubi,
562 if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) { 557 if (vtbl[i].flags & UBI_VTBL_AUTORESIZE_FLG) {
563 /* Auto re-size flag may be set only for one volume */ 558 /* Auto re-size flag may be set only for one volume */
564 if (ubi->autoresize_vol_id != -1) { 559 if (ubi->autoresize_vol_id != -1) {
565 ubi_err("more than one auto-resize volume (%d and %d)", 560 ubi_err(ubi, "more than one auto-resize volume (%d and %d)",
566 ubi->autoresize_vol_id, i); 561 ubi->autoresize_vol_id, i);
567 kfree(vol); 562 kfree(vol);
568 return -EINVAL; 563 return -EINVAL;
@@ -608,7 +603,7 @@ static int init_volumes(struct ubi_device *ubi,
608 * We found a static volume which misses several 603 * We found a static volume which misses several
609 * eraseblocks. Treat it as corrupted. 604 * eraseblocks. Treat it as corrupted.
610 */ 605 */
611 ubi_warn("static volume %d misses %d LEBs - corrupted", 606 ubi_warn(ubi, "static volume %d misses %d LEBs - corrupted",
612 av->vol_id, av->used_ebs - av->leb_count); 607 av->vol_id, av->used_ebs - av->leb_count);
613 vol->corrupted = 1; 608 vol->corrupted = 1;
614 continue; 609 continue;
@@ -646,10 +641,10 @@ static int init_volumes(struct ubi_device *ubi,
646 vol->ubi = ubi; 641 vol->ubi = ubi;
647 642
648 if (reserved_pebs > ubi->avail_pebs) { 643 if (reserved_pebs > ubi->avail_pebs) {
649 ubi_err("not enough PEBs, required %d, available %d", 644 ubi_err(ubi, "not enough PEBs, required %d, available %d",
650 reserved_pebs, ubi->avail_pebs); 645 reserved_pebs, ubi->avail_pebs);
651 if (ubi->corr_peb_count) 646 if (ubi->corr_peb_count)
652 ubi_err("%d PEBs are corrupted and not used", 647 ubi_err(ubi, "%d PEBs are corrupted and not used",
653 ubi->corr_peb_count); 648 ubi->corr_peb_count);
654 } 649 }
655 ubi->rsvd_pebs += reserved_pebs; 650 ubi->rsvd_pebs += reserved_pebs;
@@ -660,13 +655,14 @@ static int init_volumes(struct ubi_device *ubi,
660 655
661/** 656/**
662 * check_av - check volume attaching information. 657 * check_av - check volume attaching information.
658 * @ubi: UBI device description object
663 * @vol: UBI volume description object 659 * @vol: UBI volume description object
664 * @av: volume attaching information 660 * @av: volume attaching information
665 * 661 *
666 * This function returns zero if the volume attaching information is consistent 662 * This function returns zero if the volume attaching information is consistent
667 * with the data read from the volume table, and %-EINVAL if not. 663 * with the data read from the volume table, and %-EINVAL if not.
668 */ 664 */
669static int check_av(const struct ubi_volume *vol, 665static int check_av(const struct ubi_device *ubi, const struct ubi_volume *vol,
670 const struct ubi_ainf_volume *av) 666 const struct ubi_ainf_volume *av)
671{ 667{
672 int err; 668 int err;
@@ -694,7 +690,7 @@ static int check_av(const struct ubi_volume *vol,
694 return 0; 690 return 0;
695 691
696bad: 692bad:
697 ubi_err("bad attaching information, error %d", err); 693 ubi_err(ubi, "bad attaching information, error %d", err);
698 ubi_dump_av(av); 694 ubi_dump_av(av);
699 ubi_dump_vol_info(vol); 695 ubi_dump_vol_info(vol);
700 return -EINVAL; 696 return -EINVAL;
@@ -718,14 +714,15 @@ static int check_attaching_info(const struct ubi_device *ubi,
718 struct ubi_volume *vol; 714 struct ubi_volume *vol;
719 715
720 if (ai->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) { 716 if (ai->vols_found > UBI_INT_VOL_COUNT + ubi->vtbl_slots) {
721 ubi_err("found %d volumes while attaching, maximum is %d + %d", 717 ubi_err(ubi, "found %d volumes while attaching, maximum is %d + %d",
722 ai->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots); 718 ai->vols_found, UBI_INT_VOL_COUNT, ubi->vtbl_slots);
723 return -EINVAL; 719 return -EINVAL;
724 } 720 }
725 721
726 if (ai->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT && 722 if (ai->highest_vol_id >= ubi->vtbl_slots + UBI_INT_VOL_COUNT &&
727 ai->highest_vol_id < UBI_INTERNAL_VOL_START) { 723 ai->highest_vol_id < UBI_INTERNAL_VOL_START) {
728 ubi_err("too large volume ID %d found", ai->highest_vol_id); 724 ubi_err(ubi, "too large volume ID %d found",
725 ai->highest_vol_id);
729 return -EINVAL; 726 return -EINVAL;
730 } 727 }
731 728
@@ -753,10 +750,10 @@ static int check_attaching_info(const struct ubi_device *ubi,
753 * reboot while the volume was being removed. Discard 750 * reboot while the volume was being removed. Discard
754 * these eraseblocks. 751 * these eraseblocks.
755 */ 752 */
756 ubi_msg("finish volume %d removal", av->vol_id); 753 ubi_msg(ubi, "finish volume %d removal", av->vol_id);
757 ubi_remove_av(ai, av); 754 ubi_remove_av(ai, av);
758 } else if (av) { 755 } else if (av) {
759 err = check_av(vol, av); 756 err = check_av(ubi, vol, av);
760 if (err) 757 if (err)
761 return err; 758 return err;
762 } 759 }
@@ -807,13 +804,13 @@ int ubi_read_volume_table(struct ubi_device *ubi, struct ubi_attach_info *ai)
807 if (IS_ERR(ubi->vtbl)) 804 if (IS_ERR(ubi->vtbl))
808 return PTR_ERR(ubi->vtbl); 805 return PTR_ERR(ubi->vtbl);
809 } else { 806 } else {
810 ubi_err("the layout volume was not found"); 807 ubi_err(ubi, "the layout volume was not found");
811 return -EINVAL; 808 return -EINVAL;
812 } 809 }
813 } else { 810 } else {
814 if (av->leb_count > UBI_LAYOUT_VOLUME_EBS) { 811 if (av->leb_count > UBI_LAYOUT_VOLUME_EBS) {
815 /* This must not happen with proper UBI images */ 812 /* This must not happen with proper UBI images */
816 ubi_err("too many LEBs (%d) in layout volume", 813 ubi_err(ubi, "too many LEBs (%d) in layout volume",
817 av->leb_count); 814 av->leb_count);
818 return -EINVAL; 815 return -EINVAL;
819 } 816 }
@@ -862,7 +859,7 @@ static void self_vtbl_check(const struct ubi_device *ubi)
862 return; 859 return;
863 860
864 if (vtbl_check(ubi, ubi->vtbl)) { 861 if (vtbl_check(ubi, ubi->vtbl)) {
865 ubi_err("self-check failed"); 862 ubi_err(ubi, "self-check failed");
866 BUG(); 863 BUG();
867 } 864 }
868} 865}
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 6654f191868e..834f6fe1f5fa 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -253,7 +253,7 @@ static int do_work(struct ubi_device *ubi)
253 */ 253 */
254 err = wrk->func(ubi, wrk, 0); 254 err = wrk->func(ubi, wrk, 0);
255 if (err) 255 if (err)
256 ubi_err("work failed with error code %d", err); 256 ubi_err(ubi, "work failed with error code %d", err);
257 up_read(&ubi->work_sem); 257 up_read(&ubi->work_sem);
258 258
259 return err; 259 return err;
@@ -470,8 +470,11 @@ struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
470{ 470{
471 struct ubi_wl_entry *e = NULL; 471 struct ubi_wl_entry *e = NULL;
472 472
473 if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1)) 473 if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1)) {
474 ubi_warn(ubi, "Can't get peb for fastmap:anchor=%d, free_cnt=%d, reserved=%d",
475 anchor, ubi->free_count, ubi->beb_rsvd_pebs);
474 goto out; 476 goto out;
477 }
475 478
476 if (anchor) 479 if (anchor)
477 e = find_anchor_wl_entry(&ubi->free); 480 e = find_anchor_wl_entry(&ubi->free);
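Given the reworked ubi_warn() above, a fastmap anchor-PEB shortage that previously failed silently now leaves a trace in the kernel log, roughly like this (device number and counters are illustrative):

	UBI-0 warning: ubi_wl_get_fm_peb: Can't get PEB for fastmap: anchor=1, free_cnt=2, reserved=2

The counters make the cause visible: the fastmap pool deliberately refuses to dip below the bad-PEB reserve (free_count - beb_rsvd_pebs < 1), so "no PEB" here often means the reserve is holding the last free blocks, not that the device is full.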
@@ -507,7 +510,7 @@ static int __wl_get_peb(struct ubi_device *ubi)
507retry: 510retry:
508 if (!ubi->free.rb_node) { 511 if (!ubi->free.rb_node) {
509 if (ubi->works_count == 0) { 512 if (ubi->works_count == 0) {
510 ubi_err("no free eraseblocks"); 513 ubi_err(ubi, "no free eraseblocks");
511 ubi_assert(list_empty(&ubi->works)); 514 ubi_assert(list_empty(&ubi->works));
512 return -ENOSPC; 515 return -ENOSPC;
513 } 516 }
@@ -520,7 +523,7 @@ retry:
520 523
521 e = find_mean_wl_entry(ubi, &ubi->free); 524 e = find_mean_wl_entry(ubi, &ubi->free);
522 if (!e) { 525 if (!e) {
523 ubi_err("no free eraseblocks"); 526 ubi_err(ubi, "no free eraseblocks");
524 return -ENOSPC; 527 return -ENOSPC;
525 } 528 }
526 529
@@ -692,7 +695,8 @@ int ubi_wl_get_peb(struct ubi_device *ubi)
692 err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset, 695 err = ubi_self_check_all_ff(ubi, peb, ubi->vid_hdr_aloffset,
693 ubi->peb_size - ubi->vid_hdr_aloffset); 696 ubi->peb_size - ubi->vid_hdr_aloffset);
694 if (err) { 697 if (err) {
695 ubi_err("new PEB %d does not contain all 0xFF bytes", peb); 698 ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes",
699 peb);
696 return err; 700 return err;
697 } 701 }
698 702
@@ -760,7 +764,7 @@ static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
760 * Erase counter overflow. Upgrade UBI and use 64-bit 764 * Erase counter overflow. Upgrade UBI and use 64-bit
761 * erase counters internally. 765 * erase counters internally.
762 */ 766 */
763 ubi_err("erase counter overflow at PEB %d, EC %llu", 767 ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
764 e->pnum, ec); 768 e->pnum, ec);
765 err = -EINVAL; 769 err = -EINVAL;
766 goto out_free; 770 goto out_free;
@@ -1137,7 +1141,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
1137 goto out_not_moved; 1141 goto out_not_moved;
1138 } 1142 }
1139 1143
1140 ubi_err("error %d while reading VID header from PEB %d", 1144 ubi_err(ubi, "error %d while reading VID header from PEB %d",
1141 err, e1->pnum); 1145 err, e1->pnum);
1142 goto out_error; 1146 goto out_error;
1143 } 1147 }
@@ -1181,7 +1185,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
1181 * UBI from trying to move it over and over again. 1185 * UBI from trying to move it over and over again.
1182 */ 1186 */
1183 if (ubi->erroneous_peb_count > ubi->max_erroneous) { 1187 if (ubi->erroneous_peb_count > ubi->max_erroneous) {
1184 ubi_err("too many erroneous eraseblocks (%d)", 1188 ubi_err(ubi, "too many erroneous eraseblocks (%d)",
1185 ubi->erroneous_peb_count); 1189 ubi->erroneous_peb_count);
1186 goto out_error; 1190 goto out_error;
1187 } 1191 }
@@ -1197,7 +1201,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
1197 1201
1198 /* The PEB has been successfully moved */ 1202 /* The PEB has been successfully moved */
1199 if (scrubbing) 1203 if (scrubbing)
1200 ubi_msg("scrubbed PEB %d (LEB %d:%d), data moved to PEB %d", 1204 ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
1201 e1->pnum, vol_id, lnum, e2->pnum); 1205 e1->pnum, vol_id, lnum, e2->pnum);
1202 ubi_free_vid_hdr(ubi, vid_hdr); 1206 ubi_free_vid_hdr(ubi, vid_hdr);
1203 1207
@@ -1212,7 +1216,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
1212 1216
1213 err = do_sync_erase(ubi, e1, vol_id, lnum, 0); 1217 err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
1214 if (err) { 1218 if (err) {
1215 kmem_cache_free(ubi_wl_entry_slab, e1);
1216 if (e2) 1219 if (e2)
1217 kmem_cache_free(ubi_wl_entry_slab, e2); 1220 kmem_cache_free(ubi_wl_entry_slab, e2);
1218 goto out_ro; 1221 goto out_ro;
@@ -1226,10 +1229,8 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
1226 dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase", 1229 dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
1227 e2->pnum, vol_id, lnum); 1230 e2->pnum, vol_id, lnum);
1228 err = do_sync_erase(ubi, e2, vol_id, lnum, 0); 1231 err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
1229 if (err) { 1232 if (err)
1230 kmem_cache_free(ubi_wl_entry_slab, e2);
1231 goto out_ro; 1233 goto out_ro;
1232 }
1233 } 1234 }
1234 1235
1235 dbg_wl("done"); 1236 dbg_wl("done");
@@ -1265,19 +1266,18 @@ out_not_moved:
1265 1266
1266 ubi_free_vid_hdr(ubi, vid_hdr); 1267 ubi_free_vid_hdr(ubi, vid_hdr);
1267 err = do_sync_erase(ubi, e2, vol_id, lnum, torture); 1268 err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
1268 if (err) { 1269 if (err)
1269 kmem_cache_free(ubi_wl_entry_slab, e2);
1270 goto out_ro; 1270 goto out_ro;
1271 } 1271
1272 mutex_unlock(&ubi->move_mutex); 1272 mutex_unlock(&ubi->move_mutex);
1273 return 0; 1273 return 0;
1274 1274
1275out_error: 1275out_error:
1276 if (vol_id != -1) 1276 if (vol_id != -1)
1277 ubi_err("error %d while moving PEB %d to PEB %d", 1277 ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
1278 err, e1->pnum, e2->pnum); 1278 err, e1->pnum, e2->pnum);
1279 else 1279 else
1280 ubi_err("error %d while moving PEB %d (LEB %d:%d) to PEB %d", 1280 ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
1281 err, e1->pnum, vol_id, lnum, e2->pnum); 1281 err, e1->pnum, vol_id, lnum, e2->pnum);
1282 spin_lock(&ubi->wl_lock); 1282 spin_lock(&ubi->wl_lock);
1283 ubi->move_from = ubi->move_to = NULL; 1283 ubi->move_from = ubi->move_to = NULL;
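The kmem_cache_free() calls dropped in the two hunks above remove a double free: when do_sync_erase() fails, the erase path disposes of the wl entry on its own, so freeing it again in wear_leveling_worker() handed the same object to the slab cache twice. The fixed error handling is simply:

	err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
	if (err)
		goto out_ro;	/* e2 is already freed by the erase path */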
@@ -1458,7 +1458,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1458 return err; 1458 return err;
1459 } 1459 }
1460 1460
1461 ubi_err("failed to erase PEB %d, error %d", pnum, err); 1461 ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);
1462 kfree(wl_wrk); 1462 kfree(wl_wrk);
1463 1463
1464 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN || 1464 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
@@ -1486,7 +1486,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1486 /* It is %-EIO, the PEB went bad */ 1486 /* It is %-EIO, the PEB went bad */
1487 1487
1488 if (!ubi->bad_allowed) { 1488 if (!ubi->bad_allowed) {
1489 ubi_err("bad physical eraseblock %d detected", pnum); 1489 ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
1490 goto out_ro; 1490 goto out_ro;
1491 } 1491 }
1492 1492
@@ -1494,7 +1494,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1494 if (ubi->beb_rsvd_pebs == 0) { 1494 if (ubi->beb_rsvd_pebs == 0) {
1495 if (ubi->avail_pebs == 0) { 1495 if (ubi->avail_pebs == 0) {
1496 spin_unlock(&ubi->volumes_lock); 1496 spin_unlock(&ubi->volumes_lock);
1497 ubi_err("no reserved/available physical eraseblocks"); 1497 ubi_err(ubi, "no reserved/available physical eraseblocks");
1498 goto out_ro; 1498 goto out_ro;
1499 } 1499 }
1500 ubi->avail_pebs -= 1; 1500 ubi->avail_pebs -= 1;
@@ -1502,7 +1502,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1502 } 1502 }
1503 spin_unlock(&ubi->volumes_lock); 1503 spin_unlock(&ubi->volumes_lock);
1504 1504
1505 ubi_msg("mark PEB %d as bad", pnum); 1505 ubi_msg(ubi, "mark PEB %d as bad", pnum);
1506 err = ubi_io_mark_bad(ubi, pnum); 1506 err = ubi_io_mark_bad(ubi, pnum);
1507 if (err) 1507 if (err)
1508 goto out_ro; 1508 goto out_ro;
@@ -1523,11 +1523,12 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
1523 ubi->good_peb_count -= 1; 1523 ubi->good_peb_count -= 1;
1524 ubi_calculate_reserved(ubi); 1524 ubi_calculate_reserved(ubi);
1525 if (available_consumed) 1525 if (available_consumed)
1526 ubi_warn("no PEBs in the reserved pool, used an available PEB"); 1526 ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
1527 else if (ubi->beb_rsvd_pebs) 1527 else if (ubi->beb_rsvd_pebs)
1528 ubi_msg("%d PEBs left in the reserve", ubi->beb_rsvd_pebs); 1528 ubi_msg(ubi, "%d PEBs left in the reserve",
1529 ubi->beb_rsvd_pebs);
1529 else 1530 else
1530 ubi_warn("last PEB from the reserve was used"); 1531 ubi_warn(ubi, "last PEB from the reserve was used");
1531 spin_unlock(&ubi->volumes_lock); 1532 spin_unlock(&ubi->volumes_lock);
1532 1533
1533 return err; 1534 return err;
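For reference, the policy these messages annotate is a two-level fallback (condensed sketch, paraphrasing the surrounding erase_worker() code rather than quoting it):

	if (ubi->beb_rsvd_pebs == 0) {		/* dedicated reserve exhausted? */
		if (ubi->avail_pebs == 0)
			goto out_ro;		/* nothing left: go read-only */
		ubi->avail_pebs -= 1;		/* borrow from the free pool */
		available_consumed = 1;		/* triggers the warning above */
	}

Marking a PEB bad consumes the dedicated reserve first; only when that is empty does UBI borrow an ordinary available PEB (and warn about it), and only when both are gone does it drop to read-only mode.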
@@ -1613,7 +1614,7 @@ retry:
1613 } else { 1614 } else {
1614 err = prot_queue_del(ubi, e->pnum); 1615 err = prot_queue_del(ubi, e->pnum);
1615 if (err) { 1616 if (err) {
1616 ubi_err("PEB %d not found", pnum); 1617 ubi_err(ubi, "PEB %d not found", pnum);
1617 ubi_ro_mode(ubi); 1618 ubi_ro_mode(ubi);
1618 spin_unlock(&ubi->wl_lock); 1619 spin_unlock(&ubi->wl_lock);
1619 return err; 1620 return err;
@@ -1646,7 +1647,7 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1646{ 1647{
1647 struct ubi_wl_entry *e; 1648 struct ubi_wl_entry *e;
1648 1649
1649 ubi_msg("schedule PEB %d for scrubbing", pnum); 1650 ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);
1650 1651
1651retry: 1652retry:
1652 spin_lock(&ubi->wl_lock); 1653 spin_lock(&ubi->wl_lock);
@@ -1678,7 +1679,7 @@ retry:
1678 1679
1679 err = prot_queue_del(ubi, e->pnum); 1680 err = prot_queue_del(ubi, e->pnum);
1680 if (err) { 1681 if (err) {
1681 ubi_err("PEB %d not found", pnum); 1682 ubi_err(ubi, "PEB %d not found", pnum);
1682 ubi_ro_mode(ubi); 1683 ubi_ro_mode(ubi);
1683 spin_unlock(&ubi->wl_lock); 1684 spin_unlock(&ubi->wl_lock);
1684 return err; 1685 return err;
@@ -1798,15 +1799,18 @@ int ubi_thread(void *u)
1798 int failures = 0; 1799 int failures = 0;
1799 struct ubi_device *ubi = u; 1800 struct ubi_device *ubi = u;
1800 1801
1801 ubi_msg("background thread \"%s\" started, PID %d", 1802 ubi_msg(ubi, "background thread \"%s\" started, PID %d",
1802 ubi->bgt_name, task_pid_nr(current)); 1803 ubi->bgt_name, task_pid_nr(current));
1803 1804
1804 set_freezable(); 1805 set_freezable();
1805 for (;;) { 1806 for (;;) {
1806 int err; 1807 int err;
1807 1808
1808 if (kthread_should_stop()) 1809 if (kthread_should_stop()) {
1810 ubi_msg(ubi, "background thread \"%s\" should stop, PID %d",
1811 ubi->bgt_name, task_pid_nr(current));
1809 break; 1812 break;
1813 }
1810 1814
1811 if (try_to_freeze()) 1815 if (try_to_freeze())
1812 continue; 1816 continue;
@@ -1823,14 +1827,14 @@ int ubi_thread(void *u)
1823 1827
1824 err = do_work(ubi); 1828 err = do_work(ubi);
1825 if (err) { 1829 if (err) {
1826 ubi_err("%s: work failed with error code %d", 1830 ubi_err(ubi, "%s: work failed with error code %d",
1827 ubi->bgt_name, err); 1831 ubi->bgt_name, err);
1828 if (failures++ > WL_MAX_FAILURES) { 1832 if (failures++ > WL_MAX_FAILURES) {
1829 /* 1833 /*
1830 * Too many failures, disable the thread and 1834 * Too many failures, disable the thread and
1831 * switch to read-only mode. 1835 * switch to read-only mode.
1832 */ 1836 */
1833 ubi_msg("%s: %d consecutive failures", 1837 ubi_msg(ubi, "%s: %d consecutive failures",
1834 ubi->bgt_name, WL_MAX_FAILURES); 1838 ubi->bgt_name, WL_MAX_FAILURES);
1835 ubi_ro_mode(ubi); 1839 ubi_ro_mode(ubi);
1836 ubi->thread_enabled = 0; 1840 ubi->thread_enabled = 0;
@@ -1981,10 +1985,10 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1981#endif 1985#endif
1982 1986
1983 if (ubi->avail_pebs < reserved_pebs) { 1987 if (ubi->avail_pebs < reserved_pebs) {
1984 ubi_err("no enough physical eraseblocks (%d, need %d)", 1988 ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
1985 ubi->avail_pebs, reserved_pebs); 1989 ubi->avail_pebs, reserved_pebs);
1986 if (ubi->corr_peb_count) 1990 if (ubi->corr_peb_count)
1987 ubi_err("%d PEBs are corrupted and not used", 1991 ubi_err(ubi, "%d PEBs are corrupted and not used",
1988 ubi->corr_peb_count); 1992 ubi->corr_peb_count);
1989 goto out_free; 1993 goto out_free;
1990 } 1994 }
@@ -2072,8 +2076,8 @@ static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
2072 2076
2073 read_ec = be64_to_cpu(ec_hdr->ec); 2077 read_ec = be64_to_cpu(ec_hdr->ec);
2074 if (ec != read_ec && read_ec - ec > 1) { 2078 if (ec != read_ec && read_ec - ec > 1) {
2075 ubi_err("self-check failed for PEB %d", pnum); 2079 ubi_err(ubi, "self-check failed for PEB %d", pnum);
2076 ubi_err("read EC is %lld, should be %d", read_ec, ec); 2080 ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
2077 dump_stack(); 2081 dump_stack();
2078 err = 1; 2082 err = 1;
2079 } else 2083 } else
@@ -2102,7 +2106,7 @@ static int self_check_in_wl_tree(const struct ubi_device *ubi,
2102 if (in_wl_tree(e, root)) 2106 if (in_wl_tree(e, root))
2103 return 0; 2107 return 0;
2104 2108
2105 ubi_err("self-check failed for PEB %d, EC %d, RB-tree %p ", 2109 ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p",
2106 e->pnum, e->ec, root); 2110 e->pnum, e->ec, root);
2107 dump_stack(); 2111 dump_stack();
2108 return -EINVAL; 2112 return -EINVAL;
@@ -2130,7 +2134,7 @@ static int self_check_in_pq(const struct ubi_device *ubi,
2130 if (p == e) 2134 if (p == e)
2131 return 0; 2135 return 0;
2132 2136
2133 ubi_err("self-check failed for PEB %d, EC %d, Protect queue", 2137 ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
2134 e->pnum, e->ec); 2138 e->pnum, e->ec);
2135 dump_stack(); 2139 dump_stack();
2136 return -EINVAL; 2140 return -EINVAL;
diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
index 72fb86b9aa24..c9946c6c119e 100644
--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
@@ -48,7 +48,7 @@ MODULE_DEVICE_TABLE(pci, atl1c_pci_tbl);
48 48
49MODULE_AUTHOR("Jie Yang"); 49MODULE_AUTHOR("Jie Yang");
50MODULE_AUTHOR("Qualcomm Atheros Inc., <nic-devel@qualcomm.com>"); 50MODULE_AUTHOR("Qualcomm Atheros Inc., <nic-devel@qualcomm.com>");
51MODULE_DESCRIPTION("Qualcom Atheros 100/1000M Ethernet Network Driver"); 51MODULE_DESCRIPTION("Qualcomm Atheros 100/1000M Ethernet Network Driver");
52MODULE_LICENSE("GPL"); 52MODULE_LICENSE("GPL");
53MODULE_VERSION(ATL1C_DRV_VERSION); 53MODULE_VERSION(ATL1C_DRV_VERSION);
54 54
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index 93caf8e68901..2399a3921762 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -1,12 +1,13 @@
1config ATH5K 1config ATH5K
2 tristate "Atheros 5xxx wireless cards support" 2 tristate "Atheros 5xxx wireless cards support"
3 depends on PCI && MAC80211 3 depends on (PCI || ATH25) && MAC80211
4 select ATH_COMMON 4 select ATH_COMMON
5 select MAC80211_LEDS 5 select MAC80211_LEDS
6 select LEDS_CLASS 6 select LEDS_CLASS
7 select NEW_LEDS 7 select NEW_LEDS
8 select AVERAGE 8 select AVERAGE
9 select ATH5K_PCI 9 select ATH5K_AHB if ATH25
10 select ATH5K_PCI if !ATH25
10 ---help--- 11 ---help---
11 This module adds support for wireless adapters based on 12 This module adds support for wireless adapters based on
12 Atheros 5xxx chipset. 13 Atheros 5xxx chipset.
@@ -51,9 +52,16 @@ config ATH5K_TRACER
51 52
52 If unsure, say N. 53 If unsure, say N.
53 54
55config ATH5K_AHB
56 bool "Atheros 5xxx AHB bus support"
57 depends on ATH25
58 ---help---
59 This adds support for WiSoC type chipsets of the 5xxx Atheros
60 family.
61
54config ATH5K_PCI 62config ATH5K_PCI
55 bool "Atheros 5xxx PCI bus support" 63 bool "Atheros 5xxx PCI bus support"
56 depends on PCI 64 depends on (!ATH25 && PCI)
57 ---help--- 65 ---help---
58 This adds support for PCI type chipsets of the 5xxx Atheros 66 This adds support for PCI type chipsets of the 5xxx Atheros
59 family. 67 family.
diff --git a/drivers/net/wireless/ath/ath5k/Makefile b/drivers/net/wireless/ath/ath5k/Makefile
index 51e2d8668041..1b3a34f7f224 100644
--- a/drivers/net/wireless/ath/ath5k/Makefile
+++ b/drivers/net/wireless/ath/ath5k/Makefile
@@ -17,5 +17,6 @@ ath5k-y += ani.o
17ath5k-y += sysfs.o 17ath5k-y += sysfs.o
18ath5k-y += mac80211-ops.o 18ath5k-y += mac80211-ops.o
19ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o 19ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o
20ath5k-$(CONFIG_ATH5K_AHB) += ahb.o
20ath5k-$(CONFIG_ATH5K_PCI) += pci.o 21ath5k-$(CONFIG_ATH5K_PCI) += pci.o
21obj-$(CONFIG_ATH5K) += ath5k.o 22obj-$(CONFIG_ATH5K) += ath5k.o
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
new file mode 100644
index 000000000000..8f387cf67340
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -0,0 +1,234 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org>
4 * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org>
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <linux/nl80211.h>
20#include <linux/platform_device.h>
21#include <linux/etherdevice.h>
22#include <linux/export.h>
23#include <ath25_platform.h>
24#include "ath5k.h"
25#include "debug.h"
26#include "base.h"
27#include "reg.h"
28
29/* return bus cachesize in 4B word units */
30static void ath5k_ahb_read_cachesize(struct ath_common *common, int *csz)
31{
32 *csz = L1_CACHE_BYTES >> 2;
33}
34
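/* These SoCs have no physical EEPROM; reads are served from the radio
 * calibration data in the board config passed in as platform data. */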
35static bool
36ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
37{
38 struct ath5k_hw *ah = common->priv;
39 struct platform_device *pdev = to_platform_device(ah->dev);
40 struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
41 u16 *eeprom, *eeprom_end;
42
43 eeprom = (u16 *) bcfg->radio;
44 eeprom_end = ((void *) bcfg->config) + BOARD_CONFIG_BUFSZ;
45
46 eeprom += off;
47 if (eeprom > eeprom_end)
48 return false;
49
50 *data = *eeprom;
51 return true;
52}
53
54int ath5k_hw_read_srev(struct ath5k_hw *ah)
55{
56 struct platform_device *pdev = to_platform_device(ah->dev);
57 struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
58 ah->ah_mac_srev = bcfg->devid;
59 return 0;
60}
61
62static int ath5k_ahb_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
63{
64 struct platform_device *pdev = to_platform_device(ah->dev);
65 struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
66 u8 *cfg_mac;
67
68 if (to_platform_device(ah->dev)->id == 0)
69 cfg_mac = bcfg->config->wlan0_mac;
70 else
71 cfg_mac = bcfg->config->wlan1_mac;
72
73 memcpy(mac, cfg_mac, ETH_ALEN);
74 return 0;
75}
76
77static const struct ath_bus_ops ath_ahb_bus_ops = {
78 .ath_bus_type = ATH_AHB,
79 .read_cachesize = ath5k_ahb_read_cachesize,
80 .eeprom_read = ath5k_ahb_eeprom_read,
81 .eeprom_read_mac = ath5k_ahb_eeprom_read_mac,
82};
83
 84/* Initialization */
85static int ath_ahb_probe(struct platform_device *pdev)
86{
87 struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
88 struct ath5k_hw *ah;
89 struct ieee80211_hw *hw;
90 struct resource *res;
91 void __iomem *mem;
92 int irq;
93 int ret = 0;
94 u32 reg;
95
96 if (!dev_get_platdata(&pdev->dev)) {
97 dev_err(&pdev->dev, "no platform data specified\n");
98 ret = -EINVAL;
99 goto err_out;
100 }
101
102 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
103 if (res == NULL) {
104 dev_err(&pdev->dev, "no memory resource found\n");
105 ret = -ENXIO;
106 goto err_out;
107 }
108
109 mem = ioremap_nocache(res->start, resource_size(res));
110 if (mem == NULL) {
111 dev_err(&pdev->dev, "ioremap failed\n");
112 ret = -ENOMEM;
113 goto err_out;
114 }
115
116 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
117 if (res == NULL) {
118 dev_err(&pdev->dev, "no IRQ resource found\n");
119 ret = -ENXIO;
120 goto err_iounmap;
121 }
122
123 irq = res->start;
124
125 hw = ieee80211_alloc_hw(sizeof(struct ath5k_hw), &ath5k_hw_ops);
126 if (hw == NULL) {
127 dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
128 ret = -ENOMEM;
129 goto err_iounmap;
130 }
131
132 ah = hw->priv;
133 ah->hw = hw;
134 ah->dev = &pdev->dev;
135 ah->iobase = mem;
136 ah->irq = irq;
137 ah->devid = bcfg->devid;
138
139 if (bcfg->devid >= AR5K_SREV_AR2315_R6) {
140 /* Enable WMAC AHB arbitration */
141 reg = ioread32((void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
142 reg |= AR5K_AR2315_AHB_ARB_CTL_WLAN;
143 iowrite32(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
144
145 /* Enable global WMAC swapping */
146 reg = ioread32((void __iomem *) AR5K_AR2315_BYTESWAP);
147 reg |= AR5K_AR2315_BYTESWAP_WMAC;
148 iowrite32(reg, (void __iomem *) AR5K_AR2315_BYTESWAP);
149 } else {
 150 /* Enable WMAC DMA access (assuming 5312 or 231x) */
151 /* TODO: check other platforms */
152 reg = ioread32((void __iomem *) AR5K_AR5312_ENABLE);
153 if (to_platform_device(ah->dev)->id == 0)
154 reg |= AR5K_AR5312_ENABLE_WLAN0;
155 else
156 reg |= AR5K_AR5312_ENABLE_WLAN1;
157 iowrite32(reg, (void __iomem *) AR5K_AR5312_ENABLE);
158
159 /*
160 * On a dual-band AR5312, the multiband radio is only
161 * used as pass-through. Disable 2 GHz support in the
162 * driver for it
163 */
164 if (to_platform_device(ah->dev)->id == 0 &&
165 (bcfg->config->flags & (BD_WLAN0 | BD_WLAN1)) ==
166 (BD_WLAN1 | BD_WLAN0))
167 ah->ah_capabilities.cap_needs_2GHz_ovr = true;
168 else
169 ah->ah_capabilities.cap_needs_2GHz_ovr = false;
170 }
171
172 ret = ath5k_init_ah(ah, &ath_ahb_bus_ops);
173 if (ret != 0) {
174 dev_err(&pdev->dev, "failed to attach device, err=%d\n", ret);
175 ret = -ENODEV;
176 goto err_free_hw;
177 }
178
179 platform_set_drvdata(pdev, hw);
180
181 return 0;
182
183 err_free_hw:
184 ieee80211_free_hw(hw);
185 err_iounmap:
186 iounmap(mem);
187 err_out:
188 return ret;
189}
190
191static int ath_ahb_remove(struct platform_device *pdev)
192{
193 struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
194 struct ieee80211_hw *hw = platform_get_drvdata(pdev);
195 struct ath5k_hw *ah;
196 u32 reg;
197
198 if (!hw)
199 return 0;
200
201 ah = hw->priv;
202
203 if (bcfg->devid >= AR5K_SREV_AR2315_R6) {
204 /* Disable WMAC AHB arbitration */
205 reg = ioread32((void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
206 reg &= ~AR5K_AR2315_AHB_ARB_CTL_WLAN;
207 iowrite32(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
208 } else {
 209 /* Stop DMA access */
210 reg = ioread32((void __iomem *) AR5K_AR5312_ENABLE);
211 if (to_platform_device(ah->dev)->id == 0)
212 reg &= ~AR5K_AR5312_ENABLE_WLAN0;
213 else
214 reg &= ~AR5K_AR5312_ENABLE_WLAN1;
215 iowrite32(reg, (void __iomem *) AR5K_AR5312_ENABLE);
216 }
217
218 ath5k_deinit_ah(ah);
219 iounmap(ah->iobase);
220 ieee80211_free_hw(hw);
221
222 return 0;
223}
224
225static struct platform_driver ath_ahb_driver = {
226 .probe = ath_ahb_probe,
227 .remove = ath_ahb_remove,
228 .driver = {
229 .name = "ar231x-wmac",
230 .owner = THIS_MODULE,
231 },
232};
233
234module_platform_driver(ath_ahb_driver);
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index ed2468220216..1ed7a88aeea9 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -1647,6 +1647,32 @@ static inline struct ath_regulatory *ath5k_hw_regulatory(struct ath5k_hw *ah)
1647 return &(ath5k_hw_common(ah)->regulatory); 1647 return &(ath5k_hw_common(ah)->regulatory);
1648} 1648}
1649 1649
1650#ifdef CONFIG_ATH5K_AHB
1651#define AR5K_AR2315_PCI_BASE ((void __iomem *)0xb0100000)
1652
1653static inline void __iomem *ath5k_ahb_reg(struct ath5k_hw *ah, u16 reg)
1654{
1655 /* On AR2315 and AR2317 the PCI clock domain registers
1656 * are outside of the WMAC register space */
1657 if (unlikely((reg >= 0x4000) && (reg < 0x5000) &&
1658 (ah->ah_mac_srev >= AR5K_SREV_AR2315_R6)))
1659 return AR5K_AR2315_PCI_BASE + reg;
1660
1661 return ah->iobase + reg;
1662}
1663
1664static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg)
1665{
1666 return ioread32(ath5k_ahb_reg(ah, reg));
1667}
1668
1669static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
1670{
1671 iowrite32(val, ath5k_ahb_reg(ah, reg));
1672}
1673
1674#else
1675
1650static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg) 1676static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg)
1651{ 1677{
1652 return ioread32(ah->iobase + reg); 1678 return ioread32(ah->iobase + reg);
@@ -1657,6 +1683,8 @@ static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
1657 iowrite32(val, ah->iobase + reg); 1683 iowrite32(val, ah->iobase + reg);
1658} 1684}
1659 1685
1686#endif
1687
1660static inline enum ath_bus_type ath5k_get_bus_type(struct ath5k_hw *ah) 1688static inline enum ath_bus_type ath5k_get_bus_type(struct ath5k_hw *ah)
1661{ 1689{
1662 return ath5k_hw_common(ah)->bus_ops->ath_bus_type; 1690 return ath5k_hw_common(ah)->bus_ops->ath_bus_type;
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index a4a09bb8f2f3..bc9cb356fa69 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -99,6 +99,15 @@ static int ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
99 99
100/* Known SREVs */ 100/* Known SREVs */
101static const struct ath5k_srev_name srev_names[] = { 101static const struct ath5k_srev_name srev_names[] = {
102#ifdef CONFIG_ATH5K_AHB
103 { "5312", AR5K_VERSION_MAC, AR5K_SREV_AR5312_R2 },
104 { "5312", AR5K_VERSION_MAC, AR5K_SREV_AR5312_R7 },
105 { "2313", AR5K_VERSION_MAC, AR5K_SREV_AR2313_R8 },
106 { "2315", AR5K_VERSION_MAC, AR5K_SREV_AR2315_R6 },
107 { "2315", AR5K_VERSION_MAC, AR5K_SREV_AR2315_R7 },
108 { "2317", AR5K_VERSION_MAC, AR5K_SREV_AR2317_R1 },
109 { "2317", AR5K_VERSION_MAC, AR5K_SREV_AR2317_R2 },
110#else
102 { "5210", AR5K_VERSION_MAC, AR5K_SREV_AR5210 }, 111 { "5210", AR5K_VERSION_MAC, AR5K_SREV_AR5210 },
103 { "5311", AR5K_VERSION_MAC, AR5K_SREV_AR5311 }, 112 { "5311", AR5K_VERSION_MAC, AR5K_SREV_AR5311 },
104 { "5311A", AR5K_VERSION_MAC, AR5K_SREV_AR5311A }, 113 { "5311A", AR5K_VERSION_MAC, AR5K_SREV_AR5311A },
@@ -117,6 +126,7 @@ static const struct ath5k_srev_name srev_names[] = {
117 { "5418", AR5K_VERSION_MAC, AR5K_SREV_AR5418 }, 126 { "5418", AR5K_VERSION_MAC, AR5K_SREV_AR5418 },
118 { "2425", AR5K_VERSION_MAC, AR5K_SREV_AR2425 }, 127 { "2425", AR5K_VERSION_MAC, AR5K_SREV_AR2425 },
119 { "2417", AR5K_VERSION_MAC, AR5K_SREV_AR2417 }, 128 { "2417", AR5K_VERSION_MAC, AR5K_SREV_AR2417 },
129#endif
120 { "xxxxx", AR5K_VERSION_MAC, AR5K_SREV_UNKNOWN }, 130 { "xxxxx", AR5K_VERSION_MAC, AR5K_SREV_UNKNOWN },
121 { "5110", AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 }, 131 { "5110", AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 },
122 { "5111", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 }, 132 { "5111", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 },
@@ -132,6 +142,10 @@ static const struct ath5k_srev_name srev_names[] = {
132 { "5413", AR5K_VERSION_RAD, AR5K_SREV_RAD_5413 }, 142 { "5413", AR5K_VERSION_RAD, AR5K_SREV_RAD_5413 },
133 { "5424", AR5K_VERSION_RAD, AR5K_SREV_RAD_5424 }, 143 { "5424", AR5K_VERSION_RAD, AR5K_SREV_RAD_5424 },
134 { "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 }, 144 { "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 },
145#ifdef CONFIG_ATH5K_AHB
146 { "2316", AR5K_VERSION_RAD, AR5K_SREV_RAD_2316 },
147 { "2317", AR5K_VERSION_RAD, AR5K_SREV_RAD_2317 },
148#endif
135 { "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN }, 149 { "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN },
136}; 150};
137 151
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index 0beb7e7d6075..ca4b7ccd697f 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -163,14 +163,20 @@ int ath5k_init_leds(struct ath5k_hw *ah)
163{ 163{
164 int ret = 0; 164 int ret = 0;
165 struct ieee80211_hw *hw = ah->hw; 165 struct ieee80211_hw *hw = ah->hw;
166#ifndef CONFIG_ATH5K_AHB
166 struct pci_dev *pdev = ah->pdev; 167 struct pci_dev *pdev = ah->pdev;
168#endif
167 char name[ATH5K_LED_MAX_NAME_LEN + 1]; 169 char name[ATH5K_LED_MAX_NAME_LEN + 1];
168 const struct pci_device_id *match; 170 const struct pci_device_id *match;
169 171
170 if (!ah->pdev) 172 if (!ah->pdev)
171 return 0; 173 return 0;
172 174
175#ifdef CONFIG_ATH5K_AHB
176 match = NULL;
177#else
173 match = pci_match_id(&ath5k_led_devices[0], pdev); 178 match = pci_match_id(&ath5k_led_devices[0], pdev);
179#endif
174 if (match) { 180 if (match) {
175 __set_bit(ATH_STAT_LEDSOFT, ah->status); 181 __set_bit(ATH_STAT_LEDSOFT, ah->status);
176 ah->led_pin = ATH_PIN(match->driver_data); 182 ah->led_pin = ATH_PIN(match->driver_data);
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a7ac72639c52..cab05f31223f 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1138,8 +1138,8 @@ EXPORT_SYMBOL_GPL(pci_store_saved_state);
1138 * @dev: PCI device that we're dealing with 1138 * @dev: PCI device that we're dealing with
1139 * @state: Saved state returned from pci_store_saved_state() 1139 * @state: Saved state returned from pci_store_saved_state()
1140 */ 1140 */
1141static int pci_load_saved_state(struct pci_dev *dev, 1141int pci_load_saved_state(struct pci_dev *dev,
1142 struct pci_saved_state *state) 1142 struct pci_saved_state *state)
1143{ 1143{
1144 struct pci_cap_saved_data *cap; 1144 struct pci_cap_saved_data *cap;
1145 1145
@@ -1167,6 +1167,7 @@ static int pci_load_saved_state(struct pci_dev *dev,
1167 dev->state_saved = true; 1167 dev->state_saved = true;
1168 return 0; 1168 return 0;
1169} 1169}
1170EXPORT_SYMBOL_GPL(pci_load_saved_state);
1170 1171
1171/** 1172/**
1172 * pci_load_and_free_saved_state - Reload the save state pointed to by state, 1173 * pci_load_and_free_saved_state - Reload the save state pointed to by state,
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index b0ce7cdee0c2..910e90bf16c6 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -147,7 +147,6 @@ config TCIC
147config PCMCIA_ALCHEMY_DEVBOARD 147config PCMCIA_ALCHEMY_DEVBOARD
148 tristate "Alchemy Db/Pb1xxx PCMCIA socket services" 148 tristate "Alchemy Db/Pb1xxx PCMCIA socket services"
149 depends on MIPS_ALCHEMY && PCMCIA 149 depends on MIPS_ALCHEMY && PCMCIA
150 select 64BIT_PHYS_ADDR
151 help 150 help
152 Enable this driver of you want PCMCIA support on your Alchemy 151 Enable this driver of you want PCMCIA support on your Alchemy
153 Db1000, Db/Pb1100, Db/Pb1500, Db/Pb1550, Db/Pb1200, DB1300 152 Db1000, Db/Pb1100, Db/Pb1500, Db/Pb1550, Db/Pb1200, DB1300
@@ -158,7 +157,6 @@ config PCMCIA_ALCHEMY_DEVBOARD
158config PCMCIA_XXS1500 157config PCMCIA_XXS1500
159 tristate "MyCable XXS1500 PCMCIA socket support" 158 tristate "MyCable XXS1500 PCMCIA socket support"
160 depends on PCMCIA && MIPS_XXS1500 159 depends on PCMCIA && MIPS_XXS1500
161 select 64BIT_PHYS_ADDR
162 help 160 help
163 Support for the PCMCIA/CF socket interface on MyCable XXS1500 161 Support for the PCMCIA/CF socket interface on MyCable XXS1500
164 systems. 162 systems.
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
index ff8a027a4afb..d2ab06048169 100644
--- a/drivers/pcmcia/sa1100_generic.c
+++ b/drivers/pcmcia/sa1100_generic.c
@@ -93,6 +93,7 @@ static int sa11x0_drv_pcmcia_remove(struct platform_device *dev)
93 for (i = 0; i < sinfo->nskt; i++) 93 for (i = 0; i < sinfo->nskt; i++)
94 soc_pcmcia_remove_one(&sinfo->skt[i]); 94 soc_pcmcia_remove_one(&sinfo->skt[i]);
95 95
96 clk_put(sinfo->clk);
96 kfree(sinfo); 97 kfree(sinfo);
97 return 0; 98 return 0;
98} 99}
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index 65b02c3e14ce..7bae7e549d8b 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -145,6 +145,12 @@ int sa1111_pcmcia_add(struct sa1111_dev *dev, struct pcmcia_low_level *ops,
145 return -ENOMEM; 145 return -ENOMEM;
146 146
147 s->soc.nr = ops->first + i; 147 s->soc.nr = ops->first + i;
148 s->soc.clk = clk_get(&dev->dev, NULL);
149 if (IS_ERR(s->soc.clk)) {
150 ret = PTR_ERR(s->soc.clk);
151 kfree(s);
152 return ret;
153 }
148 soc_pcmcia_init_one(&s->soc, ops, &dev->dev); 154 soc_pcmcia_init_one(&s->soc, ops, &dev->dev);
149 s->dev = dev; 155 s->dev = dev;
150 if (s->soc.nr) { 156 if (s->soc.nr) {
@@ -220,6 +226,7 @@ static int pcmcia_remove(struct sa1111_dev *dev)
220 for (; s; s = next) { 226 for (; s; s = next) {
221 next = s->next; 227 next = s->next;
222 soc_pcmcia_remove_one(&s->soc); 228 soc_pcmcia_remove_one(&s->soc);
229 clk_put(s->soc.clk);
223 kfree(s); 230 kfree(s);
224 } 231 }
225 232
diff --git a/drivers/pcmcia/sa11xx_base.c b/drivers/pcmcia/sa11xx_base.c
index 54d3089d157b..cf6de2c2b329 100644
--- a/drivers/pcmcia/sa11xx_base.c
+++ b/drivers/pcmcia/sa11xx_base.c
@@ -135,14 +135,16 @@ sa1100_pcmcia_frequency_change(struct soc_pcmcia_socket *skt,
135static int 135static int
136sa1100_pcmcia_set_timing(struct soc_pcmcia_socket *skt) 136sa1100_pcmcia_set_timing(struct soc_pcmcia_socket *skt)
137{ 137{
138 return sa1100_pcmcia_set_mecr(skt, cpufreq_get(0)); 138 unsigned long clk = clk_get_rate(skt->clk);
139
140 return sa1100_pcmcia_set_mecr(skt, clk / 1000);
139} 141}
140 142
141static int 143static int
142sa1100_pcmcia_show_timing(struct soc_pcmcia_socket *skt, char *buf) 144sa1100_pcmcia_show_timing(struct soc_pcmcia_socket *skt, char *buf)
143{ 145{
144 struct soc_pcmcia_timing timing; 146 struct soc_pcmcia_timing timing;
145 unsigned int clock = cpufreq_get(0); 147 unsigned int clock = clk_get_rate(skt->clk);
146 unsigned long mecr = MECR; 148 unsigned long mecr = MECR;
147 char *p = buf; 149 char *p = buf;
148 150
@@ -218,6 +220,11 @@ int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
218 struct skt_dev_info *sinfo; 220 struct skt_dev_info *sinfo;
219 struct soc_pcmcia_socket *skt; 221 struct soc_pcmcia_socket *skt;
220 int i, ret = 0; 222 int i, ret = 0;
223 struct clk *clk;
224
225 clk = clk_get(dev, NULL);
226 if (IS_ERR(clk))
227 return PTR_ERR(clk);
221 228
222 sa11xx_drv_pcmcia_ops(ops); 229 sa11xx_drv_pcmcia_ops(ops);
223 230
@@ -226,12 +233,14 @@ int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
226 return -ENOMEM; 233 return -ENOMEM;
227 234
228 sinfo->nskt = nr; 235 sinfo->nskt = nr;
236 sinfo->clk = clk;
229 237
230 /* Initialize processor specific parameters */ 238 /* Initialize processor specific parameters */
231 for (i = 0; i < nr; i++) { 239 for (i = 0; i < nr; i++) {
232 skt = &sinfo->skt[i]; 240 skt = &sinfo->skt[i];
233 241
234 skt->nr = first + i; 242 skt->nr = first + i;
243 skt->clk = clk;
235 soc_pcmcia_init_one(skt, ops, dev); 244 soc_pcmcia_init_one(skt, ops, dev);
236 245
237 ret = sa11xx_drv_pcmcia_add_one(skt); 246 ret = sa11xx_drv_pcmcia_add_one(skt);
@@ -242,6 +251,7 @@ int sa11xx_drv_pcmcia_probe(struct device *dev, struct pcmcia_low_level *ops,
242 if (ret) { 251 if (ret) {
243 while (--i >= 0) 252 while (--i >= 0)
244 soc_pcmcia_remove_one(&sinfo->skt[i]); 253 soc_pcmcia_remove_one(&sinfo->skt[i]);
254 clk_put(clk);
245 kfree(sinfo); 255 kfree(sinfo);
246 } else { 256 } else {
247 dev_set_drvdata(dev, sinfo); 257 dev_set_drvdata(dev, sinfo);
diff --git a/drivers/pcmcia/soc_common.c b/drivers/pcmcia/soc_common.c
index a2bc6ee1702e..933f4657515b 100644
--- a/drivers/pcmcia/soc_common.c
+++ b/drivers/pcmcia/soc_common.c
@@ -120,6 +120,8 @@ static void __soc_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt,
120 120
121 if (skt->ops->hw_shutdown) 121 if (skt->ops->hw_shutdown)
122 skt->ops->hw_shutdown(skt); 122 skt->ops->hw_shutdown(skt);
123
124 clk_disable_unprepare(skt->clk);
123} 125}
124 126
125static void soc_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt) 127static void soc_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
@@ -131,6 +133,8 @@ static int soc_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
131{ 133{
132 int ret = 0, i; 134 int ret = 0, i;
133 135
136 clk_prepare_enable(skt->clk);
137
134 if (skt->ops->hw_init) { 138 if (skt->ops->hw_init) {
135 ret = skt->ops->hw_init(skt); 139 ret = skt->ops->hw_init(skt);
136 if (ret) 140 if (ret)
diff --git a/drivers/pinctrl/spear/pinctrl-plgpio.c b/drivers/pinctrl/spear/pinctrl-plgpio.c
index bddb79105d67..ce5f22c4151d 100644
--- a/drivers/pinctrl/spear/pinctrl-plgpio.c
+++ b/drivers/pinctrl/spear/pinctrl-plgpio.c
@@ -724,5 +724,5 @@ static int __init plgpio_init(void)
724subsys_initcall(plgpio_init); 724subsys_initcall(plgpio_init);
725 725
726MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>"); 726MODULE_AUTHOR("Viresh Kumar <viresh.kumar@linaro.org>");
727MODULE_DESCRIPTION("ST Microlectronics SPEAr PLGPIO driver"); 727MODULE_DESCRIPTION("STMicroelectronics SPEAr PLGPIO driver");
728MODULE_LICENSE("GPL"); 728MODULE_LICENSE("GPL");
diff --git a/drivers/platform/x86/intel_ips.c b/drivers/platform/x86/intel_ips.c
index c0242ed13d9e..ecd36e332c3c 100644
--- a/drivers/platform/x86/intel_ips.c
+++ b/drivers/platform/x86/intel_ips.c
@@ -593,7 +593,7 @@ static void ips_disable_gpu_turbo(struct ips_driver *ips)
593 return; 593 return;
594 594
595 if (!ips->gpu_turbo_disable()) 595 if (!ips->gpu_turbo_disable())
596 dev_err(&ips->dev->dev, "failed to disable graphis turbo\n"); 596 dev_err(&ips->dev->dev, "failed to disable graphics turbo\n");
597 else 597 else
598 ips->__gpu_turbo_on = false; 598 ips->__gpu_turbo_on = false;
599} 599}
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 4511ddc1ac31..f15cddfeb897 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -987,6 +987,17 @@ config RTC_DRV_NUC900
987 If you say yes here you get support for the RTC subsystem of the 987 If you say yes here you get support for the RTC subsystem of the
988 NUC910/NUC920 used in embedded systems. 988 NUC910/NUC920 used in embedded systems.
989 989
990config RTC_DRV_OPAL
991 tristate "IBM OPAL RTC driver"
992 depends on PPC_POWERNV
993 default y
994 help
995 If you say yes here you get support for the PowerNV platform RTC
996 driver based on OPAL interfaces.
997
998 This driver can also be built as a module. If so, the module
999 will be called rtc-opal.
1000
990comment "on-CPU RTC drivers" 1001comment "on-CPU RTC drivers"
991 1002
992config RTC_DRV_DAVINCI 1003config RTC_DRV_DAVINCI
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index b188323c096a..c8ef3e1e6ccd 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -92,6 +92,7 @@ obj-$(CONFIG_RTC_DRV_MSM6242) += rtc-msm6242.o
92obj-$(CONFIG_RTC_DRV_MPC5121) += rtc-mpc5121.o 92obj-$(CONFIG_RTC_DRV_MPC5121) += rtc-mpc5121.o
93obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o 93obj-$(CONFIG_RTC_DRV_MV) += rtc-mv.o
94obj-$(CONFIG_RTC_DRV_NUC900) += rtc-nuc900.o 94obj-$(CONFIG_RTC_DRV_NUC900) += rtc-nuc900.o
95obj-$(CONFIG_RTC_DRV_OPAL) += rtc-opal.o
95obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o 96obj-$(CONFIG_RTC_DRV_OMAP) += rtc-omap.o
96obj-$(CONFIG_RTC_DRV_PALMAS) += rtc-palmas.o 97obj-$(CONFIG_RTC_DRV_PALMAS) += rtc-palmas.o
97obj-$(CONFIG_RTC_DRV_PCAP) += rtc-pcap.o 98obj-$(CONFIG_RTC_DRV_PCAP) += rtc-pcap.o
diff --git a/drivers/rtc/rtc-opal.c b/drivers/rtc/rtc-opal.c
new file mode 100644
index 000000000000..95f652165fe9
--- /dev/null
+++ b/drivers/rtc/rtc-opal.c
@@ -0,0 +1,261 @@
1/*
2 * IBM OPAL RTC driver
3 * Copyright (C) 2014 IBM
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program.
17 */
18
19#define DRVNAME "rtc-opal"
20#define pr_fmt(fmt) DRVNAME ": " fmt
21
22#include <linux/module.h>
23#include <linux/err.h>
24#include <linux/rtc.h>
25#include <linux/delay.h>
26#include <linux/bcd.h>
27#include <linux/platform_device.h>
28#include <linux/of.h>
29#include <asm/opal.h>
30#include <asm/firmware.h>
31
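/*
 * OPAL exchanges the date as BCD in y_m_d (0xYYYYMMDD) and the time as
 * BCD in the upper bytes of h_m_s_ms (hour<<56, minute<<48, second<<40).
 */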
32static void opal_to_tm(u32 y_m_d, u64 h_m_s_ms, struct rtc_time *tm)
33{
34 tm->tm_year = ((bcd2bin(y_m_d >> 24) * 100) +
35 bcd2bin((y_m_d >> 16) & 0xff)) - 1900;
36 tm->tm_mon = bcd2bin((y_m_d >> 8) & 0xff) - 1;
37 tm->tm_mday = bcd2bin(y_m_d & 0xff);
38 tm->tm_hour = bcd2bin((h_m_s_ms >> 56) & 0xff);
39 tm->tm_min = bcd2bin((h_m_s_ms >> 48) & 0xff);
40 tm->tm_sec = bcd2bin((h_m_s_ms >> 40) & 0xff);
41
42 GregorianDay(tm);
43}
44
45static void tm_to_opal(struct rtc_time *tm, u32 *y_m_d, u64 *h_m_s_ms)
46{
47 *y_m_d |= ((u32)bin2bcd((tm->tm_year + 1900) / 100)) << 24;
48 *y_m_d |= ((u32)bin2bcd((tm->tm_year + 1900) % 100)) << 16;
49 *y_m_d |= ((u32)bin2bcd((tm->tm_mon + 1))) << 8;
50 *y_m_d |= ((u32)bin2bcd(tm->tm_mday));
51
52 *h_m_s_ms |= ((u64)bin2bcd(tm->tm_hour)) << 56;
53 *h_m_s_ms |= ((u64)bin2bcd(tm->tm_min)) << 48;
54 *h_m_s_ms |= ((u64)bin2bcd(tm->tm_sec)) << 40;
55}
56
57static int opal_get_rtc_time(struct device *dev, struct rtc_time *tm)
58{
59 long rc = OPAL_BUSY;
60 u32 y_m_d;
61 u64 h_m_s_ms;
62 __be32 __y_m_d;
63 __be64 __h_m_s_ms;
64
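	/* On OPAL_BUSY_EVENT, poll for firmware events before retrying;
	 * on plain OPAL_BUSY, just back off for a few milliseconds. */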
65 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
66 rc = opal_rtc_read(&__y_m_d, &__h_m_s_ms);
67 if (rc == OPAL_BUSY_EVENT)
68 opal_poll_events(NULL);
69 else
70 msleep(10);
71 }
72
73 if (rc != OPAL_SUCCESS)
74 return -EIO;
75
76 y_m_d = be32_to_cpu(__y_m_d);
77 h_m_s_ms = be64_to_cpu(__h_m_s_ms);
78 opal_to_tm(y_m_d, h_m_s_ms, tm);
79
80 return 0;
81}
82
83static int opal_set_rtc_time(struct device *dev, struct rtc_time *tm)
84{
85 long rc = OPAL_BUSY;
86 u32 y_m_d = 0;
87 u64 h_m_s_ms = 0;
88
89 tm_to_opal(tm, &y_m_d, &h_m_s_ms);
90 while (rc == OPAL_BUSY || rc == OPAL_BUSY_EVENT) {
91 rc = opal_rtc_write(y_m_d, h_m_s_ms);
92 if (rc == OPAL_BUSY_EVENT)
93 opal_poll_events(NULL);
94 else
95 msleep(10);
96 }
97
98 return rc == OPAL_SUCCESS ? 0 : -EIO;
99}
100
101/*
 102 * TPO - Timed Power-On
 103 *
 104 * The TPO get/set OPAL calls only care about the hour and minute. To
 105 * keep this consistent with the rtc utility time conversion functions,
 106 * we store the value in a 'u64' and shift it right by 32 bits before use.
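 *
 * For example (illustrative only): 23:45 packs as
 * h_m_s_ms = 0x2345000000000000, so (u32)(h_m_s_ms >> 32) == 0x23450000,
 * which matches the 0xffff0000 mask applied in opal_set_tpo_time().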
107 */
108static int opal_get_tpo_time(struct device *dev, struct rtc_wkalrm *alarm)
109{
110 __be32 __y_m_d, __h_m;
111 struct opal_msg msg;
112 int rc, token;
113 u64 h_m_s_ms;
114 u32 y_m_d;
115
116 token = opal_async_get_token_interruptible();
117 if (token < 0) {
118 if (token != -ERESTARTSYS)
119 pr_err("Failed to get the async token\n");
120
121 return token;
122 }
123
124 rc = opal_tpo_read(token, &__y_m_d, &__h_m);
125 if (rc != OPAL_ASYNC_COMPLETION) {
126 rc = -EIO;
127 goto exit;
128 }
129
130 rc = opal_async_wait_response(token, &msg);
131 if (rc) {
132 rc = -EIO;
133 goto exit;
134 }
135
136 rc = be64_to_cpu(msg.params[1]);
137 if (rc != OPAL_SUCCESS) {
138 rc = -EIO;
139 goto exit;
140 }
141
142 y_m_d = be32_to_cpu(__y_m_d);
143 h_m_s_ms = ((u64)be32_to_cpu(__h_m) << 32);
144 opal_to_tm(y_m_d, h_m_s_ms, &alarm->time);
145
146exit:
147 opal_async_release_token(token);
148 return rc;
149}
150
151/* Set Timed Power-On */
152static int opal_set_tpo_time(struct device *dev, struct rtc_wkalrm *alarm)
153{
 154 u64 h_m_s_ms = 0;
155 struct opal_msg msg;
156 u32 y_m_d = 0;
 157 int rc, token; /* token must be signed: it may hold -ERESTARTSYS */
158
159 tm_to_opal(&alarm->time, &y_m_d, &h_m_s_ms);
160
161 token = opal_async_get_token_interruptible();
162 if (token < 0) {
163 if (token != -ERESTARTSYS)
164 pr_err("Failed to get the async token\n");
165
166 return token;
167 }
168
169 /* TPO, we care about hour and minute */
170 rc = opal_tpo_write(token, y_m_d,
171 (u32)((h_m_s_ms >> 32) & 0xffff0000));
172 if (rc != OPAL_ASYNC_COMPLETION) {
173 rc = -EIO;
174 goto exit;
175 }
176
177 rc = opal_async_wait_response(token, &msg);
178 if (rc) {
179 rc = -EIO;
180 goto exit;
181 }
182
183 rc = be64_to_cpu(msg.params[1]);
184 if (rc != OPAL_SUCCESS)
185 rc = -EIO;
186
187exit:
188 opal_async_release_token(token);
189 return rc;
190}
191
192static const struct rtc_class_ops opal_rtc_ops = {
193 .read_time = opal_get_rtc_time,
194 .set_time = opal_set_rtc_time,
195 .read_alarm = opal_get_tpo_time,
196 .set_alarm = opal_set_tpo_time,
197};
198
199static int opal_rtc_probe(struct platform_device *pdev)
200{
201 struct rtc_device *rtc;
202
203 if (pdev->dev.of_node && of_get_property(pdev->dev.of_node, "has-tpo",
204 NULL))
205 device_set_wakeup_capable(&pdev->dev, true);
206
207 rtc = devm_rtc_device_register(&pdev->dev, DRVNAME, &opal_rtc_ops,
208 THIS_MODULE);
209 if (IS_ERR(rtc))
210 return PTR_ERR(rtc);
211
212 rtc->uie_unsupported = 1;
213
214 return 0;
215}
216
217static const struct of_device_id opal_rtc_match[] = {
218 {
219 .compatible = "ibm,opal-rtc",
220 },
221 { }
222};
223MODULE_DEVICE_TABLE(of, opal_rtc_match);
224
225static const struct platform_device_id opal_rtc_driver_ids[] = {
226 {
227 .name = "opal-rtc",
228 },
229 { }
230};
231MODULE_DEVICE_TABLE(platform, opal_rtc_driver_ids);
232
233static struct platform_driver opal_rtc_driver = {
234 .probe = opal_rtc_probe,
235 .id_table = opal_rtc_driver_ids,
236 .driver = {
237 .name = DRVNAME,
238 .owner = THIS_MODULE,
239 .of_match_table = opal_rtc_match,
240 },
241};
242
243static int __init opal_rtc_init(void)
244{
245 if (!firmware_has_feature(FW_FEATURE_OPAL))
246 return -ENODEV;
247
248 return platform_driver_register(&opal_rtc_driver);
249}
250
251static void __exit opal_rtc_exit(void)
252{
253 platform_driver_unregister(&opal_rtc_driver);
254}
255
256MODULE_AUTHOR("Neelesh Gupta <neelegup@linux.vnet.ibm.com>");
257MODULE_DESCRIPTION("IBM OPAL RTC driver");
258MODULE_LICENSE("GPL");
259
260module_init(opal_rtc_init);
261module_exit(opal_rtc_exit);
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 329db997ee66..4abf11965484 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1377,6 +1377,20 @@ int dasd_term_IO(struct dasd_ccw_req *cqr)
1377 "I/O error, retry"); 1377 "I/O error, retry");
1378 break; 1378 break;
1379 case -EINVAL: 1379 case -EINVAL:
1380 /*
1381 * device not valid so no I/O could be running
1382 * handle CQR as termination successful
1383 */
1384 cqr->status = DASD_CQR_CLEARED;
1385 cqr->stopclk = get_tod_clock();
1386 cqr->starttime = 0;
1387 /* no retries for invalid devices */
1388 cqr->retries = -1;
1389 DBF_DEV_EVENT(DBF_ERR, device, "%s",
1390 "EINVAL, handle as terminated");
1391 /* fake rc to success */
1392 rc = 0;
1393 break;
1380 case -EBUSY: 1394 case -EBUSY:
1381 DBF_DEV_EVENT(DBF_ERR, device, "%s", 1395 DBF_DEV_EVENT(DBF_ERR, device, "%s",
1382 "device busy, retry later"); 1396 "device busy, retry later");
@@ -1683,11 +1697,8 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
1683 if (cqr->status == DASD_CQR_CLEAR_PENDING && 1697 if (cqr->status == DASD_CQR_CLEAR_PENDING &&
1684 scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) { 1698 scsw_fctl(&irb->scsw) & SCSW_FCTL_CLEAR_FUNC) {
1685 cqr->status = DASD_CQR_CLEARED; 1699 cqr->status = DASD_CQR_CLEARED;
1686 if (cqr->callback_data == DASD_SLEEPON_START_TAG)
1687 cqr->callback_data = DASD_SLEEPON_END_TAG;
1688 dasd_device_clear_timer(device); 1700 dasd_device_clear_timer(device);
1689 wake_up(&dasd_flush_wq); 1701 wake_up(&dasd_flush_wq);
1690 wake_up(&generic_waitq);
1691 dasd_schedule_device_bh(device); 1702 dasd_schedule_device_bh(device);
1692 return; 1703 return;
1693 } 1704 }
@@ -2326,21 +2337,11 @@ retry:
2326 return -EAGAIN; 2337 return -EAGAIN;
2327 2338
2328 /* normal recovery for basedev IO */ 2339 /* normal recovery for basedev IO */
2329 if (__dasd_sleep_on_erp(cqr)) { 2340 if (__dasd_sleep_on_erp(cqr))
2341 /* handle erp first */
2330 goto retry; 2342 goto retry;
2331 /* remember that ERP was needed */
2332 rc = 1;
2333 /* skip processing for active cqr */
2334 if (cqr->status != DASD_CQR_TERMINATED &&
2335 cqr->status != DASD_CQR_NEED_ERP)
2336 break;
2337 }
2338 } 2343 }
2339 2344
2340 /* start ERP requests in upper loop */
2341 if (rc)
2342 goto retry;
2343
2344 return 0; 2345 return 0;
2345} 2346}
2346 2347
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index f224d59c4b6b..90f39f79f5d7 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -99,15 +99,37 @@ void dasd_gendisk_free(struct dasd_block *block)
99int dasd_scan_partitions(struct dasd_block *block) 99int dasd_scan_partitions(struct dasd_block *block)
100{ 100{
101 struct block_device *bdev; 101 struct block_device *bdev;
102 int retry, rc;
102 103
104 retry = 5;
103 bdev = bdget_disk(block->gdp, 0); 105 bdev = bdget_disk(block->gdp, 0);
104 if (!bdev || blkdev_get(bdev, FMODE_READ, NULL) < 0) 106 if (!bdev) {
107 DBF_DEV_EVENT(DBF_ERR, block->base, "%s",
108 "scan partitions error, bdget returned NULL");
105 return -ENODEV; 109 return -ENODEV;
110 }
111
112 rc = blkdev_get(bdev, FMODE_READ, NULL);
113 if (rc < 0) {
114 DBF_DEV_EVENT(DBF_ERR, block->base,
115 "scan partitions error, blkdev_get returned %d",
116 rc);
117 return -ENODEV;
118 }
106 /* 119 /*
107 * See fs/partition/check.c:register_disk,rescan_partitions 120 * See fs/partition/check.c:register_disk,rescan_partitions
108 * Can't call rescan_partitions directly. Use ioctl. 121 * Can't call rescan_partitions directly. Use ioctl.
109 */ 122 */
110 ioctl_by_bdev(bdev, BLKRRPART, 0); 123 rc = ioctl_by_bdev(bdev, BLKRRPART, 0);
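	/* BLKRRPART may return -EBUSY (e.g. while the device is still
	 * opened elsewhere); yield and retry a bounded number of times. */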
124 while (rc == -EBUSY && retry > 0) {
125 schedule();
126 rc = ioctl_by_bdev(bdev, BLKRRPART, 0);
127 retry--;
128 DBF_DEV_EVENT(DBF_ERR, block->base,
129 "scan partitions error, retry %d rc %d",
130 retry, rc);
131 }
132
111 /* 133 /*
112 * Since the matching blkdev_put call to the blkdev_get in 134 * Since the matching blkdev_put call to the blkdev_get in
113 * this function is not called before dasd_destroy_partitions 135 * this function is not called before dasd_destroy_partitions
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index 56046ab39629..75d9896deccb 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -10,6 +10,7 @@
10 10
11#include <linux/interrupt.h> 11#include <linux/interrupt.h>
12#include <linux/spinlock.h> 12#include <linux/spinlock.h>
13#include <linux/mempool.h>
13#include <linux/module.h> 14#include <linux/module.h>
14#include <linux/blkdev.h> 15#include <linux/blkdev.h>
15#include <linux/genhd.h> 16#include <linux/genhd.h>
@@ -20,13 +21,18 @@
20 21
21debug_info_t *scm_debug; 22debug_info_t *scm_debug;
22static int scm_major; 23static int scm_major;
24static mempool_t *aidaw_pool;
23static DEFINE_SPINLOCK(list_lock); 25static DEFINE_SPINLOCK(list_lock);
24static LIST_HEAD(inactive_requests); 26static LIST_HEAD(inactive_requests);
25static unsigned int nr_requests = 64; 27static unsigned int nr_requests = 64;
28static unsigned int nr_requests_per_io = 8;
26static atomic_t nr_devices = ATOMIC_INIT(0); 29static atomic_t nr_devices = ATOMIC_INIT(0);
27module_param(nr_requests, uint, S_IRUGO); 30module_param(nr_requests, uint, S_IRUGO);
28MODULE_PARM_DESC(nr_requests, "Number of parallel requests."); 31MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");
29 32
33module_param(nr_requests_per_io, uint, S_IRUGO);
34MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");
35
30MODULE_DESCRIPTION("Block driver for s390 storage class memory."); 36MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
31MODULE_LICENSE("GPL"); 37MODULE_LICENSE("GPL");
32MODULE_ALIAS("scm:scmdev*"); 38MODULE_ALIAS("scm:scmdev*");
@@ -36,8 +42,8 @@ static void __scm_free_rq(struct scm_request *scmrq)
36 struct aob_rq_header *aobrq = to_aobrq(scmrq); 42 struct aob_rq_header *aobrq = to_aobrq(scmrq);
37 43
38 free_page((unsigned long) scmrq->aob); 44 free_page((unsigned long) scmrq->aob);
39 free_page((unsigned long) scmrq->aidaw);
40 __scm_free_rq_cluster(scmrq); 45 __scm_free_rq_cluster(scmrq);
46 kfree(scmrq->request);
41 kfree(aobrq); 47 kfree(aobrq);
42} 48}
43 49
@@ -53,6 +59,8 @@ static void scm_free_rqs(void)
53 __scm_free_rq(scmrq); 59 __scm_free_rq(scmrq);
54 } 60 }
55 spin_unlock_irq(&list_lock); 61 spin_unlock_irq(&list_lock);
62
63 mempool_destroy(aidaw_pool);
56} 64}
57 65
58static int __scm_alloc_rq(void) 66static int __scm_alloc_rq(void)
@@ -65,17 +73,17 @@ static int __scm_alloc_rq(void)
65 return -ENOMEM; 73 return -ENOMEM;
66 74
67 scmrq = (void *) aobrq->data; 75 scmrq = (void *) aobrq->data;
68 scmrq->aidaw = (void *) get_zeroed_page(GFP_DMA);
69 scmrq->aob = (void *) get_zeroed_page(GFP_DMA); 76 scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
70 if (!scmrq->aob || !scmrq->aidaw) { 77 if (!scmrq->aob)
71 __scm_free_rq(scmrq); 78 goto free;
72 return -ENOMEM;
73 }
74 79
75 if (__scm_alloc_rq_cluster(scmrq)) { 80 scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]),
76 __scm_free_rq(scmrq); 81 GFP_KERNEL);
77 return -ENOMEM; 82 if (!scmrq->request)
78 } 83 goto free;
84
85 if (__scm_alloc_rq_cluster(scmrq))
86 goto free;
79 87
80 INIT_LIST_HEAD(&scmrq->list); 88 INIT_LIST_HEAD(&scmrq->list);
81 spin_lock_irq(&list_lock); 89 spin_lock_irq(&list_lock);
@@ -83,12 +91,19 @@ static int __scm_alloc_rq(void)
83 spin_unlock_irq(&list_lock); 91 spin_unlock_irq(&list_lock);
84 92
85 return 0; 93 return 0;
94free:
95 __scm_free_rq(scmrq);
96 return -ENOMEM;
86} 97}
87 98
88static int scm_alloc_rqs(unsigned int nrqs) 99static int scm_alloc_rqs(unsigned int nrqs)
89{ 100{
90 int ret = 0; 101 int ret = 0;
91 102
103 aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
104 if (!aidaw_pool)
105 return -ENOMEM;
106
92 while (nrqs-- && !ret) 107 while (nrqs-- && !ret)
93 ret = __scm_alloc_rq(); 108 ret = __scm_alloc_rq();
94 109
@@ -112,6 +127,18 @@ out:
112static void scm_request_done(struct scm_request *scmrq) 127static void scm_request_done(struct scm_request *scmrq)
113{ 128{
114 unsigned long flags; 129 unsigned long flags;
130 struct msb *msb;
131 u64 aidaw;
132 int i;
133
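	/* Free the aidaw pages that came from the mempool; a pool page is
	 * always the page-aligned start of an msb's indirect list, whereas
	 * aidaws placed on the aob page itself are never page aligned. */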
134 for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
135 msb = &scmrq->aob->msb[i];
136 aidaw = msb->data_addr;
137
138 if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
139 IS_ALIGNED(aidaw, PAGE_SIZE))
140 mempool_free(virt_to_page(aidaw), aidaw_pool);
141 }
115 142
116 spin_lock_irqsave(&list_lock, flags); 143 spin_lock_irqsave(&list_lock, flags);
117 list_add(&scmrq->list, &inactive_requests); 144 list_add(&scmrq->list, &inactive_requests);
@@ -123,48 +150,90 @@ static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
123 return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT; 150 return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
124} 151}
125 152
126static void scm_request_prepare(struct scm_request *scmrq) 153static inline struct aidaw *scm_aidaw_alloc(void)
154{
155 struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);
156
157 return page ? page_address(page) : NULL;
158}
159
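/* Number of data bytes addressable by the aidaw slots remaining on the
 * current page - each slot maps one 4K page of data. */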
160static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
161{
162 unsigned long _aidaw = (unsigned long) aidaw;
163 unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;
164
165 return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
166}
167
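/* Reuse the tail of the current aidaw list if it can address 'bytes',
 * otherwise fetch a fresh, zeroed page from the mempool. */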
168struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
169{
170 struct aidaw *aidaw;
171
172 if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
173 return scmrq->next_aidaw;
174
175 aidaw = scm_aidaw_alloc();
176 if (aidaw)
177 memset(aidaw, 0, PAGE_SIZE);
178 return aidaw;
179}
180
181static int scm_request_prepare(struct scm_request *scmrq)
127{ 182{
128 struct scm_blk_dev *bdev = scmrq->bdev; 183 struct scm_blk_dev *bdev = scmrq->bdev;
129 struct scm_device *scmdev = bdev->gendisk->private_data; 184 struct scm_device *scmdev = bdev->gendisk->private_data;
130 struct aidaw *aidaw = scmrq->aidaw; 185 int pos = scmrq->aob->request.msb_count;
131 struct msb *msb = &scmrq->aob->msb[0]; 186 struct msb *msb = &scmrq->aob->msb[pos];
187 struct request *req = scmrq->request[pos];
132 struct req_iterator iter; 188 struct req_iterator iter;
189 struct aidaw *aidaw;
133 struct bio_vec bv; 190 struct bio_vec bv;
134 191
192 aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
193 if (!aidaw)
194 return -ENOMEM;
195
135 msb->bs = MSB_BS_4K; 196 msb->bs = MSB_BS_4K;
136 scmrq->aob->request.msb_count = 1; 197 scmrq->aob->request.msb_count++;
137 msb->scm_addr = scmdev->address + 198 msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
138 ((u64) blk_rq_pos(scmrq->request) << 9); 199 msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
139 msb->oc = (rq_data_dir(scmrq->request) == READ) ?
140 MSB_OC_READ : MSB_OC_WRITE;
141 msb->flags |= MSB_FLAG_IDA; 200 msb->flags |= MSB_FLAG_IDA;
142 msb->data_addr = (u64) aidaw; 201 msb->data_addr = (u64) aidaw;
143 202
144 rq_for_each_segment(bv, scmrq->request, iter) { 203 rq_for_each_segment(bv, req, iter) {
145 WARN_ON(bv.bv_offset); 204 WARN_ON(bv.bv_offset);
146 msb->blk_count += bv.bv_len >> 12; 205 msb->blk_count += bv.bv_len >> 12;
147 aidaw->data_addr = (u64) page_address(bv.bv_page); 206 aidaw->data_addr = (u64) page_address(bv.bv_page);
148 aidaw++; 207 aidaw++;
149 } 208 }
209
210 scmrq->next_aidaw = aidaw;
211 return 0;
212}
213
214static inline void scm_request_set(struct scm_request *scmrq,
215 struct request *req)
216{
217 scmrq->request[scmrq->aob->request.msb_count] = req;
150} 218}
151 219
152static inline void scm_request_init(struct scm_blk_dev *bdev, 220static inline void scm_request_init(struct scm_blk_dev *bdev,
153 struct scm_request *scmrq, 221 struct scm_request *scmrq)
154 struct request *req)
155{ 222{
156 struct aob_rq_header *aobrq = to_aobrq(scmrq); 223 struct aob_rq_header *aobrq = to_aobrq(scmrq);
157 struct aob *aob = scmrq->aob; 224 struct aob *aob = scmrq->aob;
158 225
226 memset(scmrq->request, 0,
227 nr_requests_per_io * sizeof(scmrq->request[0]));
159 memset(aob, 0, sizeof(*aob)); 228 memset(aob, 0, sizeof(*aob));
160 memset(scmrq->aidaw, 0, PAGE_SIZE);
161 aobrq->scmdev = bdev->scmdev; 229 aobrq->scmdev = bdev->scmdev;
162 aob->request.cmd_code = ARQB_CMD_MOVE; 230 aob->request.cmd_code = ARQB_CMD_MOVE;
163 aob->request.data = (u64) aobrq; 231 aob->request.data = (u64) aobrq;
164 scmrq->request = req;
165 scmrq->bdev = bdev; 232 scmrq->bdev = bdev;
166 scmrq->retries = 4; 233 scmrq->retries = 4;
167 scmrq->error = 0; 234 scmrq->error = 0;
235 /* We don't use all msbs - place aidaws at the end of the aob page. */
236 scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
168 scm_request_cluster_init(scmrq); 237 scm_request_cluster_init(scmrq);
169} 238}
170 239
@@ -180,9 +249,12 @@ static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
180void scm_request_requeue(struct scm_request *scmrq) 249void scm_request_requeue(struct scm_request *scmrq)
181{ 250{
182 struct scm_blk_dev *bdev = scmrq->bdev; 251 struct scm_blk_dev *bdev = scmrq->bdev;
252 int i;
183 253
184 scm_release_cluster(scmrq); 254 scm_release_cluster(scmrq);
185 blk_requeue_request(bdev->rq, scmrq->request); 255 for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
256 blk_requeue_request(bdev->rq, scmrq->request[i]);
257
186 atomic_dec(&bdev->queued_reqs); 258 atomic_dec(&bdev->queued_reqs);
187 scm_request_done(scmrq); 259 scm_request_done(scmrq);
188 scm_ensure_queue_restart(bdev); 260 scm_ensure_queue_restart(bdev);
@@ -191,20 +263,41 @@ void scm_request_requeue(struct scm_request *scmrq)
191void scm_request_finish(struct scm_request *scmrq) 263void scm_request_finish(struct scm_request *scmrq)
192{ 264{
193 struct scm_blk_dev *bdev = scmrq->bdev; 265 struct scm_blk_dev *bdev = scmrq->bdev;
266 int i;
194 267
195 scm_release_cluster(scmrq); 268 scm_release_cluster(scmrq);
196 blk_end_request_all(scmrq->request, scmrq->error); 269 for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
270 blk_end_request_all(scmrq->request[i], scmrq->error);
271
197 atomic_dec(&bdev->queued_reqs); 272 atomic_dec(&bdev->queued_reqs);
198 scm_request_done(scmrq); 273 scm_request_done(scmrq);
199} 274}
200 275
276static int scm_request_start(struct scm_request *scmrq)
277{
278 struct scm_blk_dev *bdev = scmrq->bdev;
279 int ret;
280
281 atomic_inc(&bdev->queued_reqs);
282 if (!scmrq->aob->request.msb_count) {
283 scm_request_requeue(scmrq);
284 return -EINVAL;
285 }
286
287 ret = eadm_start_aob(scmrq->aob);
288 if (ret) {
289 SCM_LOG(5, "no subchannel");
290 scm_request_requeue(scmrq);
291 }
292 return ret;
293}
294
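/*
 * Gather up to nr_requests_per_io block layer requests into one scmrq
 * and start the aob once it is full or the queue cannot feed it any
 * further; cluster writes are always started as separate requests.
 */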
201static void scm_blk_request(struct request_queue *rq) 295static void scm_blk_request(struct request_queue *rq)
202{ 296{
203 struct scm_device *scmdev = rq->queuedata; 297 struct scm_device *scmdev = rq->queuedata;
204 struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev); 298 struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
205 struct scm_request *scmrq; 299 struct scm_request *scmrq = NULL;
206 struct request *req; 300 struct request *req;
207 int ret;
208 301
209 while ((req = blk_peek_request(rq))) { 302 while ((req = blk_peek_request(rq))) {
210 if (req->cmd_type != REQ_TYPE_FS) { 303 if (req->cmd_type != REQ_TYPE_FS) {
@@ -214,39 +307,64 @@ static void scm_blk_request(struct request_queue *rq)
214 continue; 307 continue;
215 } 308 }
216 309
217 if (!scm_permit_request(bdev, req)) { 310 if (!scm_permit_request(bdev, req))
218 scm_ensure_queue_restart(bdev); 311 goto out;
219 return; 312
220 }
221 scmrq = scm_request_fetch();
222 if (!scmrq) { 313 if (!scmrq) {
223 SCM_LOG(5, "no request"); 314 scmrq = scm_request_fetch();
224 scm_ensure_queue_restart(bdev); 315 if (!scmrq) {
225 return; 316 SCM_LOG(5, "no request");
317 goto out;
318 }
319 scm_request_init(bdev, scmrq);
226 } 320 }
227 scm_request_init(bdev, scmrq, req); 321 scm_request_set(scmrq, req);
322
228 if (!scm_reserve_cluster(scmrq)) { 323 if (!scm_reserve_cluster(scmrq)) {
229 SCM_LOG(5, "cluster busy"); 324 SCM_LOG(5, "cluster busy");
325 scm_request_set(scmrq, NULL);
326 if (scmrq->aob->request.msb_count)
327 goto out;
328
230 scm_request_done(scmrq); 329 scm_request_done(scmrq);
231 return; 330 return;
232 } 331 }
332
233 if (scm_need_cluster_request(scmrq)) { 333 if (scm_need_cluster_request(scmrq)) {
234 atomic_inc(&bdev->queued_reqs); 334 if (scmrq->aob->request.msb_count) {
235 blk_start_request(req); 335 /* Start cluster requests separately. */
236 scm_initiate_cluster_request(scmrq); 336 scm_request_set(scmrq, NULL);
237 return; 337 if (scm_request_start(scmrq))
338 return;
339 } else {
340 atomic_inc(&bdev->queued_reqs);
341 blk_start_request(req);
342 scm_initiate_cluster_request(scmrq);
343 }
344 scmrq = NULL;
345 continue;
346 }
347
348 if (scm_request_prepare(scmrq)) {
349 SCM_LOG(5, "aidaw alloc failed");
350 scm_request_set(scmrq, NULL);
351 goto out;
238 } 352 }
239 scm_request_prepare(scmrq);
240 atomic_inc(&bdev->queued_reqs);
241 blk_start_request(req); 353 blk_start_request(req);
242 354
243 ret = eadm_start_aob(scmrq->aob); 355 if (scmrq->aob->request.msb_count < nr_requests_per_io)
244 if (ret) { 356 continue;
245 SCM_LOG(5, "no subchannel"); 357
246 scm_request_requeue(scmrq); 358 if (scm_request_start(scmrq))
247 return; 359 return;
248 } 360
361 scmrq = NULL;
249 } 362 }
363out:
364 if (scmrq)
365 scm_request_start(scmrq);
366 else
367 scm_ensure_queue_restart(bdev);
250} 368}
251 369
252static void __scmrq_log_error(struct scm_request *scmrq) 370static void __scmrq_log_error(struct scm_request *scmrq)
@@ -443,11 +561,19 @@ void scm_blk_set_available(struct scm_blk_dev *bdev)
443 spin_unlock_irqrestore(&bdev->lock, flags); 561 spin_unlock_irqrestore(&bdev->lock, flags);
444} 562}
445 563
564static bool __init scm_blk_params_valid(void)
565{
566 if (!nr_requests_per_io || nr_requests_per_io > 64)
567 return false;
568
569 return scm_cluster_size_valid();
570}
571
446static int __init scm_blk_init(void) 572static int __init scm_blk_init(void)
447{ 573{
448 int ret = -EINVAL; 574 int ret = -EINVAL;
449 575
450 if (!scm_cluster_size_valid()) 576 if (!scm_blk_params_valid())
451 goto out; 577 goto out;
452 578
453 ret = register_blkdev(0, "scm"); 579 ret = register_blkdev(0, "scm");
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h
index e59331e6c2e5..09218cdc5129 100644
--- a/drivers/s390/block/scm_blk.h
+++ b/drivers/s390/block/scm_blk.h
@@ -30,8 +30,8 @@ struct scm_blk_dev {
30 30
31struct scm_request { 31struct scm_request {
32 struct scm_blk_dev *bdev; 32 struct scm_blk_dev *bdev;
33 struct request *request; 33 struct aidaw *next_aidaw;
34 struct aidaw *aidaw; 34 struct request **request;
35 struct aob *aob; 35 struct aob *aob;
36 struct list_head list; 36 struct list_head list;
37 u8 retries; 37 u8 retries;
@@ -55,6 +55,8 @@ void scm_blk_irq(struct scm_device *, void *, int);
55void scm_request_finish(struct scm_request *); 55void scm_request_finish(struct scm_request *);
56void scm_request_requeue(struct scm_request *); 56void scm_request_requeue(struct scm_request *);
57 57
58struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes);
59
58int scm_drv_init(void); 60int scm_drv_init(void);
59void scm_drv_cleanup(void); 61void scm_drv_cleanup(void);
60 62
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
index 9aae909d47a5..09db45296eed 100644
--- a/drivers/s390/block/scm_blk_cluster.c
+++ b/drivers/s390/block/scm_blk_cluster.c
@@ -57,39 +57,52 @@ void scm_request_cluster_init(struct scm_request *scmrq)
57 scmrq->cluster.state = CLUSTER_NONE; 57 scmrq->cluster.state = CLUSTER_NONE;
58} 58}
59 59
60static bool clusters_intersect(struct scm_request *A, struct scm_request *B) 60static bool clusters_intersect(struct request *A, struct request *B)
61{ 61{
62 unsigned long firstA, lastA, firstB, lastB; 62 unsigned long firstA, lastA, firstB, lastB;
63 63
64 firstA = ((u64) blk_rq_pos(A->request) << 9) / CLUSTER_SIZE; 64 firstA = ((u64) blk_rq_pos(A) << 9) / CLUSTER_SIZE;
65 lastA = (((u64) blk_rq_pos(A->request) << 9) + 65 lastA = (((u64) blk_rq_pos(A) << 9) +
66 blk_rq_bytes(A->request) - 1) / CLUSTER_SIZE; 66 blk_rq_bytes(A) - 1) / CLUSTER_SIZE;
67 67
68 firstB = ((u64) blk_rq_pos(B->request) << 9) / CLUSTER_SIZE; 68 firstB = ((u64) blk_rq_pos(B) << 9) / CLUSTER_SIZE;
69 lastB = (((u64) blk_rq_pos(B->request) << 9) + 69 lastB = (((u64) blk_rq_pos(B) << 9) +
70 blk_rq_bytes(B->request) - 1) / CLUSTER_SIZE; 70 blk_rq_bytes(B) - 1) / CLUSTER_SIZE;
71 71
72 return (firstB <= lastA && firstA <= lastB); 72 return (firstB <= lastA && firstA <= lastB);
73} 73}
74 74
75bool scm_reserve_cluster(struct scm_request *scmrq) 75bool scm_reserve_cluster(struct scm_request *scmrq)
76{ 76{
77 struct request *req = scmrq->request[scmrq->aob->request.msb_count];
77 struct scm_blk_dev *bdev = scmrq->bdev; 78 struct scm_blk_dev *bdev = scmrq->bdev;
78 struct scm_request *iter; 79 struct scm_request *iter;
80 int pos, add = 1;
79 81
80 if (write_cluster_size == 0) 82 if (write_cluster_size == 0)
81 return true; 83 return true;
82 84
83 spin_lock(&bdev->lock); 85 spin_lock(&bdev->lock);
84 list_for_each_entry(iter, &bdev->cluster_list, cluster.list) { 86 list_for_each_entry(iter, &bdev->cluster_list, cluster.list) {
85 if (clusters_intersect(scmrq, iter) && 87 if (iter == scmrq) {
86 (rq_data_dir(scmrq->request) == WRITE || 88 /*
87 rq_data_dir(iter->request) == WRITE)) { 89 * We don't have to use clusters_intersect here, since
88 spin_unlock(&bdev->lock); 90 * cluster requests are always started separately.
89 return false; 91 */
92 add = 0;
93 continue;
94 }
95 for (pos = 0; pos <= iter->aob->request.msb_count; pos++) {
96 if (clusters_intersect(req, iter->request[pos]) &&
97 (rq_data_dir(req) == WRITE ||
98 rq_data_dir(iter->request[pos]) == WRITE)) {
99 spin_unlock(&bdev->lock);
100 return false;
101 }
90 } 102 }
91 } 103 }
92 list_add(&scmrq->cluster.list, &bdev->cluster_list); 104 if (add)
105 list_add(&scmrq->cluster.list, &bdev->cluster_list);
93 spin_unlock(&bdev->lock); 106 spin_unlock(&bdev->lock);
94 107
95 return true; 108 return true;
@@ -114,14 +127,14 @@ void scm_blk_dev_cluster_setup(struct scm_blk_dev *bdev)
114 blk_queue_io_opt(bdev->rq, CLUSTER_SIZE); 127 blk_queue_io_opt(bdev->rq, CLUSTER_SIZE);
115} 128}
116 129
117static void scm_prepare_cluster_request(struct scm_request *scmrq) 130static int scm_prepare_cluster_request(struct scm_request *scmrq)
118{ 131{
119 struct scm_blk_dev *bdev = scmrq->bdev; 132 struct scm_blk_dev *bdev = scmrq->bdev;
120 struct scm_device *scmdev = bdev->gendisk->private_data; 133 struct scm_device *scmdev = bdev->gendisk->private_data;
121 struct request *req = scmrq->request; 134 struct request *req = scmrq->request[0];
122 struct aidaw *aidaw = scmrq->aidaw;
123 struct msb *msb = &scmrq->aob->msb[0]; 135 struct msb *msb = &scmrq->aob->msb[0];
124 struct req_iterator iter; 136 struct req_iterator iter;
137 struct aidaw *aidaw;
125 struct bio_vec bv; 138 struct bio_vec bv;
126 int i = 0; 139 int i = 0;
127 u64 addr; 140 u64 addr;
@@ -131,11 +144,9 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
131 scmrq->cluster.state = CLUSTER_READ; 144 scmrq->cluster.state = CLUSTER_READ;
132 /* fall through */ 145 /* fall through */
133 case CLUSTER_READ: 146 case CLUSTER_READ:
134 scmrq->aob->request.msb_count = 1;
135 msb->bs = MSB_BS_4K; 147 msb->bs = MSB_BS_4K;
136 msb->oc = MSB_OC_READ; 148 msb->oc = MSB_OC_READ;
137 msb->flags = MSB_FLAG_IDA; 149 msb->flags = MSB_FLAG_IDA;
138 msb->data_addr = (u64) aidaw;
139 msb->blk_count = write_cluster_size; 150 msb->blk_count = write_cluster_size;
140 151
141 addr = scmdev->address + ((u64) blk_rq_pos(req) << 9); 152 addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
@@ -146,6 +157,12 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
146 CLUSTER_SIZE)) 157 CLUSTER_SIZE))
147 msb->blk_count = 2 * write_cluster_size; 158 msb->blk_count = 2 * write_cluster_size;
148 159
160 aidaw = scm_aidaw_fetch(scmrq, msb->blk_count * PAGE_SIZE);
161 if (!aidaw)
162 return -ENOMEM;
163
164 scmrq->aob->request.msb_count = 1;
165 msb->data_addr = (u64) aidaw;
149 for (i = 0; i < msb->blk_count; i++) { 166 for (i = 0; i < msb->blk_count; i++) {
150 aidaw->data_addr = (u64) scmrq->cluster.buf[i]; 167 aidaw->data_addr = (u64) scmrq->cluster.buf[i];
151 aidaw++; 168 aidaw++;
@@ -153,6 +170,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
153 170
154 break; 171 break;
155 case CLUSTER_WRITE: 172 case CLUSTER_WRITE:
173 aidaw = (void *) msb->data_addr;
156 msb->oc = MSB_OC_WRITE; 174 msb->oc = MSB_OC_WRITE;
157 175
158 for (addr = msb->scm_addr; 176 for (addr = msb->scm_addr;
@@ -173,22 +191,29 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
173 } 191 }
174 break; 192 break;
175 } 193 }
194 return 0;
176} 195}
177 196
178bool scm_need_cluster_request(struct scm_request *scmrq) 197bool scm_need_cluster_request(struct scm_request *scmrq)
179{ 198{
180 if (rq_data_dir(scmrq->request) == READ) 199 int pos = scmrq->aob->request.msb_count;
200
201 if (rq_data_dir(scmrq->request[pos]) == READ)
181 return false; 202 return false;
182 203
183 return blk_rq_bytes(scmrq->request) < CLUSTER_SIZE; 204 return blk_rq_bytes(scmrq->request[pos]) < CLUSTER_SIZE;
184} 205}
185 206
186/* Called with queue lock held. */ 207/* Called with queue lock held. */
187void scm_initiate_cluster_request(struct scm_request *scmrq) 208void scm_initiate_cluster_request(struct scm_request *scmrq)
188{ 209{
189 scm_prepare_cluster_request(scmrq); 210 if (scm_prepare_cluster_request(scmrq))
211 goto requeue;
190 if (eadm_start_aob(scmrq->aob)) 212 if (eadm_start_aob(scmrq->aob))
191 scm_request_requeue(scmrq); 213 goto requeue;
214 return;
215requeue:
216 scm_request_requeue(scmrq);
192} 217}
193 218
194bool scm_test_cluster_request(struct scm_request *scmrq) 219bool scm_test_cluster_request(struct scm_request *scmrq)
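The rewritten clusters_intersect() above reduces to the textbook test for overlapping inclusive ranges: [firstA, lastA] and [firstB, lastB] intersect exactly when each range starts no later than the other ends. A standalone C check of that predicate (ranges_intersect is an illustrative name):

#include <assert.h>
#include <stdbool.h>

static bool ranges_intersect(unsigned long firstA, unsigned long lastA,
                             unsigned long firstB, unsigned long lastB)
{
        return firstB <= lastA && firstA <= lastB;
}

int main(void)
{
        assert(ranges_intersect(0, 3, 3, 5));   /* shared cluster 3 */
        assert(!ranges_intersect(0, 2, 3, 5));  /* disjoint */
        assert(ranges_intersect(2, 9, 4, 5));   /* containment */
        return 0;
}

Passing struct request pointers instead of scm_request pointers is what lets scm_reserve_cluster() compare the request about to be added against every request already queued in a multi-request scm_request.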
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index db2cb1f8a1b5..a5c6f7e157aa 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -102,6 +102,16 @@ config SCLP_ASYNC
102	      want to inform other people about your kernel panics, 102	      want to inform other people about your kernel panics,
103 need this feature and intend to run your kernel in LPAR. 103 need this feature and intend to run your kernel in LPAR.
104 104
105config SCLP_ASYNC_ID
106 string "Component ID for Call Home"
107 depends on SCLP_ASYNC
108 default "000000000"
109 help
110 The Component ID for Call Home is used to identify the correct
111 problem reporting queue the call home records should be sent to.
112
113	  If you are unsure, please use the default value "000000000".
114
105config HMC_DRV 115config HMC_DRV
106 def_tristate m 116 def_tristate m
107 prompt "Support for file transfers from HMC drive CD/DVD-ROM" 117 prompt "Support for file transfers from HMC drive CD/DVD-ROM"
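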
diff --git a/drivers/s390/char/sclp_async.c b/drivers/s390/char/sclp_async.c
index 5f9f929e891c..19c25427f27f 100644
--- a/drivers/s390/char/sclp_async.c
+++ b/drivers/s390/char/sclp_async.c
@@ -137,7 +137,8 @@ static int sclp_async_send_wait(char *message)
137 * Retain Queue 137 * Retain Queue
138 * e.g. 5639CC140 500 Red Hat RHEL5 Linux for zSeries (RHEL AS) 138 * e.g. 5639CC140 500 Red Hat RHEL5 Linux for zSeries (RHEL AS)
139 */ 139 */
140 strncpy(sccb->evbuf.comp_id, "000000000", sizeof(sccb->evbuf.comp_id)); 140 strncpy(sccb->evbuf.comp_id, CONFIG_SCLP_ASYNC_ID,
141 sizeof(sccb->evbuf.comp_id));
141 sccb->evbuf.header.length = sizeof(sccb->evbuf); 142 sccb->evbuf.header.length = sizeof(sccb->evbuf);
142 sccb->header.length = sizeof(sccb->evbuf) + sizeof(sccb->header); 143 sccb->header.length = sizeof(sccb->evbuf) + sizeof(sccb->header);
143 sccb->header.function_code = SCLP_NORMAL_WRITE; 144 sccb->header.function_code = SCLP_NORMAL_WRITE;
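One detail worth keeping in mind with the strncpy() call above: strncpy() stops after sizeof(sccb->evbuf.comp_id) bytes and does not NUL-terminate when the source fills the field completely. That is fine for a fixed-width record field like comp_id, but would be a bug for a C string. A small userspace demonstration:

#include <stdio.h>
#include <string.h>

int main(void)
{
        char comp_id[9];

        /* 9 source bytes into a 9-byte field: no terminating NUL */
        strncpy(comp_id, "000000000", sizeof(comp_id));

        /* print with an explicit width instead of relying on a NUL */
        printf("%.9s\n", comp_id);
        return 0;
}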
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 327cb19ad0b0..d3d1936057b4 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -1090,7 +1090,7 @@ tape_3590_print_io_sim_msg_f1(struct tape_device *device, struct irb *irb)
1090 "channel path 0x%x on CU", 1090 "channel path 0x%x on CU",
1091 sense->fmt.f71.md[1]); 1091 sense->fmt.f71.md[1]);
1092 else 1092 else
1093 snprintf(service, BUFSIZE, "Repair will disable cannel" 1093 snprintf(service, BUFSIZE, "Repair will disable channel"
1094 " paths (0x%x-0x%x) on CU", 1094 " paths (0x%x-0x%x) on CU",
1095 sense->fmt.f71.md[1], sense->fmt.f71.md[2]); 1095 sense->fmt.f71.md[1], sense->fmt.f71.md[2]);
1096 break; 1096 break;
@@ -1481,7 +1481,7 @@ tape_3590_irq(struct tape_device *device, struct tape_request *request,
1481 } 1481 }
1482 1482
1483 if (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) { 1483 if (irb->scsw.cmd.dstat & DEV_STAT_CHN_END) {
1484 DBF_EVENT(2, "cannel end\n"); 1484 DBF_EVENT(2, "channel end\n");
1485 return TAPE_IO_PENDING; 1485 return TAPE_IO_PENDING;
1486 } 1486 }
1487 1487
diff --git a/drivers/s390/cio/eadm_sch.c b/drivers/s390/cio/eadm_sch.c
index 37f0834300ea..bee8c11cd086 100644
--- a/drivers/s390/cio/eadm_sch.c
+++ b/drivers/s390/cio/eadm_sch.c
@@ -31,7 +31,7 @@
31MODULE_DESCRIPTION("driver for s390 eadm subchannels"); 31MODULE_DESCRIPTION("driver for s390 eadm subchannels");
32MODULE_LICENSE("GPL"); 32MODULE_LICENSE("GPL");
33 33
34#define EADM_TIMEOUT (5 * HZ) 34#define EADM_TIMEOUT (7 * HZ)
35static DEFINE_SPINLOCK(list_lock); 35static DEFINE_SPINLOCK(list_lock);
36static LIST_HEAD(eadm_list); 36static LIST_HEAD(eadm_list);
37 37
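The EADM_TIMEOUT bump above is expressed in jiffies, so 7 * HZ means seven seconds at any configured tick rate. A kernel-style sketch of the equivalent, HZ-agnostic spelling (eadm_deadline is an illustrative helper, not a driver symbol):

#include <linux/jiffies.h>

#define EADM_TIMEOUT (7 * HZ)           /* seven seconds in jiffies */

static inline unsigned long eadm_deadline(void)
{
        /* msecs_to_jiffies() says the same thing without naming HZ */
        return jiffies + msecs_to_jiffies(7000);
}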
diff --git a/drivers/scsi/aic94xx/aic94xx_task.c b/drivers/scsi/aic94xx/aic94xx_task.c
index 5ff1ce7ba1f4..cdd4ab683be9 100644
--- a/drivers/scsi/aic94xx/aic94xx_task.c
+++ b/drivers/scsi/aic94xx/aic94xx_task.c
@@ -373,10 +373,10 @@ static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task,
373 373
374 if (unlikely(task->ata_task.device_control_reg_update)) 374 if (unlikely(task->ata_task.device_control_reg_update))
375 scb->header.opcode = CONTROL_ATA_DEV; 375 scb->header.opcode = CONTROL_ATA_DEV;
376 else if (dev->sata_dev.command_set == ATA_COMMAND_SET) 376 else if (dev->sata_dev.class == ATA_DEV_ATAPI)
377 scb->header.opcode = INITIATE_ATA_TASK;
378 else
379 scb->header.opcode = INITIATE_ATAPI_TASK; 377 scb->header.opcode = INITIATE_ATAPI_TASK;
378 else
379 scb->header.opcode = INITIATE_ATA_TASK;
380 380
381 scb->ata_task.proto_conn_rate = (1 << 5); /* STP */ 381 scb->ata_task.proto_conn_rate = (1 << 5); /* STP */
382 if (dev->port->oob_mode == SAS_OOB_MODE) 382 if (dev->port->oob_mode == SAS_OOB_MODE)
@@ -387,7 +387,7 @@ static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task,
387 if (likely(!task->ata_task.device_control_reg_update)) 387 if (likely(!task->ata_task.device_control_reg_update))
388 scb->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ 388 scb->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
389 scb->ata_task.fis.flags &= 0xF0; /* PM_PORT field shall be 0 */ 389 scb->ata_task.fis.flags &= 0xF0; /* PM_PORT field shall be 0 */
390 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) 390 if (dev->sata_dev.class == ATA_DEV_ATAPI)
391 memcpy(scb->ata_task.atapi_packet, task->ata_task.atapi_packet, 391 memcpy(scb->ata_task.atapi_packet, task->ata_task.atapi_packet,
392 16); 392 16);
393 scb->ata_task.sister_scb = cpu_to_le16(0xFFFF); 393 scb->ata_task.sister_scb = cpu_to_le16(0xFFFF);
@@ -399,7 +399,7 @@ static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task,
399 if (task->ata_task.dma_xfer) 399 if (task->ata_task.dma_xfer)
400 flags |= DATA_XFER_MODE_DMA; 400 flags |= DATA_XFER_MODE_DMA;
401 if (task->ata_task.use_ncq && 401 if (task->ata_task.use_ncq &&
402 dev->sata_dev.command_set != ATAPI_COMMAND_SET) 402 dev->sata_dev.class != ATA_DEV_ATAPI)
403 flags |= ATA_Q_TYPE_NCQ; 403 flags |= ATA_Q_TYPE_NCQ;
404 flags |= data_dir_flags[task->data_dir]; 404 flags |= data_dir_flags[task->data_dir];
405 scb->ata_task.ata_flags = flags; 405 scb->ata_task.ata_flags = flags;
diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c
index 56e38096f0c4..cfd0084f1cd2 100644
--- a/drivers/scsi/isci/request.c
+++ b/drivers/scsi/isci/request.c
@@ -694,7 +694,7 @@ sci_io_request_construct_sata(struct isci_request *ireq,
694 } 694 }
695 695
696 /* ATAPI */ 696 /* ATAPI */
697 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET && 697 if (dev->sata_dev.class == ATA_DEV_ATAPI &&
698 task->ata_task.fis.command == ATA_CMD_PACKET) { 698 task->ata_task.fis.command == ATA_CMD_PACKET) {
699 sci_atapi_construct(ireq); 699 sci_atapi_construct(ireq);
700 return SCI_SUCCESS; 700 return SCI_SUCCESS;
@@ -2980,7 +2980,7 @@ static void sci_request_started_state_enter(struct sci_base_state_machine *sm)
2980 state = SCI_REQ_SMP_WAIT_RESP; 2980 state = SCI_REQ_SMP_WAIT_RESP;
2981 } else if (task && sas_protocol_ata(task->task_proto) && 2981 } else if (task && sas_protocol_ata(task->task_proto) &&
2982 !task->ata_task.use_ncq) { 2982 !task->ata_task.use_ncq) {
2983 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET && 2983 if (dev->sata_dev.class == ATA_DEV_ATAPI &&
2984 task->ata_task.fis.command == ATA_CMD_PACKET) { 2984 task->ata_task.fis.command == ATA_CMD_PACKET) {
2985 state = SCI_REQ_ATAPI_WAIT_H2D; 2985 state = SCI_REQ_ATAPI_WAIT_H2D;
2986 } else if (task->data_dir == DMA_NONE) { 2986 } else if (task->data_dir == DMA_NONE) {
diff --git a/drivers/scsi/isci/task.c b/drivers/scsi/isci/task.c
index 3f63c6318b0d..6dcaed0c1fc8 100644
--- a/drivers/scsi/isci/task.c
+++ b/drivers/scsi/isci/task.c
@@ -588,7 +588,7 @@ int isci_task_abort_task(struct sas_task *task)
588 588
589 ret = TMF_RESP_FUNC_COMPLETE; 589 ret = TMF_RESP_FUNC_COMPLETE;
590 } else { 590 } else {
591 /* Fill in the tmf stucture */ 591 /* Fill in the tmf structure */
592 isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort, 592 isci_task_build_abort_task_tmf(&tmf, isci_tmf_ssp_task_abort,
593 old_request); 593 old_request);
594 594
diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
index 577770fdee86..932d9cc98d2f 100644
--- a/drivers/scsi/libsas/sas_ata.c
+++ b/drivers/scsi/libsas/sas_ata.c
@@ -138,7 +138,7 @@ static void sas_ata_task_done(struct sas_task *task)
138 138
139 if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD || 139 if (stat->stat == SAS_PROTO_RESPONSE || stat->stat == SAM_STAT_GOOD ||
140 ((stat->stat == SAM_STAT_CHECK_CONDITION && 140 ((stat->stat == SAM_STAT_CHECK_CONDITION &&
141 dev->sata_dev.command_set == ATAPI_COMMAND_SET))) { 141 dev->sata_dev.class == ATA_DEV_ATAPI))) {
142 memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE); 142 memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);
143 143
144 if (!link->sactive) { 144 if (!link->sactive) {
@@ -272,7 +272,7 @@ static struct sas_internal *dev_to_sas_internal(struct domain_device *dev)
272 return to_sas_internal(dev->port->ha->core.shost->transportt); 272 return to_sas_internal(dev->port->ha->core.shost->transportt);
273} 273}
274 274
275static void sas_get_ata_command_set(struct domain_device *dev); 275static int sas_get_ata_command_set(struct domain_device *dev);
276 276
277int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy) 277int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
278{ 278{
@@ -297,8 +297,7 @@ int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
297 } 297 }
298 memcpy(dev->frame_rcvd, &dev->sata_dev.rps_resp.rps.fis, 298 memcpy(dev->frame_rcvd, &dev->sata_dev.rps_resp.rps.fis,
299 sizeof(struct dev_to_host_fis)); 299 sizeof(struct dev_to_host_fis));
300 /* TODO switch to ata_dev_classify() */ 300 dev->sata_dev.class = sas_get_ata_command_set(dev);
301 sas_get_ata_command_set(dev);
302 } 301 }
303 return 0; 302 return 0;
304} 303}
@@ -419,18 +418,7 @@ static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
419 if (ret && ret != -EAGAIN) 418 if (ret && ret != -EAGAIN)
420 sas_ata_printk(KERN_ERR, dev, "reset failed (errno=%d)\n", ret); 419 sas_ata_printk(KERN_ERR, dev, "reset failed (errno=%d)\n", ret);
421 420
422 /* XXX: if the class changes during the reset the upper layer 421 *class = dev->sata_dev.class;
423 * should be informed, if the device has gone away we assume
424 * libsas will eventually delete it
425 */
426 switch (dev->sata_dev.command_set) {
427 case ATA_COMMAND_SET:
428 *class = ATA_DEV_ATA;
429 break;
430 case ATAPI_COMMAND_SET:
431 *class = ATA_DEV_ATAPI;
432 break;
433 }
434 422
435 ap->cbl = ATA_CBL_SATA; 423 ap->cbl = ATA_CBL_SATA;
436 return ret; 424 return ret;
@@ -619,50 +607,18 @@ void sas_ata_task_abort(struct sas_task *task)
619 complete(waiting); 607 complete(waiting);
620} 608}
621 609
622static void sas_get_ata_command_set(struct domain_device *dev) 610static int sas_get_ata_command_set(struct domain_device *dev)
623{ 611{
624 struct dev_to_host_fis *fis = 612 struct dev_to_host_fis *fis =
625 (struct dev_to_host_fis *) dev->frame_rcvd; 613 (struct dev_to_host_fis *) dev->frame_rcvd;
614 struct ata_taskfile tf;
626 615
627 if (dev->dev_type == SAS_SATA_PENDING) 616 if (dev->dev_type == SAS_SATA_PENDING)
628 return; 617 return ATA_DEV_UNKNOWN;
618
619 ata_tf_from_fis((const u8 *)fis, &tf);
629 620
630 if ((fis->sector_count == 1 && /* ATA */ 621 return ata_dev_classify(&tf);
631 fis->lbal == 1 &&
632 fis->lbam == 0 &&
633 fis->lbah == 0 &&
634 fis->device == 0)
635 ||
636 (fis->sector_count == 0 && /* CE-ATA (mATA) */
637 fis->lbal == 0 &&
638 fis->lbam == 0xCE &&
639 fis->lbah == 0xAA &&
640 (fis->device & ~0x10) == 0))
641
642 dev->sata_dev.command_set = ATA_COMMAND_SET;
643
644 else if ((fis->interrupt_reason == 1 && /* ATAPI */
645 fis->lbal == 1 &&
646 fis->byte_count_low == 0x14 &&
647 fis->byte_count_high == 0xEB &&
648 (fis->device & ~0x10) == 0))
649
650 dev->sata_dev.command_set = ATAPI_COMMAND_SET;
651
652 else if ((fis->sector_count == 1 && /* SEMB */
653 fis->lbal == 1 &&
654 fis->lbam == 0x3C &&
655 fis->lbah == 0xC3 &&
656 fis->device == 0)
657 ||
658 (fis->interrupt_reason == 1 && /* SATA PM */
659 fis->lbal == 1 &&
660 fis->byte_count_low == 0x69 &&
661 fis->byte_count_high == 0x96 &&
662 (fis->device & ~0x10) == 0))
663
664 /* Treat it as a superset? */
665 dev->sata_dev.command_set = ATAPI_COMMAND_SET;
666} 622}
667 623
668void sas_probe_sata(struct asd_sas_port *port) 624void sas_probe_sata(struct asd_sas_port *port)
@@ -768,7 +724,7 @@ int sas_discover_sata(struct domain_device *dev)
768 if (dev->dev_type == SAS_SATA_PM) 724 if (dev->dev_type == SAS_SATA_PM)
769 return -ENODEV; 725 return -ENODEV;
770 726
771 sas_get_ata_command_set(dev); 727 dev->sata_dev.class = sas_get_ata_command_set(dev);
772 sas_fill_in_rphy(dev, dev->rphy); 728 sas_fill_in_rphy(dev, dev->rphy);
773 729
774 res = sas_notify_lldd_dev_found(dev); 730 res = sas_notify_lldd_dev_found(dev);
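The libsas rework above drops the open-coded FIS signature matching in favour of ata_tf_from_fis() plus ata_dev_classify(), and threads the resulting ATA_DEV_* class through dev->sata_dev.class. For reference, a simplified userspace sketch of the signature test ata_dev_classify() performs on the LBA-mid/high bytes of a device-to-host FIS (signature values from the ATA specs; the enum names here are illustrative stand-ins for the ATA_DEV_* constants):

enum dev_class_sketch { DEV_ATA, DEV_ATAPI, DEV_SEMB, DEV_PMP, DEV_UNKNOWN };

static enum dev_class_sketch classify_sig(unsigned char lbam,
                                          unsigned char lbah)
{
        if (lbam == 0x00 && lbah == 0x00)
                return DEV_ATA;         /* plain ATA device */
        if (lbam == 0x14 && lbah == 0xEB)
                return DEV_ATAPI;       /* packet device */
        if (lbam == 0x3C && lbah == 0xC3)
                return DEV_SEMB;        /* SATA enclosure bridge */
        if (lbam == 0x69 && lbah == 0x96)
                return DEV_PMP;         /* port multiplier */
        return DEV_UNKNOWN;
}

Returning the class from sas_get_ata_command_set() (instead of writing a side-effect field) also lets sas_ata_hard_reset() simply report *class = dev->sata_dev.class, removing the old switch statement.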
diff --git a/drivers/scsi/mac_scsi.c b/drivers/scsi/mac_scsi.c
index 953fd9b953c7..1e85c07e3b62 100644
--- a/drivers/scsi/mac_scsi.c
+++ b/drivers/scsi/mac_scsi.c
@@ -166,7 +166,7 @@ static void mac_scsi_reset_boot(struct Scsi_Host *instance)
166 XXX: Since bus errors in the PDMA routines never happen on my 166 XXX: Since bus errors in the PDMA routines never happen on my
167 computer, the bus error code is untested. 167 computer, the bus error code is untested.
168 If the code works as intended, a bus error results in Pseudo-DMA 168 If the code works as intended, a bus error results in Pseudo-DMA
169 beeing disabled, meaning that the driver switches to slow handshake. 169 being disabled, meaning that the driver switches to slow handshake.
170 If bus errors are NOT extremely rare, this has to be changed. 170 If bus errors are NOT extremely rare, this has to be changed.
171*/ 171*/
172 172
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index f05580e693d0..ff283d23788a 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -6783,7 +6783,7 @@ static int __init megasas_init(void)
6783 rval = pci_register_driver(&megasas_pci_driver); 6783 rval = pci_register_driver(&megasas_pci_driver);
6784 6784
6785 if (rval) { 6785 if (rval) {
6786 printk(KERN_DEBUG "megasas: PCI hotplug regisration failed \n"); 6786 printk(KERN_DEBUG "megasas: PCI hotplug registration failed \n");
6787 goto err_pcidrv; 6787 goto err_pcidrv;
6788 } 6788 }
6789 6789
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
index fa0567c96050..7f842c88abd2 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
@@ -224,7 +224,7 @@ struct mpt2_ioctl_eventreport {
224}; 224};
225 225
226/** 226/**
227 * struct mpt2_ioctl_command - generic mpt firmware passthru ioclt 227 * struct mpt2_ioctl_command - generic mpt firmware passthru ioctl
228 * @hdr - generic header 228 * @hdr - generic header
229 * @timeout - command timeout in seconds. (if zero then use driver default 229 * @timeout - command timeout in seconds. (if zero then use driver default
230 * value). 230 * value).
diff --git a/drivers/scsi/mvsas/mv_sas.c b/drivers/scsi/mvsas/mv_sas.c
index 85d86a5cdb60..2d5ab6d969ec 100644
--- a/drivers/scsi/mvsas/mv_sas.c
+++ b/drivers/scsi/mvsas/mv_sas.c
@@ -479,7 +479,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
479 479
480 if (task->ata_task.use_ncq) 480 if (task->ata_task.use_ncq)
481 flags |= MCH_FPDMA; 481 flags |= MCH_FPDMA;
482 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) { 482 if (dev->sata_dev.class == ATA_DEV_ATAPI) {
483 if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI) 483 if (task->ata_task.fis.command != ATA_CMD_ID_ATAPI)
484 flags |= MCH_ATAPI; 484 flags |= MCH_ATAPI;
485 } 485 }
@@ -546,7 +546,7 @@ static int mvs_task_prep_ata(struct mvs_info *mvi,
546 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ 546 task->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */
547 /* fill in command FIS and ATAPI CDB */ 547 /* fill in command FIS and ATAPI CDB */
548 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis)); 548 memcpy(buf_cmd, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
549 if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) 549 if (dev->sata_dev.class == ATA_DEV_ATAPI)
550 memcpy(buf_cmd + STP_ATAPI_CMD, 550 memcpy(buf_cmd + STP_ATAPI_CMD,
551 task->ata_task.atapi_packet, 16); 551 task->ata_task.atapi_packet, 16);
552 552
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index 933f21471951..96dcc097a463 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -4367,7 +4367,7 @@ static int pm8001_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
4367 PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n")); 4367 PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n"));
4368 } 4368 }
4369 if (task->ata_task.use_ncq && 4369 if (task->ata_task.use_ncq &&
4370 dev->sata_dev.command_set != ATAPI_COMMAND_SET) { 4370 dev->sata_dev.class != ATA_DEV_ATAPI) {
4371 ATAP = 0x07; /* FPDMA */ 4371 ATAP = 0x07; /* FPDMA */
4372 PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n")); 4372 PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
4373 } 4373 }
diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c
index b06443a0db2d..05cce463ab01 100644
--- a/drivers/scsi/pm8001/pm80xx_hwi.c
+++ b/drivers/scsi/pm8001/pm80xx_hwi.c
@@ -4077,7 +4077,7 @@ static int pm80xx_chip_sata_req(struct pm8001_hba_info *pm8001_ha,
4077 PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n")); 4077 PM8001_IO_DBG(pm8001_ha, pm8001_printk("PIO\n"));
4078 } 4078 }
4079 if (task->ata_task.use_ncq && 4079 if (task->ata_task.use_ncq &&
4080 dev->sata_dev.command_set != ATAPI_COMMAND_SET) { 4080 dev->sata_dev.class != ATA_DEV_ATAPI) {
4081 ATAP = 0x07; /* FPDMA */ 4081 ATAP = 0x07; /* FPDMA */
4082 PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n")); 4082 PM8001_IO_DBG(pm8001_ha, pm8001_printk("FPDMA\n"));
4083 } 4083 }
diff --git a/drivers/scsi/ps3rom.c b/drivers/scsi/ps3rom.c
index b3b48b5a984c..5298def33733 100644
--- a/drivers/scsi/ps3rom.c
+++ b/drivers/scsi/ps3rom.c
@@ -383,6 +383,7 @@ static int ps3rom_probe(struct ps3_system_bus_device *_dev)
383 if (!host) { 383 if (!host) {
384 dev_err(&dev->sbd.core, "%s:%u: scsi_host_alloc failed\n", 384 dev_err(&dev->sbd.core, "%s:%u: scsi_host_alloc failed\n",
385 __func__, __LINE__); 385 __func__, __LINE__);
386 error = -ENOMEM;
386 goto fail_teardown; 387 goto fail_teardown;
387 } 388 }
388 389
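The ps3rom fix above is the classic probe-path bug: jumping to the unwind label without setting the error code first, so the function reports success (or a stale value) for a failed allocation. A minimal userspace sketch of the corrected pattern (probe_sketch and malloc stand in for the real probe and scsi_host_alloc()):

#include <errno.h>
#include <stdlib.h>

static int probe_sketch(void)
{
        int error;
        void *host = malloc(16);        /* stand-in for scsi_host_alloc() */

        if (!host) {
                error = -ENOMEM;        /* set the cause before jumping */
                goto fail_teardown;
        }
        free(host);
        return 0;

fail_teardown:
        /* unwind anything acquired before the failure here */
        return error;
}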
diff --git a/drivers/ssb/driver_mipscore.c b/drivers/ssb/driver_mipscore.c
index 09077067b0c8..7b986f9f213f 100644
--- a/drivers/ssb/driver_mipscore.c
+++ b/drivers/ssb/driver_mipscore.c
@@ -15,6 +15,9 @@
15#include <linux/serial_core.h> 15#include <linux/serial_core.h>
16#include <linux/serial_reg.h> 16#include <linux/serial_reg.h>
17#include <linux/time.h> 17#include <linux/time.h>
18#ifdef CONFIG_BCM47XX
19#include <bcm47xx_nvram.h>
20#endif
18 21
19#include "ssb_private.h" 22#include "ssb_private.h"
20 23
@@ -210,6 +213,7 @@ static void ssb_mips_serial_init(struct ssb_mipscore *mcore)
210static void ssb_mips_flash_detect(struct ssb_mipscore *mcore) 213static void ssb_mips_flash_detect(struct ssb_mipscore *mcore)
211{ 214{
212 struct ssb_bus *bus = mcore->dev->bus; 215 struct ssb_bus *bus = mcore->dev->bus;
216 struct ssb_sflash *sflash = &mcore->sflash;
213 struct ssb_pflash *pflash = &mcore->pflash; 217 struct ssb_pflash *pflash = &mcore->pflash;
214 218
215 /* When there is no chipcommon on the bus there is 4MB flash */ 219 /* When there is no chipcommon on the bus there is 4MB flash */
@@ -242,7 +246,15 @@ static void ssb_mips_flash_detect(struct ssb_mipscore *mcore)
242 } 246 }
243 247
244ssb_pflash: 248ssb_pflash:
245 if (pflash->present) { 249 if (sflash->present) {
250#ifdef CONFIG_BCM47XX
251 bcm47xx_nvram_init_from_mem(sflash->window, sflash->size);
252#endif
253 } else if (pflash->present) {
254#ifdef CONFIG_BCM47XX
255 bcm47xx_nvram_init_from_mem(pflash->window, pflash->window_size);
256#endif
257
246 ssb_pflash_data.width = pflash->buswidth; 258 ssb_pflash_data.width = pflash->buswidth;
247 ssb_pflash_resource.start = pflash->window; 259 ssb_pflash_resource.start = pflash->window;
248 ssb_pflash_resource.end = pflash->window + pflash->window_size; 260 ssb_pflash_resource.end = pflash->window + pflash->window_size;
diff --git a/drivers/target/target_core_xcopy.c b/drivers/target/target_core_xcopy.c
index e9186cdf35e9..33ac39bf75e5 100644
--- a/drivers/target/target_core_xcopy.c
+++ b/drivers/target/target_core_xcopy.c
@@ -335,7 +335,7 @@ static int target_xcopy_parse_segment_descriptors(struct se_cmd *se_cmd,
335 desc += XCOPY_SEGMENT_DESC_LEN; 335 desc += XCOPY_SEGMENT_DESC_LEN;
336 break; 336 break;
337 default: 337 default:
338 pr_err("XCOPY unspported segment descriptor" 338 pr_err("XCOPY unsupported segment descriptor"
339 "type: 0x%02x\n", desc[0]); 339 "type: 0x%02x\n", desc[0]);
340 goto out; 340 goto out;
341 } 341 }
diff --git a/drivers/tc/tc.c b/drivers/tc/tc.c
index 946562389ca8..3be9519654e5 100644
--- a/drivers/tc/tc.c
+++ b/drivers/tc/tc.c
@@ -83,8 +83,7 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus)
83 /* Found a board, allocate it an entry in the list */ 83 /* Found a board, allocate it an entry in the list */
84 tdev = kzalloc(sizeof(*tdev), GFP_KERNEL); 84 tdev = kzalloc(sizeof(*tdev), GFP_KERNEL);
85 if (!tdev) { 85 if (!tdev) {
86 printk(KERN_ERR "tc%x: unable to allocate tc_dev\n", 86 pr_err("tc%x: unable to allocate tc_dev\n", slot);
87 slot);
88 goto out_err; 87 goto out_err;
89 } 88 }
90 dev_set_name(&tdev->dev, "tc%x", slot); 89 dev_set_name(&tdev->dev, "tc%x", slot);
@@ -117,10 +116,10 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus)
117 tdev->resource.start = extslotaddr; 116 tdev->resource.start = extslotaddr;
118 tdev->resource.end = extslotaddr + devsize - 1; 117 tdev->resource.end = extslotaddr + devsize - 1;
119 } else { 118 } else {
120 printk(KERN_ERR "%s: Cannot provide slot space " 119 pr_err("%s: Cannot provide slot space "
121 "(%dMiB required, up to %dMiB supported)\n", 120 "(%ldMiB required, up to %ldMiB supported)\n",
122 dev_name(&tdev->dev), devsize >> 20, 121 dev_name(&tdev->dev), (long)(devsize >> 20),
123 max(slotsize, extslotsize) >> 20); 122 (long)(max(slotsize, extslotsize) >> 20));
124 kfree(tdev); 123 kfree(tdev);
125 goto out_err; 124 goto out_err;
126 } 125 }
@@ -147,14 +146,12 @@ static int __init tc_init(void)
147{ 146{
148 /* Initialize the TURBOchannel bus */ 147 /* Initialize the TURBOchannel bus */
149 if (tc_bus_get_info(&tc_bus)) 148 if (tc_bus_get_info(&tc_bus))
150 return 0; 149 goto out_err;
151 150
152 INIT_LIST_HEAD(&tc_bus.devices); 151 INIT_LIST_HEAD(&tc_bus.devices);
153 dev_set_name(&tc_bus.dev, "tc"); 152 dev_set_name(&tc_bus.dev, "tc");
154 if (device_register(&tc_bus.dev)) { 153 if (device_register(&tc_bus.dev))
155 put_device(&tc_bus.dev); 154 goto out_err_device;
156 return 0;
157 }
158 155
159 if (tc_bus.info.slot_size) { 156 if (tc_bus.info.slot_size) {
160 unsigned int tc_clock = tc_get_speed(&tc_bus) / 100000; 157 unsigned int tc_clock = tc_get_speed(&tc_bus) / 100000;
@@ -172,8 +169,8 @@ static int __init tc_init(void)
172 tc_bus.resource[0].flags = IORESOURCE_MEM; 169 tc_bus.resource[0].flags = IORESOURCE_MEM;
173 if (request_resource(&iomem_resource, 170 if (request_resource(&iomem_resource,
174 &tc_bus.resource[0]) < 0) { 171 &tc_bus.resource[0]) < 0) {
175 printk(KERN_ERR "tc: Cannot reserve resource\n"); 172 pr_err("tc: Cannot reserve resource\n");
176 return 0; 173 goto out_err_device;
177 } 174 }
178 if (tc_bus.ext_slot_size) { 175 if (tc_bus.ext_slot_size) {
179 tc_bus.resource[1].start = tc_bus.ext_slot_base; 176 tc_bus.resource[1].start = tc_bus.ext_slot_base;
@@ -184,10 +181,8 @@ static int __init tc_init(void)
184 tc_bus.resource[1].flags = IORESOURCE_MEM; 181 tc_bus.resource[1].flags = IORESOURCE_MEM;
185 if (request_resource(&iomem_resource, 182 if (request_resource(&iomem_resource,
186 &tc_bus.resource[1]) < 0) { 183 &tc_bus.resource[1]) < 0) {
187 printk(KERN_ERR 184 pr_err("tc: Cannot reserve resource\n");
188 "tc: Cannot reserve resource\n"); 185 goto out_err_resource;
189 release_resource(&tc_bus.resource[0]);
190 return 0;
191 } 186 }
192 } 187 }
193 188
@@ -195,6 +190,13 @@ static int __init tc_init(void)
195 } 190 }
196 191
197 return 0; 192 return 0;
193
194out_err_resource:
195 release_resource(&tc_bus.resource[0]);
196out_err_device:
197 put_device(&tc_bus.dev);
198out_err:
199 return 0;
198} 200}
199 201
200subsys_initcall(tc_init); 202subsys_initcall(tc_init);
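The tc_init() rework consolidates three early returns into a single unwind ladder: resources are acquired in order, and each failure jumps to a label that releases everything acquired so far, in reverse order. A self-contained sketch of the shape (acquire_*/release_* are stand-ins for the real setup and teardown calls):

static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static void release_a(void) { }

static int init_sketch(void)
{
        if (acquire_a())
                goto out_err;
        if (acquire_b())
                goto out_err_a;
        return 0;

out_err_a:
        release_a();            /* undo step A only */
out_err:
        return 0;               /* tc_init() returns 0 even on failure */
}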
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 98f8bcaf3e7e..a26653fe788c 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -701,7 +701,7 @@ config PDC_CONSOLE
701 Saying Y here will enable the software based PDC console to be 701 Saying Y here will enable the software based PDC console to be
702 used as the system console. This is useful for machines in 702 used as the system console. This is useful for machines in
703 which the hardware based console has not been written yet. The 703 which the hardware based console has not been written yet. The
704 following steps must be competed to use the PDC console: 704 following steps must be completed to use the PDC console:
705 705
706 1. create the device entry (mknod /dev/ttyB0 c 11 0) 706 1. create the device entry (mknod /dev/ttyB0 c 11 0)
707 2. Edit the /etc/inittab to start a getty listening on /dev/ttyB0 707 2. Edit the /etc/inittab to start a getty listening on /dev/ttyB0
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 96fafed92b76..0ffb4ed0a945 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -103,6 +103,9 @@ static const struct usb_device_id usb_quirk_list[] = {
103 { USB_DEVICE(0x04f3, 0x009b), .driver_info = 103 { USB_DEVICE(0x04f3, 0x009b), .driver_info =
104 USB_QUIRK_DEVICE_QUALIFIER }, 104 USB_QUIRK_DEVICE_QUALIFIER },
105 105
106 { USB_DEVICE(0x04f3, 0x010c), .driver_info =
107 USB_QUIRK_DEVICE_QUALIFIER },
108
106 { USB_DEVICE(0x04f3, 0x016f), .driver_info = 109 { USB_DEVICE(0x04f3, 0x016f), .driver_info =
107 USB_QUIRK_DEVICE_QUALIFIER }, 110 USB_QUIRK_DEVICE_QUALIFIER },
108 111
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index 59ab62c92b66..ea2b9c374305 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -396,7 +396,7 @@ static int hidg_setup(struct usb_function *f,
396 396
397 case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8 397 case ((USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE) << 8
398 | HID_REQ_SET_REPORT): 398 | HID_REQ_SET_REPORT):
399 VDBG(cdev, "set_report | wLenght=%d\n", ctrl->wLength); 399 VDBG(cdev, "set_report | wLength=%d\n", ctrl->wLength);
400 goto stall; 400 goto stall;
401 break; 401 break;
402 402
diff --git a/drivers/usb/gadget/legacy/zero.c b/drivers/usb/gadget/legacy/zero.c
index ebf09f439f3a..ff97ac93ac03 100644
--- a/drivers/usb/gadget/legacy/zero.c
+++ b/drivers/usb/gadget/legacy/zero.c
@@ -28,7 +28,7 @@
28 * 28 *
29 * Why is *this* driver using two configurations, rather than setting up 29 * Why is *this* driver using two configurations, rather than setting up
30 * two interfaces with different functions? To help verify that multiple 30 * two interfaces with different functions? To help verify that multiple
31 * configuration infrastucture is working correctly; also, so that it can 31 * configuration infrastructure is working correctly; also, so that it can
32 * work with low capability USB controllers without four bulk endpoints. 32 * work with low capability USB controllers without four bulk endpoints.
33 */ 33 */
34 34
diff --git a/drivers/usb/host/ehci-sysfs.c b/drivers/usb/host/ehci-sysfs.c
index f6459dfb6f54..5e44407aa099 100644
--- a/drivers/usb/host/ehci-sysfs.c
+++ b/drivers/usb/host/ehci-sysfs.c
@@ -132,7 +132,7 @@ static ssize_t store_uframe_periodic_max(struct device *dev,
132 132
133 if (allocated_max > uframe_periodic_max) { 133 if (allocated_max > uframe_periodic_max) {
134 ehci_info(ehci, 134 ehci_info(ehci,
135 "cannot decrease uframe_periodic_max becase " 135 "cannot decrease uframe_periodic_max because "
136 "periodic bandwidth is already allocated " 136 "periodic bandwidth is already allocated "
137 "(%u > %u)\n", 137 "(%u > %u)\n",
138 allocated_max, uframe_periodic_max); 138 allocated_max, uframe_periodic_max);
diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c
index 3de1278677d0..ecf02b2623e8 100644
--- a/drivers/usb/host/fotg210-hcd.c
+++ b/drivers/usb/host/fotg210-hcd.c
@@ -4958,7 +4958,7 @@ static ssize_t store_uframe_periodic_max(struct device *dev,
4958 4958
4959 if (allocated_max > uframe_periodic_max) { 4959 if (allocated_max > uframe_periodic_max) {
4960 fotg210_info(fotg210, 4960 fotg210_info(fotg210,
4961 "cannot decrease uframe_periodic_max becase " 4961 "cannot decrease uframe_periodic_max because "
4962 "periodic bandwidth is already allocated " 4962 "periodic bandwidth is already allocated "
4963 "(%u > %u)\n", 4963 "(%u > %u)\n",
4964 allocated_max, uframe_periodic_max); 4964 allocated_max, uframe_periodic_max);
diff --git a/drivers/usb/host/fusbh200-hcd.c b/drivers/usb/host/fusbh200-hcd.c
index abe42f31559f..664d2aa1239c 100644
--- a/drivers/usb/host/fusbh200-hcd.c
+++ b/drivers/usb/host/fusbh200-hcd.c
@@ -4893,7 +4893,7 @@ static ssize_t store_uframe_periodic_max(struct device *dev,
4893 4893
4894 if (allocated_max > uframe_periodic_max) { 4894 if (allocated_max > uframe_periodic_max) {
4895 fusbh200_info(fusbh200, 4895 fusbh200_info(fusbh200,
4896 "cannot decrease uframe_periodic_max becase " 4896 "cannot decrease uframe_periodic_max because "
4897 "periodic bandwidth is already allocated " 4897 "periodic bandwidth is already allocated "
4898 "(%u > %u)\n", 4898 "(%u > %u)\n",
4899 allocated_max, uframe_periodic_max); 4899 allocated_max, uframe_periodic_max);
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 7064eb8d6142..8bfc47c29828 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -64,7 +64,7 @@ DEVICE(vivopay, VIVOPAY_IDS);
64/* Motorola USB Phone driver */ 64/* Motorola USB Phone driver */
65#define MOTO_IDS() \ 65#define MOTO_IDS() \
66 { USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */ \ 66 { USB_DEVICE(0x05c6, 0x3197) }, /* unknown Motorola phone */ \
67 { USB_DEVICE(0x0c44, 0x0022) }, /* unknown Mororola phone */ \ 67 { USB_DEVICE(0x0c44, 0x0022) }, /* unknown Motorola phone */ \
68 { USB_DEVICE(0x22b8, 0x2a64) }, /* Motorola KRZR K1m */ \ 68 { USB_DEVICE(0x22b8, 0x2a64) }, /* Motorola KRZR K1m */ \
69 { USB_DEVICE(0x22b8, 0x2c84) }, /* Motorola VE240 phone */ \ 69 { USB_DEVICE(0x22b8, 0x2c84) }, /* Motorola VE240 phone */ \
70 { USB_DEVICE(0x22b8, 0x2c64) } /* Motorola V950 phone */ 70 { USB_DEVICE(0x22b8, 0x2c64) } /* Motorola V950 phone */
diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi.c b/drivers/video/fbdev/exynos/exynos_mipi_dsi.c
index cee9602f9a7b..716bfad6a1c0 100644
--- a/drivers/video/fbdev/exynos/exynos_mipi_dsi.c
+++ b/drivers/video/fbdev/exynos/exynos_mipi_dsi.c
@@ -570,5 +570,5 @@ static struct platform_driver exynos_mipi_dsi_driver = {
570module_platform_driver(exynos_mipi_dsi_driver); 570module_platform_driver(exynos_mipi_dsi_driver);
571 571
572MODULE_AUTHOR("InKi Dae <inki.dae@samsung.com>"); 572MODULE_AUTHOR("InKi Dae <inki.dae@samsung.com>");
573MODULE_DESCRIPTION("Samusung SoC MIPI-DSI driver"); 573MODULE_DESCRIPTION("Samsung SoC MIPI-DSI driver");
574MODULE_LICENSE("GPL"); 574MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c b/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c
index 85edabfdef5a..2358a2fbbbcd 100644
--- a/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c
+++ b/drivers/video/fbdev/exynos/exynos_mipi_dsi_common.c
@@ -876,5 +876,5 @@ int exynos_mipi_dsi_fifo_clear(struct mipi_dsim_device *dsim,
876} 876}
877 877
878MODULE_AUTHOR("InKi Dae <inki.dae@samsung.com>"); 878MODULE_AUTHOR("InKi Dae <inki.dae@samsung.com>");
879MODULE_DESCRIPTION("Samusung SoC MIPI-DSI common driver"); 879MODULE_DESCRIPTION("Samsung SoC MIPI-DSI common driver");
880MODULE_LICENSE("GPL"); 880MODULE_LICENSE("GPL");
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index 9690216d38ff..c0abe276ee55 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -178,6 +178,7 @@
178#include <linux/dma-mapping.h> 178#include <linux/dma-mapping.h>
179#include <linux/mutex.h> 179#include <linux/mutex.h>
180#include <linux/io.h> 180#include <linux/io.h>
181#include <linux/clk.h>
181 182
182#include <video/sa1100fb.h> 183#include <video/sa1100fb.h>
183 184
@@ -416,9 +417,9 @@ sa1100fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
416 var->transp.offset); 417 var->transp.offset);
417 418
418#ifdef CONFIG_CPU_FREQ 419#ifdef CONFIG_CPU_FREQ
419 dev_dbg(fbi->dev, "dma period = %d ps, clock = %d kHz\n", 420 dev_dbg(fbi->dev, "dma period = %d ps, clock = %ld kHz\n",
420 sa1100fb_display_dma_period(var), 421 sa1100fb_display_dma_period(var),
421 cpufreq_get(smp_processor_id())); 422 clk_get_rate(fbi->clk) / 1000);
422#endif 423#endif
423 424
424 return 0; 425 return 0;
@@ -592,9 +593,10 @@ static struct fb_ops sa1100fb_ops = {
592 * Calculate the PCD value from the clock rate (in picoseconds). 593 * Calculate the PCD value from the clock rate (in picoseconds).
593 * We take account of the PPCR clock setting. 594 * We take account of the PPCR clock setting.
594 */ 595 */
595static inline unsigned int get_pcd(unsigned int pixclock, unsigned int cpuclock) 596static inline unsigned int get_pcd(struct sa1100fb_info *fbi,
597 unsigned int pixclock)
596{ 598{
597 unsigned int pcd = cpuclock / 100; 599 unsigned int pcd = clk_get_rate(fbi->clk) / 100 / 1000;
598 600
599 pcd *= pixclock; 601 pcd *= pixclock;
600 pcd /= 10000000; 602 pcd /= 10000000;
@@ -673,7 +675,7 @@ static int sa1100fb_activate_var(struct fb_var_screeninfo *var, struct sa1100fb_
673 LCCR2_BegFrmDel(var->upper_margin) + 675 LCCR2_BegFrmDel(var->upper_margin) +
674 LCCR2_EndFrmDel(var->lower_margin); 676 LCCR2_EndFrmDel(var->lower_margin);
675 677
676 pcd = get_pcd(var->pixclock, cpufreq_get(0)); 678 pcd = get_pcd(fbi, var->pixclock);
677 new_regs.lccr3 = LCCR3_PixClkDiv(pcd) | fbi->inf->lccr3 | 679 new_regs.lccr3 = LCCR3_PixClkDiv(pcd) | fbi->inf->lccr3 |
678 (var->sync & FB_SYNC_HOR_HIGH_ACT ? LCCR3_HorSnchH : LCCR3_HorSnchL) | 680 (var->sync & FB_SYNC_HOR_HIGH_ACT ? LCCR3_HorSnchH : LCCR3_HorSnchL) |
679 (var->sync & FB_SYNC_VERT_HIGH_ACT ? LCCR3_VrtSnchH : LCCR3_VrtSnchL); 681 (var->sync & FB_SYNC_VERT_HIGH_ACT ? LCCR3_VrtSnchH : LCCR3_VrtSnchL);
@@ -787,6 +789,9 @@ static void sa1100fb_enable_controller(struct sa1100fb_info *fbi)
787 fbi->palette_cpu[0] &= 0xcfff; 789 fbi->palette_cpu[0] &= 0xcfff;
788 fbi->palette_cpu[0] |= palette_pbs(&fbi->fb.var); 790 fbi->palette_cpu[0] |= palette_pbs(&fbi->fb.var);
789 791
792 /* enable LCD controller clock */
793 clk_prepare_enable(fbi->clk);
794
790 /* Sequence from 11.7.10 */ 795 /* Sequence from 11.7.10 */
791 writel_relaxed(fbi->reg_lccr3, fbi->base + LCCR3); 796 writel_relaxed(fbi->reg_lccr3, fbi->base + LCCR3);
792 writel_relaxed(fbi->reg_lccr2, fbi->base + LCCR2); 797 writel_relaxed(fbi->reg_lccr2, fbi->base + LCCR2);
@@ -831,6 +836,9 @@ static void sa1100fb_disable_controller(struct sa1100fb_info *fbi)
831 836
832 schedule_timeout(20 * HZ / 1000); 837 schedule_timeout(20 * HZ / 1000);
833 remove_wait_queue(&fbi->ctrlr_wait, &wait); 838 remove_wait_queue(&fbi->ctrlr_wait, &wait);
839
840 /* disable LCD controller clock */
841 clk_disable_unprepare(fbi->clk);
834} 842}
835 843
836/* 844/*
@@ -1009,7 +1017,6 @@ sa1100fb_freq_transition(struct notifier_block *nb, unsigned long val,
1009 void *data) 1017 void *data)
1010{ 1018{
1011 struct sa1100fb_info *fbi = TO_INF(nb, freq_transition); 1019 struct sa1100fb_info *fbi = TO_INF(nb, freq_transition);
1012 struct cpufreq_freqs *f = data;
1013 u_int pcd; 1020 u_int pcd;
1014 1021
1015 switch (val) { 1022 switch (val) {
@@ -1018,7 +1025,7 @@ sa1100fb_freq_transition(struct notifier_block *nb, unsigned long val,
1018 break; 1025 break;
1019 1026
1020 case CPUFREQ_POSTCHANGE: 1027 case CPUFREQ_POSTCHANGE:
1021 pcd = get_pcd(fbi->fb.var.pixclock, f->new); 1028 pcd = get_pcd(fbi, fbi->fb.var.pixclock);
1022 fbi->reg_lccr3 = (fbi->reg_lccr3 & ~0xff) | LCCR3_PixClkDiv(pcd); 1029 fbi->reg_lccr3 = (fbi->reg_lccr3 & ~0xff) | LCCR3_PixClkDiv(pcd);
1023 set_ctrlr_state(fbi, C_ENABLE_CLKCHANGE); 1030 set_ctrlr_state(fbi, C_ENABLE_CLKCHANGE);
1024 break; 1031 break;
@@ -1225,6 +1232,13 @@ static int sa1100fb_probe(struct platform_device *pdev)
1225 if (!fbi) 1232 if (!fbi)
1226 goto failed; 1233 goto failed;
1227 1234
1235 fbi->clk = clk_get(&pdev->dev, NULL);
1236 if (IS_ERR(fbi->clk)) {
1237 ret = PTR_ERR(fbi->clk);
1238 fbi->clk = NULL;
1239 goto failed;
1240 }
1241
1228 fbi->base = ioremap(res->start, resource_size(res)); 1242 fbi->base = ioremap(res->start, resource_size(res));
1229 if (!fbi->base) 1243 if (!fbi->base)
1230 goto failed; 1244 goto failed;
@@ -1277,6 +1291,8 @@ static int sa1100fb_probe(struct platform_device *pdev)
1277 failed: 1291 failed:
1278 if (fbi) 1292 if (fbi)
1279 iounmap(fbi->base); 1293 iounmap(fbi->base);
1294 if (fbi->clk)
1295 clk_put(fbi->clk);
1280 kfree(fbi); 1296 kfree(fbi);
1281 release_mem_region(res->start, resource_size(res)); 1297 release_mem_region(res->start, resource_size(res));
1282 return ret; 1298 return ret;
diff --git a/drivers/video/fbdev/sa1100fb.h b/drivers/video/fbdev/sa1100fb.h
index fc5d4292fad6..0139d13377a5 100644
--- a/drivers/video/fbdev/sa1100fb.h
+++ b/drivers/video/fbdev/sa1100fb.h
@@ -68,6 +68,7 @@ struct sa1100fb_info {
68#endif 68#endif
69 69
70 const struct sa1100fb_mach_info *inf; 70 const struct sa1100fb_mach_info *inf;
71 struct clk *clk;
71}; 72};
72 73
73#define TO_INF(ptr,member) container_of(ptr,struct sa1100fb_info,member) 74#define TO_INF(ptr,member) container_of(ptr,struct sa1100fb_info,member)
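Taken together, the sa1100fb changes follow the standard clk consumer lifecycle: clk_get() at probe time, clk_prepare_enable()/clk_disable_unprepare() bracketing controller use, clk_get_rate() for rate queries, and clk_put() on the error and remove paths. A kernel-style sketch of that pattern (assumed context: a driver with a valid struct device *dev; block_clock_on/off are illustrative names):

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

static struct clk *clk;

static int block_clock_on(struct device *dev)
{
        int ret;

        clk = clk_get(dev, NULL);               /* look up by device */
        if (IS_ERR(clk))
                return PTR_ERR(clk);

        ret = clk_prepare_enable(clk);          /* prepare + enable */
        if (ret) {
                clk_put(clk);
                return ret;
        }

        dev_dbg(dev, "clock at %lu Hz\n", clk_get_rate(clk));
        return 0;
}

static void block_clock_off(void)
{
        clk_disable_unprepare(clk);
        clk_put(clk);
}

Querying the LCD clock directly via clk_get_rate() is also what lets get_pcd() and the cpufreq notifier drop their dependency on cpufreq_get().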
diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
index ebd8f218a788..810ad419e34c 100644
--- a/drivers/xen/swiotlb-xen.c
+++ b/drivers/xen/swiotlb-xen.c
@@ -96,8 +96,6 @@ static inline phys_addr_t xen_bus_to_phys(dma_addr_t baddr)
96 dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT; 96 dma_addr_t dma = (dma_addr_t)pfn << PAGE_SHIFT;
97 phys_addr_t paddr = dma; 97 phys_addr_t paddr = dma;
98 98
99 BUG_ON(paddr != dma); /* truncation has occurred, should never happen */
100
101 paddr |= baddr & ~PAGE_MASK; 99 paddr |= baddr & ~PAGE_MASK;
102 100
103 return paddr; 101 return paddr;
@@ -399,11 +397,13 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
399 * buffering it. 397 * buffering it.
400 */ 398 */
401 if (dma_capable(dev, dev_addr, size) && 399 if (dma_capable(dev, dev_addr, size) &&
402 !range_straddles_page_boundary(phys, size) && !swiotlb_force) { 400 !range_straddles_page_boundary(phys, size) &&
401 !xen_arch_need_swiotlb(dev, PFN_DOWN(phys), PFN_DOWN(dev_addr)) &&
402 !swiotlb_force) {
403 /* we are not interested in the dma_addr returned by 403 /* we are not interested in the dma_addr returned by
404 * xen_dma_map_page, only in the potential cache flushes executed 404 * xen_dma_map_page, only in the potential cache flushes executed
405 * by the function. */ 405 * by the function. */
406 xen_dma_map_page(dev, page, offset, size, dir, attrs); 406 xen_dma_map_page(dev, page, dev_addr, offset, size, dir, attrs);
407 return dev_addr; 407 return dev_addr;
408 } 408 }
409 409
@@ -417,7 +417,7 @@ dma_addr_t xen_swiotlb_map_page(struct device *dev, struct page *page,
417 return DMA_ERROR_CODE; 417 return DMA_ERROR_CODE;
418 418
419 xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT), 419 xen_dma_map_page(dev, pfn_to_page(map >> PAGE_SHIFT),
420 map & ~PAGE_MASK, size, dir, attrs); 420 dev_addr, map & ~PAGE_MASK, size, dir, attrs);
421 dev_addr = xen_phys_to_bus(map); 421 dev_addr = xen_phys_to_bus(map);
422 422
423 /* 423 /*
@@ -447,7 +447,7 @@ static void xen_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
447 447
448 BUG_ON(dir == DMA_NONE); 448 BUG_ON(dir == DMA_NONE);
449 449
450 xen_dma_unmap_page(hwdev, paddr, size, dir, attrs); 450 xen_dma_unmap_page(hwdev, dev_addr, size, dir, attrs);
451 451
452 /* NOTE: We use dev_addr here, not paddr! */ 452 /* NOTE: We use dev_addr here, not paddr! */
453 if (is_xen_swiotlb_buffer(dev_addr)) { 453 if (is_xen_swiotlb_buffer(dev_addr)) {
@@ -495,14 +495,14 @@ xen_swiotlb_sync_single(struct device *hwdev, dma_addr_t dev_addr,
495 BUG_ON(dir == DMA_NONE); 495 BUG_ON(dir == DMA_NONE);
496 496
497 if (target == SYNC_FOR_CPU) 497 if (target == SYNC_FOR_CPU)
498 xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir); 498 xen_dma_sync_single_for_cpu(hwdev, dev_addr, size, dir);
499 499
500 /* NOTE: We use dev_addr here, not paddr! */ 500 /* NOTE: We use dev_addr here, not paddr! */
501 if (is_xen_swiotlb_buffer(dev_addr)) 501 if (is_xen_swiotlb_buffer(dev_addr))
502 swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target); 502 swiotlb_tbl_sync_single(hwdev, paddr, size, dir, target);
503 503
504 if (target == SYNC_FOR_DEVICE) 504 if (target == SYNC_FOR_DEVICE)
505 xen_dma_sync_single_for_cpu(hwdev, paddr, size, dir); 505 xen_dma_sync_single_for_device(hwdev, dev_addr, size, dir);
506 506
507 if (dir != DMA_FROM_DEVICE) 507 if (dir != DMA_FROM_DEVICE)
508 return; 508 return;
@@ -557,6 +557,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
557 dma_addr_t dev_addr = xen_phys_to_bus(paddr); 557 dma_addr_t dev_addr = xen_phys_to_bus(paddr);
558 558
559 if (swiotlb_force || 559 if (swiotlb_force ||
560 xen_arch_need_swiotlb(hwdev, PFN_DOWN(paddr), PFN_DOWN(dev_addr)) ||
560 !dma_capable(hwdev, dev_addr, sg->length) || 561 !dma_capable(hwdev, dev_addr, sg->length) ||
561 range_straddles_page_boundary(paddr, sg->length)) { 562 range_straddles_page_boundary(paddr, sg->length)) {
562 phys_addr_t map = swiotlb_tbl_map_single(hwdev, 563 phys_addr_t map = swiotlb_tbl_map_single(hwdev,
@@ -574,6 +575,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
574 return 0; 575 return 0;
575 } 576 }
576 xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT), 577 xen_dma_map_page(hwdev, pfn_to_page(map >> PAGE_SHIFT),
578 dev_addr,
577 map & ~PAGE_MASK, 579 map & ~PAGE_MASK,
578 sg->length, 580 sg->length,
579 dir, 581 dir,
@@ -584,6 +586,7 @@ xen_swiotlb_map_sg_attrs(struct device *hwdev, struct scatterlist *sgl,
584 * xen_dma_map_page, only in the potential cache flushes executed 586 * xen_dma_map_page, only in the potential cache flushes executed
585 * by the function. */ 587 * by the function. */
586 xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT), 588 xen_dma_map_page(hwdev, pfn_to_page(paddr >> PAGE_SHIFT),
589 dev_addr,
587 paddr & ~PAGE_MASK, 590 paddr & ~PAGE_MASK,
588 sg->length, 591 sg->length,
589 dir, 592 dir,
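Condensed, the mapping policy in xen_swiotlb_map_page() after this patch bounces through the swiotlb only when one of four conditions holds. A kernel-style sketch of that predicate, using the same helpers that appear in the hunk above (must_bounce itself is an illustrative name, not a function in the file):

static bool must_bounce(struct device *dev, phys_addr_t phys,
                        dma_addr_t dev_addr, size_t size)
{
        return swiotlb_force ||                            /* user override */
               !dma_capable(dev, dev_addr, size) ||        /* out of reach */
               range_straddles_page_boundary(phys, size) ||
               xen_arch_need_swiotlb(dev, PFN_DOWN(phys),
                                     PFN_DOWN(dev_addr));  /* arch hook */
}

The new xen_arch_need_swiotlb() term is the per-architecture escape hatch; the extra dev_addr argument to xen_dma_map_page() gives the architecture code the bus address it needs for cache maintenance.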
diff --git a/drivers/xen/xen-pciback/passthrough.c b/drivers/xen/xen-pciback/passthrough.c
index 828dddc360df..f16a30e2a110 100644
--- a/drivers/xen/xen-pciback/passthrough.c
+++ b/drivers/xen/xen-pciback/passthrough.c
@@ -69,7 +69,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
69} 69}
70 70
71static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev, 71static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
72 struct pci_dev *dev) 72 struct pci_dev *dev, bool lock)
73{ 73{
74 struct passthrough_dev_data *dev_data = pdev->pci_dev_data; 74 struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
75 struct pci_dev_entry *dev_entry, *t; 75 struct pci_dev_entry *dev_entry, *t;
@@ -87,8 +87,13 @@ static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
87 87
88 mutex_unlock(&dev_data->lock); 88 mutex_unlock(&dev_data->lock);
89 89
90 if (found_dev) 90 if (found_dev) {
91 if (lock)
92 device_lock(&found_dev->dev);
91 pcistub_put_pci_dev(found_dev); 93 pcistub_put_pci_dev(found_dev);
94 if (lock)
95 device_unlock(&found_dev->dev);
96 }
92} 97}
93 98
94static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev) 99static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
@@ -156,8 +161,11 @@ static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
156 struct pci_dev_entry *dev_entry, *t; 161 struct pci_dev_entry *dev_entry, *t;
157 162
158 list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) { 163 list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
164 struct pci_dev *dev = dev_entry->dev;
159 list_del(&dev_entry->list); 165 list_del(&dev_entry->list);
160 pcistub_put_pci_dev(dev_entry->dev); 166 device_lock(&dev->dev);
167 pcistub_put_pci_dev(dev);
168 device_unlock(&dev->dev);
161 kfree(dev_entry); 169 kfree(dev_entry);
162 } 170 }
163 171
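The new bool lock parameter encodes a caller contract: pcistub_put_pci_dev() now requires the device lock, but one caller (pcistub_remove(), in the pci_stub.c hunks below) already holds it. A sketch of the resulting conditional-locking pattern (release_sketch is an illustrative name):

static void release_sketch(struct pci_dev *dev, bool lock)
{
        if (lock)
                device_lock(&dev->dev);
        pcistub_put_pci_dev(dev);       /* asserts the lock is held */
        if (lock)
                device_unlock(&dev->dev);
}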
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 017069a455d4..cc3cbb4435f8 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -105,7 +105,7 @@ static void pcistub_device_release(struct kref *kref)
105 */ 105 */
106 __pci_reset_function_locked(dev); 106 __pci_reset_function_locked(dev);
107 if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state)) 107 if (pci_load_and_free_saved_state(dev, &dev_data->pci_saved_state))
108 dev_dbg(&dev->dev, "Could not reload PCI state\n"); 108 dev_info(&dev->dev, "Could not reload PCI state\n");
109 else 109 else
110 pci_restore_state(dev); 110 pci_restore_state(dev);
111 111
@@ -250,11 +250,15 @@ struct pci_dev *pcistub_get_pci_dev(struct xen_pcibk_device *pdev,
250 * - 'echo BDF > unbind' with a guest still using it. See pcistub_remove 250 * - 'echo BDF > unbind' with a guest still using it. See pcistub_remove
251 * 251 *
252 * As such we have to be careful. 252 * As such we have to be careful.
253 *
254 * To make this easier, the caller has to hold the device lock.
253 */ 255 */
254void pcistub_put_pci_dev(struct pci_dev *dev) 256void pcistub_put_pci_dev(struct pci_dev *dev)
255{ 257{
256 struct pcistub_device *psdev, *found_psdev = NULL; 258 struct pcistub_device *psdev, *found_psdev = NULL;
257 unsigned long flags; 259 unsigned long flags;
260 struct xen_pcibk_dev_data *dev_data;
261 int ret;
258 262
259 spin_lock_irqsave(&pcistub_devices_lock, flags); 263 spin_lock_irqsave(&pcistub_devices_lock, flags);
260 264
@@ -276,13 +280,20 @@ void pcistub_put_pci_dev(struct pci_dev *dev)
276 /* Cleanup our device 280 /* Cleanup our device
277 * (so it's ready for the next domain) 281 * (so it's ready for the next domain)
278 */ 282 */
283 device_lock_assert(&dev->dev);
284 __pci_reset_function_locked(dev);
279 285
280 /* This is OK - we are running from workqueue context 286 dev_data = pci_get_drvdata(dev);
281 * and want to inhibit the user from fiddling with 'reset' 287 ret = pci_load_saved_state(dev, dev_data->pci_saved_state);
282 */ 288 if (!ret) {
283 pci_reset_function(dev); 289 /*
284 pci_restore_state(dev); 290 * The usual sequence is pci_save_state & pci_restore_state
285 291 * but the guest might have messed the configuration space up.
292 * Use the initial version (when device was bound to us).
293 */
294 pci_restore_state(dev);
295 } else
296 dev_info(&dev->dev, "Could not reload PCI state\n");
286 /* This disables the device. */ 297 /* This disables the device. */
287 xen_pcibk_reset_device(dev); 298 xen_pcibk_reset_device(dev);
288 299
@@ -554,12 +565,14 @@ static void pcistub_remove(struct pci_dev *dev)
554 spin_unlock_irqrestore(&pcistub_devices_lock, flags); 565 spin_unlock_irqrestore(&pcistub_devices_lock, flags);
555 566
556 if (found_psdev) { 567 if (found_psdev) {
557 dev_dbg(&dev->dev, "found device to remove - in use? %p\n", 568 dev_dbg(&dev->dev, "found device to remove %s\n",
558 found_psdev->pdev); 569 found_psdev->pdev ? "- in-use" : "");
559 570
560 if (found_psdev->pdev) { 571 if (found_psdev->pdev) {
561 pr_warn("****** removing device %s while still in-use! ******\n", 572 int domid = xen_find_device_domain_owner(dev);
562 pci_name(found_psdev->dev)); 573
574 pr_warn("****** removing device %s while still in-use by domain %d! ******\n",
575 pci_name(found_psdev->dev), domid);
563 pr_warn("****** driver domain may still access this device's i/o resources!\n"); 576 pr_warn("****** driver domain may still access this device's i/o resources!\n");
564 pr_warn("****** shutdown driver domain before binding device\n"); 577 pr_warn("****** shutdown driver domain before binding device\n");
565 pr_warn("****** to other drivers or domains\n"); 578 pr_warn("****** to other drivers or domains\n");
@@ -567,7 +580,8 @@ static void pcistub_remove(struct pci_dev *dev)
 		/* N.B. This ends up calling pcistub_put_pci_dev which ends up
 		 * doing the FLR. */
 		xen_pcibk_release_pci_dev(found_psdev->pdev,
-					  found_psdev->dev);
+					  found_psdev->dev,
+					  false /* caller holds the lock. */);
 	}
 
 	spin_lock_irqsave(&pcistub_devices_lock, flags);
@@ -629,10 +643,12 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
 {
 	pci_ers_result_t res = result;
 	struct xen_pcie_aer_op *aer_op;
+	struct xen_pcibk_device *pdev = psdev->pdev;
+	struct xen_pci_sharedinfo *sh_info = pdev->sh_info;
 	int ret;
 
 	/*with PV AER drivers*/
-	aer_op = &(psdev->pdev->sh_info->aer_op);
+	aer_op = &(sh_info->aer_op);
 	aer_op->cmd = aer_cmd ;
 	/*useful for error_detected callback*/
 	aer_op->err = state;
@@ -653,36 +669,36 @@ static pci_ers_result_t common_process(struct pcistub_device *psdev,
 	 * this flag to judge whether we need to check pci-front give aer
 	 * service ack signal
 	 */
-	set_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags);
+	set_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);
 
 	/*It is possible that a pcifront conf_read_write ops request invokes
 	 * the callback which cause the spurious execution of wake_up.
 	 * Yet it is harmless and better than a spinlock here
 	 */
 	set_bit(_XEN_PCIB_active,
-		(unsigned long *)&psdev->pdev->sh_info->flags);
+		(unsigned long *)&sh_info->flags);
 	wmb();
-	notify_remote_via_irq(psdev->pdev->evtchn_irq);
+	notify_remote_via_irq(pdev->evtchn_irq);
 
 	ret = wait_event_timeout(xen_pcibk_aer_wait_queue,
 				 !(test_bit(_XEN_PCIB_active, (unsigned long *)
-				 &psdev->pdev->sh_info->flags)), 300*HZ);
+				 &sh_info->flags)), 300*HZ);
 
 	if (!ret) {
 		if (test_bit(_XEN_PCIB_active,
-			(unsigned long *)&psdev->pdev->sh_info->flags)) {
+			(unsigned long *)&sh_info->flags)) {
 			dev_err(&psdev->dev->dev,
 				"pcifront aer process not responding!\n");
 			clear_bit(_XEN_PCIB_active,
-			  (unsigned long *)&psdev->pdev->sh_info->flags);
+			  (unsigned long *)&sh_info->flags);
 			aer_op->err = PCI_ERS_RESULT_NONE;
 			return res;
 		}
 	}
-	clear_bit(_PCIB_op_pending, (unsigned long *)&psdev->pdev->flags);
+	clear_bit(_PCIB_op_pending, (unsigned long *)&pdev->flags);
 
 	if (test_bit(_XEN_PCIF_active,
-		(unsigned long *)&psdev->pdev->sh_info->flags)) {
+		(unsigned long *)&sh_info->flags)) {
 		dev_dbg(&psdev->dev->dev,
 			"schedule pci_conf service in " DRV_NAME "\n");
 		xen_pcibk_test_and_schedule_op(psdev->pdev);
@@ -1502,6 +1518,53 @@ parse_error:
 fs_initcall(pcistub_init);
 #endif
 
+#ifdef CONFIG_PCI_IOV
+static struct pcistub_device *find_vfs(const struct pci_dev *pdev)
+{
+	struct pcistub_device *psdev = NULL;
+	unsigned long flags;
+	bool found = false;
+
+	spin_lock_irqsave(&pcistub_devices_lock, flags);
+	list_for_each_entry(psdev, &pcistub_devices, dev_list) {
+		if (!psdev->pdev && psdev->dev != pdev
+		    && pci_physfn(psdev->dev) == pdev) {
+			found = true;
+			break;
+		}
+	}
+	spin_unlock_irqrestore(&pcistub_devices_lock, flags);
+	if (found)
+		return psdev;
+	return NULL;
+}
+
+static int pci_stub_notifier(struct notifier_block *nb,
+			     unsigned long action, void *data)
+{
+	struct device *dev = data;
+	const struct pci_dev *pdev = to_pci_dev(dev);
+
+	if (action != BUS_NOTIFY_UNBIND_DRIVER)
+		return NOTIFY_DONE;
+
+	if (!pdev->is_physfn)
+		return NOTIFY_DONE;
+
+	for (;;) {
+		struct pcistub_device *psdev = find_vfs(pdev);
+		if (!psdev)
+			break;
+		device_release_driver(&psdev->dev->dev);
+	}
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block pci_stub_nb = {
+	.notifier_call = pci_stub_notifier,
+};
+#endif
+
 static int __init xen_pcibk_init(void)
 {
 	int err;
@@ -1523,12 +1586,19 @@ static int __init xen_pcibk_init(void)
 	err = xen_pcibk_xenbus_register();
 	if (err)
 		pcistub_exit();
+#ifdef CONFIG_PCI_IOV
+	else
+		bus_register_notifier(&pci_bus_type, &pci_stub_nb);
+#endif
 
 	return err;
 }
 
 static void __exit xen_pcibk_cleanup(void)
 {
+#ifdef CONFIG_PCI_IOV
+	bus_unregister_notifier(&pci_bus_type, &pci_stub_nb);
+#endif
	xen_pcibk_xenbus_unregister();
	pcistub_exit();
 }
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index f72af87640e0..58e38d586f52 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -99,7 +99,8 @@ struct xen_pcibk_backend {
 			 unsigned int *domain, unsigned int *bus,
 			 unsigned int *devfn);
 	int (*publish)(struct xen_pcibk_device *pdev, publish_pci_root_cb cb);
-	void (*release)(struct xen_pcibk_device *pdev, struct pci_dev *dev);
+	void (*release)(struct xen_pcibk_device *pdev, struct pci_dev *dev,
+			bool lock);
 	int (*add)(struct xen_pcibk_device *pdev, struct pci_dev *dev,
 		   int devid, publish_pci_dev_cb publish_cb);
 	struct pci_dev *(*get)(struct xen_pcibk_device *pdev,
@@ -122,10 +123,10 @@ static inline int xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
 }
 
 static inline void xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
-					     struct pci_dev *dev)
+					     struct pci_dev *dev, bool lock)
 {
 	if (xen_pcibk_backend && xen_pcibk_backend->release)
-		return xen_pcibk_backend->release(pdev, dev);
+		return xen_pcibk_backend->release(pdev, dev, lock);
 }
 
 static inline struct pci_dev *
diff --git a/drivers/xen/xen-pciback/vpci.c b/drivers/xen/xen-pciback/vpci.c
index 51afff96c515..c99f8bb1c56c 100644
--- a/drivers/xen/xen-pciback/vpci.c
+++ b/drivers/xen/xen-pciback/vpci.c
@@ -145,7 +145,7 @@ out:
 }
 
 static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
-					struct pci_dev *dev)
+					struct pci_dev *dev, bool lock)
 {
 	int slot;
 	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
@@ -169,8 +169,13 @@ static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev,
 out:
 	mutex_unlock(&vpci_dev->lock);
 
-	if (found_dev)
+	if (found_dev) {
+		if (lock)
+			device_lock(&found_dev->dev);
 		pcistub_put_pci_dev(found_dev);
+		if (lock)
+			device_unlock(&found_dev->dev);
+	}
 }
 
 static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev)
@@ -208,8 +213,11 @@ static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev)
 		struct pci_dev_entry *e, *tmp;
 		list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
 					 list) {
+			struct pci_dev *dev = e->dev;
 			list_del(&e->list);
-			pcistub_put_pci_dev(e->dev);
+			device_lock(&dev->dev);
+			pcistub_put_pci_dev(dev);
+			device_unlock(&dev->dev);
 			kfree(e);
 		}
 	}
diff --git a/drivers/xen/xen-pciback/xenbus.c b/drivers/xen/xen-pciback/xenbus.c
index ad8d30c088fe..fe17c80ff4b7 100644
--- a/drivers/xen/xen-pciback/xenbus.c
+++ b/drivers/xen/xen-pciback/xenbus.c
@@ -247,7 +247,7 @@ static int xen_pcibk_export_device(struct xen_pcibk_device *pdev,
 	if (err)
 		goto out;
 
-	dev_dbg(&dev->dev, "registering for %d\n", pdev->xdev->otherend_id);
+	dev_info(&dev->dev, "registering for %d\n", pdev->xdev->otherend_id);
 	if (xen_register_device_domain_owner(dev,
 					     pdev->xdev->otherend_id) != 0) {
 		dev_err(&dev->dev, "Stealing ownership from dom%d.\n",
@@ -291,7 +291,7 @@ static int xen_pcibk_remove_device(struct xen_pcibk_device *pdev,
 
 	/* N.B. This ends up calling pcistub_put_pci_dev which ends up
 	 * doing the FLR. */
-	xen_pcibk_release_pci_dev(pdev, dev);
+	xen_pcibk_release_pci_dev(pdev, dev, true /* use the lock. */);
 
 out:
 	return err;
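
The xen-pciback hunks above all serve one invariant: pcistub_put_pci_dev()
must run with the PCI device lock held. Paths that already hold it (such as
the unbind path in pci_stub.c) pass false through the new bool argument,
the xenbus remove path passes true, and __xen_pcibk_release_devices() takes
the lock itself. A minimal sketch of the convention, where do_put() is an
illustrative stand-in for pcistub_put_pci_dev():

	static void release_one(struct pci_dev *dev, bool lock)
	{
		if (lock)
			device_lock(&dev->dev);	/* caller did not hold it */
		do_put(dev);			/* always runs under the lock */
		if (lock)
			device_unlock(&dev->dev);
	}

Passing the flag down instead of locking unconditionally avoids a deadlock
when the caller is itself invoked under the device lock.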
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 370b24cee4d8..c055d56ec63d 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -30,6 +30,9 @@ config COMPAT_BINFMT_ELF
 config ARCH_BINFMT_ELF_RANDOMIZE_PIE
 	bool
 
+config ARCH_BINFMT_ELF_STATE
+	bool
+
 config BINFMT_ELF_FDPIC
 	bool "Kernel support for FDPIC ELF binaries"
 	default y
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 3a6175fe10c0..02b16910f4c9 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -386,6 +386,127 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
 				ELF_PAGESTART(cmds[first_idx].p_vaddr);
 }
 
+/**
+ * load_elf_phdrs() - load ELF program headers
+ * @elf_ex:   ELF header of the binary whose program headers should be loaded
+ * @elf_file: the opened ELF binary file
+ *
+ * Loads ELF program headers from the binary file elf_file, which has the ELF
+ * header pointed to by elf_ex, into a newly allocated array. The caller is
+ * responsible for freeing the allocated data. Returns an ERR_PTR upon failure.
+ */
+static struct elf_phdr *load_elf_phdrs(struct elfhdr *elf_ex,
+				       struct file *elf_file)
+{
+	struct elf_phdr *elf_phdata = NULL;
+	int retval, size, err = -1;
+
+	/*
+	 * If the size of this structure has changed, then punt, since
+	 * we will be doing the wrong thing.
+	 */
+	if (elf_ex->e_phentsize != sizeof(struct elf_phdr))
+		goto out;
+
+	/* Sanity check the number of program headers... */
+	if (elf_ex->e_phnum < 1 ||
+	    elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
+		goto out;
+
+	/* ...and their total size. */
+	size = sizeof(struct elf_phdr) * elf_ex->e_phnum;
+	if (size > ELF_MIN_ALIGN)
+		goto out;
+
+	elf_phdata = kmalloc(size, GFP_KERNEL);
+	if (!elf_phdata)
+		goto out;
+
+	/* Read in the program headers */
+	retval = kernel_read(elf_file, elf_ex->e_phoff,
+			     (char *)elf_phdata, size);
+	if (retval != size) {
+		err = (retval < 0) ? retval : -EIO;
+		goto out;
+	}
+
+	/* Success! */
+	err = 0;
+out:
+	if (err) {
+		kfree(elf_phdata);
+		elf_phdata = NULL;
+	}
+	return elf_phdata;
+}
+
+#ifndef CONFIG_ARCH_BINFMT_ELF_STATE
+
+/**
+ * struct arch_elf_state - arch-specific ELF loading state
+ *
+ * This structure is used to preserve architecture specific data during
+ * the loading of an ELF file, throughout the checking of architecture
+ * specific ELF headers & through to the point where the ELF load is
+ * known to be proceeding (ie. SET_PERSONALITY).
+ *
+ * This implementation is a dummy for architectures which require no
+ * specific state.
+ */
+struct arch_elf_state {
+};
+
+#define INIT_ARCH_ELF_STATE {}
+
+/**
+ * arch_elf_pt_proc() - check a PT_LOPROC..PT_HIPROC ELF program header
+ * @ehdr:	The main ELF header
+ * @phdr:	The program header to check
+ * @elf:	The open ELF file
+ * @is_interp:	True if the phdr is from the interpreter of the ELF being
+ *		loaded, else false.
+ * @state:	Architecture-specific state preserved throughout the process
+ *		of loading the ELF.
+ *
+ * Inspects the program header phdr to validate its correctness and/or
+ * suitability for the system. Called once per ELF program header in the
+ * range PT_LOPROC to PT_HIPROC, for both the ELF being loaded and its
+ * interpreter.
+ *
+ * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
+ *         with that return code.
+ */
+static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
+				   struct elf_phdr *phdr,
+				   struct file *elf, bool is_interp,
+				   struct arch_elf_state *state)
+{
+	/* Dummy implementation, always proceed */
+	return 0;
+}
+
+/**
+ * arch_check_elf() - check a PT_LOPROC..PT_HIPROC ELF program header
+ * @ehdr:	The main ELF header
+ * @has_interp:	True if the ELF has an interpreter, else false.
+ * @state:	Architecture-specific state preserved throughout the process
+ *		of loading the ELF.
+ *
+ * Provides a final opportunity for architecture code to reject the loading
+ * of the ELF & cause an exec syscall to return an error. This is called after
+ * all program headers to be checked by arch_elf_pt_proc have been.
+ *
+ * Return: Zero to proceed with the ELF load, non-zero to fail the ELF load
+ *         with that return code.
+ */
+static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
+				 struct arch_elf_state *state)
+{
+	/* Dummy implementation, always proceed */
+	return 0;
+}
+
+#endif /* !CONFIG_ARCH_BINFMT_ELF_STATE */
 
 /* This is much more generalized than the library routine read function,
    so we keep this separate.  Technically the library read function
@@ -394,16 +515,15 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
 
 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
 		struct file *interpreter, unsigned long *interp_map_addr,
-		unsigned long no_base)
+		unsigned long no_base, struct elf_phdr *interp_elf_phdata)
 {
-	struct elf_phdr *elf_phdata;
 	struct elf_phdr *eppnt;
 	unsigned long load_addr = 0;
 	int load_addr_set = 0;
 	unsigned long last_bss = 0, elf_bss = 0;
 	unsigned long error = ~0UL;
 	unsigned long total_size;
-	int retval, i, size;
+	int i;
 
 	/* First of all, some simple consistency checks */
 	if (interp_elf_ex->e_type != ET_EXEC &&
@@ -414,40 +534,14 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
 	if (!interpreter->f_op->mmap)
 		goto out;
 
-	/*
-	 * If the size of this structure has changed, then punt, since
-	 * we will be doing the wrong thing.
-	 */
-	if (interp_elf_ex->e_phentsize != sizeof(struct elf_phdr))
-		goto out;
-	if (interp_elf_ex->e_phnum < 1 ||
-	    interp_elf_ex->e_phnum > 65536U / sizeof(struct elf_phdr))
-		goto out;
-
-	/* Now read in all of the header information */
-	size = sizeof(struct elf_phdr) * interp_elf_ex->e_phnum;
-	if (size > ELF_MIN_ALIGN)
-		goto out;
-	elf_phdata = kmalloc(size, GFP_KERNEL);
-	if (!elf_phdata)
-		goto out;
-
-	retval = kernel_read(interpreter, interp_elf_ex->e_phoff,
-			     (char *)elf_phdata, size);
-	error = -EIO;
-	if (retval != size) {
-		if (retval < 0)
-			error = retval;
-		goto out_close;
-	}
-
-	total_size = total_mapping_size(elf_phdata, interp_elf_ex->e_phnum);
+	total_size = total_mapping_size(interp_elf_phdata,
+					interp_elf_ex->e_phnum);
 	if (!total_size) {
 		error = -EINVAL;
-		goto out_close;
+		goto out;
 	}
 
-	eppnt = elf_phdata;
+	eppnt = interp_elf_phdata;
 	for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
 		if (eppnt->p_type == PT_LOAD) {
 			int elf_type = MAP_PRIVATE | MAP_DENYWRITE;
@@ -474,7 +568,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
 				*interp_map_addr = map_addr;
 			error = map_addr;
 			if (BAD_ADDR(map_addr))
-				goto out_close;
+				goto out;
 
 			if (!load_addr_set &&
 			    interp_elf_ex->e_type == ET_DYN) {
@@ -493,7 +587,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
 			    eppnt->p_memsz > TASK_SIZE ||
 			    TASK_SIZE - eppnt->p_memsz < k) {
 				error = -ENOMEM;
-				goto out_close;
+				goto out;
 			}
 
 			/*
@@ -523,7 +617,7 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
 		 */
 		if (padzero(elf_bss)) {
 			error = -EFAULT;
-			goto out_close;
+			goto out;
 		}
 
 		/* What we have mapped so far */
@@ -532,13 +626,10 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
 		/* Map the last of the bss segment */
 		error = vm_brk(elf_bss, last_bss - elf_bss);
 		if (BAD_ADDR(error))
-			goto out_close;
+			goto out;
 	}
 
 	error = load_addr;
-
-out_close:
-	kfree(elf_phdata);
 out:
 	return error;
 }
@@ -575,10 +666,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
 	int load_addr_set = 0;
 	char * elf_interpreter = NULL;
 	unsigned long error;
-	struct elf_phdr *elf_ppnt, *elf_phdata;
+	struct elf_phdr *elf_ppnt, *elf_phdata, *interp_elf_phdata = NULL;
 	unsigned long elf_bss, elf_brk;
 	int retval, i;
-	unsigned int size;
 	unsigned long elf_entry;
 	unsigned long interp_load_addr = 0;
 	unsigned long start_code, end_code, start_data, end_data;
@@ -589,6 +679,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
 		struct elfhdr elf_ex;
 		struct elfhdr interp_elf_ex;
 	} *loc;
+	struct arch_elf_state arch_state = INIT_ARCH_ELF_STATE;
 
 	loc = kmalloc(sizeof(*loc), GFP_KERNEL);
 	if (!loc) {
@@ -611,26 +702,10 @@ static int load_elf_binary(struct linux_binprm *bprm)
 	if (!bprm->file->f_op->mmap)
 		goto out;
 
-	/* Now read in all of the header information */
-	if (loc->elf_ex.e_phentsize != sizeof(struct elf_phdr))
-		goto out;
-	if (loc->elf_ex.e_phnum < 1 ||
-	    loc->elf_ex.e_phnum > 65536U / sizeof(struct elf_phdr))
-		goto out;
-	size = loc->elf_ex.e_phnum * sizeof(struct elf_phdr);
-	retval = -ENOMEM;
-	elf_phdata = kmalloc(size, GFP_KERNEL);
+	elf_phdata = load_elf_phdrs(&loc->elf_ex, bprm->file);
 	if (!elf_phdata)
 		goto out;
 
-	retval = kernel_read(bprm->file, loc->elf_ex.e_phoff,
-			     (char *)elf_phdata, size);
-	if (retval != size) {
-		if (retval >= 0)
-			retval = -EIO;
-		goto out_free_ph;
-	}
-
 	elf_ppnt = elf_phdata;
 	elf_bss = 0;
 	elf_brk = 0;
@@ -699,12 +774,21 @@ static int load_elf_binary(struct linux_binprm *bprm)
 
 	elf_ppnt = elf_phdata;
 	for (i = 0; i < loc->elf_ex.e_phnum; i++, elf_ppnt++)
-		if (elf_ppnt->p_type == PT_GNU_STACK) {
+		switch (elf_ppnt->p_type) {
+		case PT_GNU_STACK:
 			if (elf_ppnt->p_flags & PF_X)
 				executable_stack = EXSTACK_ENABLE_X;
 			else
 				executable_stack = EXSTACK_DISABLE_X;
 			break;
+
+		case PT_LOPROC ... PT_HIPROC:
+			retval = arch_elf_pt_proc(&loc->elf_ex, elf_ppnt,
+						  bprm->file, false,
+						  &arch_state);
+			if (retval)
+				goto out_free_dentry;
+			break;
 		}
 
 	/* Some simple consistency checks for the interpreter */
@@ -716,8 +800,36 @@ static int load_elf_binary(struct linux_binprm *bprm)
 		/* Verify the interpreter has a valid arch */
 		if (!elf_check_arch(&loc->interp_elf_ex))
 			goto out_free_dentry;
+
+		/* Load the interpreter program headers */
+		interp_elf_phdata = load_elf_phdrs(&loc->interp_elf_ex,
+						   interpreter);
+		if (!interp_elf_phdata)
+			goto out_free_dentry;
+
+		/* Pass PT_LOPROC..PT_HIPROC headers to arch code */
+		elf_ppnt = interp_elf_phdata;
+		for (i = 0; i < loc->interp_elf_ex.e_phnum; i++, elf_ppnt++)
+			switch (elf_ppnt->p_type) {
+			case PT_LOPROC ... PT_HIPROC:
+				retval = arch_elf_pt_proc(&loc->interp_elf_ex,
+							  elf_ppnt, interpreter,
+							  true, &arch_state);
+				if (retval)
+					goto out_free_dentry;
+				break;
+			}
 	}
 
+	/*
+	 * Allow arch code to reject the ELF at this point, whilst it's
+	 * still possible to return an error to the code that invoked
+	 * the exec syscall.
+	 */
+	retval = arch_check_elf(&loc->elf_ex, !!interpreter, &arch_state);
+	if (retval)
+		goto out_free_dentry;
+
 	/* Flush all traces of the currently running executable */
 	retval = flush_old_exec(bprm);
 	if (retval)
@@ -725,7 +837,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
 
 	/* Do this immediately, since STACK_TOP as used in setup_arg_pages
 	   may depend on the personality.  */
-	SET_PERSONALITY(loc->elf_ex);
+	SET_PERSONALITY2(loc->elf_ex, &arch_state);
 	if (elf_read_implies_exec(loc->elf_ex, executable_stack))
 		current->personality |= READ_IMPLIES_EXEC;
 
@@ -890,7 +1002,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
 			elf_entry = load_elf_interp(&loc->interp_elf_ex,
 						    interpreter,
 						    &interp_map_addr,
-						    load_bias);
+						    load_bias, interp_elf_phdata);
 			if (!IS_ERR((void *)elf_entry)) {
 				/*
 				 * load_elf_interp() returns relocation
@@ -917,6 +1029,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
 		}
 	}
 
+	kfree(interp_elf_phdata);
 	kfree(elf_phdata);
 
 	set_binfmt(&elf_format);
@@ -981,6 +1094,7 @@ out_ret:
 
 	/* error cleanup */
 out_free_dentry:
+	kfree(interp_elf_phdata);
 	allow_write_access(interpreter);
 	if (interpreter)
 		fput(interpreter);
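
The binfmt_elf.c changes above centralise program-header loading in
load_elf_phdrs() and give architectures two hook points, arch_elf_pt_proc()
and arch_check_elf(), with an arch_elf_state carried between them and into
SET_PERSONALITY2(). An architecture that selects ARCH_BINFMT_ELF_STATE
supplies real versions in its asm/elf.h; this mechanism was introduced for
MIPS' floating-point ABI program headers. A hedged sketch of such an
override -- everything named *MYARCH* plus abi_level is invented here
purely for illustration:

	/* Hypothetical asm/elf.h fragment for an arch selecting
	 * CONFIG_ARCH_BINFMT_ELF_STATE. */
	struct arch_elf_state {
		int abi_level;		/* recorded from PT_MYARCH_ABIINFO */
	};

	#define INIT_ARCH_ELF_STATE { .abi_level = -1 }

	static inline int arch_elf_pt_proc(struct elfhdr *ehdr,
					   struct elf_phdr *phdr,
					   struct file *elf, bool is_interp,
					   struct arch_elf_state *state)
	{
		if (phdr->p_type == PT_MYARCH_ABIINFO)
			state->abi_level = (int)phdr->p_flags;
		return 0;		/* zero == keep loading */
	}

	static inline int arch_check_elf(struct elfhdr *ehdr, bool has_interp,
					 struct arch_elf_state *state)
	{
		/* final veto: fail the exec with an errno if unsupported */
		return state->abi_level > MYARCH_MAX_ABI ? -ELIBBAD : 0;
	}

Because arch_elf_pt_proc() also sees the interpreter's headers, the final
arch_check_elf() decision can reconcile binary and interpreter requirements
before flush_old_exec() makes the exec unrecoverable.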
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index cb7f3fe9c9f6..d897ef803b3b 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -94,6 +94,7 @@
 #include <linux/mutex.h>
 #include <linux/genhd.h>
 #include <linux/blkdev.h>
+#include <linux/vmalloc.h>
 #include "ctree.h"
 #include "disk-io.h"
 #include "hash.h"
@@ -326,9 +327,6 @@ static int btrfsic_handle_extent_data(struct btrfsic_state *state,
 static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
 			     struct btrfsic_block_data_ctx *block_ctx_out,
 			     int mirror_num);
-static int btrfsic_map_superblock(struct btrfsic_state *state, u64 bytenr,
-				  u32 len, struct block_device *bdev,
-				  struct btrfsic_block_data_ctx *block_ctx_out);
 static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx);
 static int btrfsic_read_block(struct btrfsic_state *state,
 			      struct btrfsic_block_data_ctx *block_ctx);
@@ -1326,24 +1324,25 @@ static int btrfsic_create_link_to_next_block(
 		l = NULL;
 		next_block->generation = BTRFSIC_GENERATION_UNKNOWN;
 	} else {
-		if (next_block->logical_bytenr != next_bytenr &&
-		    !(!next_block->is_metadata &&
-		      0 == next_block->logical_bytenr)) {
-			printk(KERN_INFO
-			       "Referenced block @%llu (%s/%llu/%d)"
-			       " found in hash table, %c,"
-			       " bytenr mismatch (!= stored %llu).\n",
-			       next_bytenr, next_block_ctx->dev->name,
-			       next_block_ctx->dev_bytenr, *mirror_nump,
-			       btrfsic_get_block_type(state, next_block),
-			       next_block->logical_bytenr);
-		} else if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
-			printk(KERN_INFO
-			       "Referenced block @%llu (%s/%llu/%d)"
-			       " found in hash table, %c.\n",
-			       next_bytenr, next_block_ctx->dev->name,
-			       next_block_ctx->dev_bytenr, *mirror_nump,
-			       btrfsic_get_block_type(state, next_block));
+		if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) {
+			if (next_block->logical_bytenr != next_bytenr &&
+			    !(!next_block->is_metadata &&
+			      0 == next_block->logical_bytenr))
+				printk(KERN_INFO
+				       "Referenced block @%llu (%s/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu).\n",
+				       next_bytenr, next_block_ctx->dev->name,
+				       next_block_ctx->dev_bytenr, *mirror_nump,
+				       btrfsic_get_block_type(state,
+							      next_block),
+				       next_block->logical_bytenr);
+			else
+				printk(KERN_INFO
+				       "Referenced block @%llu (%s/%llu/%d) found in hash table, %c.\n",
+				       next_bytenr, next_block_ctx->dev->name,
+				       next_block_ctx->dev_bytenr, *mirror_nump,
+				       btrfsic_get_block_type(state,
+							      next_block));
+		}
 		next_block->logical_bytenr = next_bytenr;
 
 		next_block->mirror_num = *mirror_nump;
@@ -1529,7 +1528,9 @@ static int btrfsic_handle_extent_data(
 			return -1;
 		}
 		if (!block_was_created) {
-			if (next_block->logical_bytenr != next_bytenr &&
+			if ((state->print_mask &
+			     BTRFSIC_PRINT_MASK_VERBOSE) &&
+			    next_block->logical_bytenr != next_bytenr &&
 			    !(!next_block->is_metadata &&
 			      0 == next_block->logical_bytenr)) {
 				printk(KERN_INFO
@@ -1607,25 +1608,6 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
 	return ret;
 }
 
-static int btrfsic_map_superblock(struct btrfsic_state *state, u64 bytenr,
-				  u32 len, struct block_device *bdev,
-				  struct btrfsic_block_data_ctx *block_ctx_out)
-{
-	block_ctx_out->dev = btrfsic_dev_state_lookup(bdev);
-	block_ctx_out->dev_bytenr = bytenr;
-	block_ctx_out->start = bytenr;
-	block_ctx_out->len = len;
-	block_ctx_out->datav = NULL;
-	block_ctx_out->pagev = NULL;
-	block_ctx_out->mem_to_free = NULL;
-	if (NULL != block_ctx_out->dev) {
-		return 0;
-	} else {
-		printk(KERN_INFO "btrfsic: error, cannot lookup dev (#2)!\n");
-		return -ENXIO;
-	}
-}
-
 static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx)
 {
 	if (block_ctx->mem_to_free) {
@@ -1901,25 +1883,26 @@ again:
 							dev_state,
 							dev_bytenr);
 			}
-			if (block->logical_bytenr != bytenr &&
-			    !(!block->is_metadata &&
-			      block->logical_bytenr == 0))
-				printk(KERN_INFO
-				       "Written block @%llu (%s/%llu/%d)"
-				       " found in hash table, %c,"
-				       " bytenr mismatch"
-				       " (!= stored %llu).\n",
-				       bytenr, dev_state->name, dev_bytenr,
-				       block->mirror_num,
-				       btrfsic_get_block_type(state, block),
-				       block->logical_bytenr);
-			else if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
-				printk(KERN_INFO
-				       "Written block @%llu (%s/%llu/%d)"
-				       " found in hash table, %c.\n",
-				       bytenr, dev_state->name, dev_bytenr,
-				       block->mirror_num,
-				       btrfsic_get_block_type(state, block));
+			if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) {
+				if (block->logical_bytenr != bytenr &&
+				    !(!block->is_metadata &&
+				      block->logical_bytenr == 0))
+					printk(KERN_INFO
+					       "Written block @%llu (%s/%llu/%d) found in hash table, %c, bytenr mismatch (!= stored %llu).\n",
+					       bytenr, dev_state->name,
+					       dev_bytenr,
+					       block->mirror_num,
+					       btrfsic_get_block_type(state,
+								      block),
+					       block->logical_bytenr);
+				else
+					printk(KERN_INFO
+					       "Written block @%llu (%s/%llu/%d) found in hash table, %c.\n",
+					       bytenr, dev_state->name,
+					       dev_bytenr, block->mirror_num,
+					       btrfsic_get_block_type(state,
+								      block));
+			}
 			block->logical_bytenr = bytenr;
 		} else {
 			if (num_pages * PAGE_CACHE_SIZE <
@@ -2002,24 +1985,13 @@ again:
 			}
 		}
 
-		if (block->is_superblock)
-			ret = btrfsic_map_superblock(state, bytenr,
-						     processed_len,
-						     bdev, &block_ctx);
-		else
-			ret = btrfsic_map_block(state, bytenr, processed_len,
-						&block_ctx, 0);
-		if (ret) {
-			printk(KERN_INFO
-			       "btrfsic: btrfsic_map_block(root @%llu)"
-			       " failed!\n", bytenr);
-			goto continue_loop;
-		}
-		block_ctx.datav = mapped_datav;
-		/* the following is required in case of writes to mirrors,
-		 * use the same that was used for the lookup */
 		block_ctx.dev = dev_state;
 		block_ctx.dev_bytenr = dev_bytenr;
+		block_ctx.start = bytenr;
+		block_ctx.len = processed_len;
+		block_ctx.pagev = NULL;
+		block_ctx.mem_to_free = NULL;
+		block_ctx.datav = mapped_datav;
 
 		if (is_metadata || state->include_extent_data) {
 			block->never_written = 0;
@@ -2133,10 +2105,6 @@ again:
 				/* this is getting ugly for the
 				 * include_extent_data case... */
 				bytenr = 0;	/* unknown */
-				block_ctx.start = bytenr;
-				block_ctx.len = processed_len;
-				block_ctx.mem_to_free = NULL;
-				block_ctx.pagev = NULL;
 			} else {
 				processed_len = state->metablock_size;
 				bytenr = btrfs_stack_header_bytenr(
@@ -2149,22 +2117,15 @@ again:
 				       "Written block @%llu (%s/%llu/?)"
 				       " !found in hash table, M.\n",
 				       bytenr, dev_state->name, dev_bytenr);
-
-			ret = btrfsic_map_block(state, bytenr, processed_len,
-						&block_ctx, 0);
-			if (ret) {
-				printk(KERN_INFO
-				       "btrfsic: btrfsic_map_block(root @%llu)"
-				       " failed!\n",
-				       dev_bytenr);
-				goto continue_loop;
-			}
 		}
-		block_ctx.datav = mapped_datav;
-		/* the following is required in case of writes to mirrors,
-		 * use the same that was used for the lookup */
+
 		block_ctx.dev = dev_state;
 		block_ctx.dev_bytenr = dev_bytenr;
+		block_ctx.start = bytenr;
+		block_ctx.len = processed_len;
+		block_ctx.pagev = NULL;
+		block_ctx.mem_to_free = NULL;
+		block_ctx.datav = mapped_datav;
 
 		block = btrfsic_block_alloc();
 		if (NULL == block) {
@@ -3130,10 +3091,13 @@ int btrfsic_mount(struct btrfs_root *root,
 		       root->sectorsize, PAGE_CACHE_SIZE);
 		return -1;
 	}
-	state = kzalloc(sizeof(*state), GFP_NOFS);
-	if (NULL == state) {
-		printk(KERN_INFO "btrfs check-integrity: kmalloc() failed!\n");
-		return -1;
+	state = kzalloc(sizeof(*state), GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
+	if (!state) {
+		state = vzalloc(sizeof(*state));
+		if (!state) {
+			printk(KERN_INFO "btrfs check-integrity: vzalloc() failed!\n");
+			return -1;
+		}
 	}
 
 	if (!btrfsic_is_initialized) {
@@ -3277,5 +3241,8 @@ void btrfsic_unmount(struct btrfs_root *root,
 
 	mutex_unlock(&btrfsic_mutex);
 
-	kfree(state);
+	if (is_vmalloc_addr(state))
+		vfree(state);
+	else
+		kfree(state);
 }
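
btrfsic_mount() above switches the large state structure to "try the slab
first, fall back to vmalloc", with the matching conditional free in
btrfsic_unmount(). The pattern generalises; a sketch of it as a pair of
helpers (my_kvzalloc/my_kvfree are illustrative names -- mainline later
grew kvzalloc()/kvfree() for exactly this):

	#include <linux/mm.h>		/* is_vmalloc_addr() */
	#include <linux/slab.h>
	#include <linux/vmalloc.h>

	static void *my_kvzalloc(size_t size)
	{
		/* __GFP_NOWARN: failing here is expected under
		 * fragmentation, the vmalloc fallback handles it */
		void *p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN |
					__GFP_REPEAT);

		return p ? p : vzalloc(size);
	}

	static void my_kvfree(const void *p)
	{
		if (is_vmalloc_addr(p))	/* pick the allocator that made it */
			vfree(p);
		else
			kfree(p);
	}

The slab attempt is kept because physically contiguous memory is cheaper
to map; vmalloc only pays off when higher-order pages are unavailable.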
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index dcd9be32ac57..e9df8862012c 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -224,16 +224,19 @@ out:
  * Clear the writeback bits on all of the file
  * pages for a compressed write
  */
-static noinline void end_compressed_writeback(struct inode *inode, u64 start,
-					      unsigned long ram_size)
+static noinline void end_compressed_writeback(struct inode *inode,
+					      const struct compressed_bio *cb)
 {
-	unsigned long index = start >> PAGE_CACHE_SHIFT;
-	unsigned long end_index = (start + ram_size - 1) >> PAGE_CACHE_SHIFT;
+	unsigned long index = cb->start >> PAGE_CACHE_SHIFT;
+	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_CACHE_SHIFT;
 	struct page *pages[16];
 	unsigned long nr_pages = end_index - index + 1;
 	int i;
 	int ret;
 
+	if (cb->errors)
+		mapping_set_error(inode->i_mapping, -EIO);
+
 	while (nr_pages > 0) {
 		ret = find_get_pages_contig(inode->i_mapping, index,
 				     min_t(unsigned long,
@@ -244,6 +247,8 @@ static noinline void end_compressed_writeback(struct inode *inode, u64 start,
 			continue;
 		}
 		for (i = 0; i < ret; i++) {
+			if (cb->errors)
+				SetPageError(pages[i]);
 			end_page_writeback(pages[i]);
 			page_cache_release(pages[i]);
 		}
@@ -287,10 +292,11 @@ static void end_compressed_bio_write(struct bio *bio, int err)
 		tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
 						 cb->start,
 						 cb->start + cb->len - 1,
-						 NULL, 1);
+						 NULL,
+						 err ? 0 : 1);
 	cb->compressed_pages[0]->mapping = NULL;
 
-	end_compressed_writeback(inode, cb->start, cb->len);
+	end_compressed_writeback(inode, cb);
 	/* note, our inode could be gone now */
 
 	/*
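
The compression.c change above makes a failed compressed write observable
instead of silently completing writeback: the error is latched on the
address_space with mapping_set_error() so a later fsync() returns -EIO,
and each affected page gets SetPageError(). A sketch of both halves of
that contract (the surrounding control flow is illustrative, the calls
are the real page-cache API):

	/* write-completion side */
	if (bio_failed) {
		mapping_set_error(inode->i_mapping, -EIO); /* latch for fsync */
		SetPageError(page);			   /* per-page mark */
	}
	end_page_writeback(page);

	/* fsync side, roughly: consume the latched error exactly once */
	err = filemap_check_errors(inode->i_mapping);

Without the latch, a writer doing buffered I/O plus fsync() would never
learn that its compressed extent hit an I/O error.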
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 150822ee0a0b..14a72ed14ef7 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -2929,7 +2929,7 @@ done:
 	 */
 	if (!p->leave_spinning)
 		btrfs_set_path_blocking(p);
-	if (ret < 0)
+	if (ret < 0 && !p->skip_release_on_error)
 		btrfs_release_path(p);
 	return ret;
 }
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index fe69edda11fb..e6fbbd74b716 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -607,6 +607,7 @@ struct btrfs_path {
 	unsigned int leave_spinning:1;
 	unsigned int search_commit_root:1;
 	unsigned int need_commit_sem:1;
+	unsigned int skip_release_on_error:1;
 };
 
 /*
@@ -1170,6 +1171,7 @@ struct btrfs_space_info {
 	struct percpu_counter total_bytes_pinned;
 
 	struct list_head list;
+	struct list_head ro_bgs;
 
 	struct rw_semaphore groups_sem;
 	/* for block groups in our same type */
@@ -1276,6 +1278,8 @@ struct btrfs_block_group_cache {
 	unsigned int ro:1;
 	unsigned int dirty:1;
 	unsigned int iref:1;
+	unsigned int has_caching_ctl:1;
+	unsigned int removed:1;
 
 	int disk_cache_state;
 
@@ -1305,6 +1309,11 @@ struct btrfs_block_group_cache {
 
 	/* For delayed block group creation or deletion of empty block groups */
 	struct list_head bg_list;
+
+	/* For read-only block groups */
+	struct list_head ro_list;
+
+	atomic_t trimming;
 };
 
 /* delayed seq elem */
@@ -1402,6 +1411,11 @@ struct btrfs_fs_info {
 	 */
 	u64 last_trans_log_full_commit;
 	unsigned long mount_opt;
+	/*
+	 * Track requests for actions that need to be done during transaction
+	 * commit (like for some mount options).
+	 */
+	unsigned long pending_changes;
 	unsigned long compress_type:4;
 	int commit_interval;
 	/*
@@ -1729,6 +1743,12 @@ struct btrfs_fs_info {
 
 	/* For btrfs to record security options */
 	struct security_mnt_opts security_opts;
+
+	/*
+	 * Chunks that can't be freed yet (under a trim/discard operation)
+	 * and will be latter freed. Protected by fs_info->chunk_mutex.
+	 */
+	struct list_head pinned_chunks;
 };
 
 struct btrfs_subvolume_writers {
@@ -2093,7 +2113,6 @@ struct btrfs_ioctl_defrag_range_args {
 #define BTRFS_MOUNT_CHECK_INTEGRITY_INCLUDING_EXTENT_DATA (1 << 21)
 #define BTRFS_MOUNT_PANIC_ON_FATAL_ERROR	(1 << 22)
 #define BTRFS_MOUNT_RESCAN_UUID_TREE	(1 << 23)
-#define BTRFS_MOUNT_CHANGE_INODE_CACHE	(1 << 24)
 
 #define BTRFS_DEFAULT_COMMIT_INTERVAL	(30)
 #define BTRFS_DEFAULT_MAX_INLINE	(8192)
@@ -2103,6 +2122,7 @@ struct btrfs_ioctl_defrag_range_args {
 #define btrfs_raw_test_opt(o, opt)	((o) & BTRFS_MOUNT_##opt)
 #define btrfs_test_opt(root, opt)	((root)->fs_info->mount_opt & \
 					 BTRFS_MOUNT_##opt)
+
 #define btrfs_set_and_info(root, opt, fmt, args...)	\
 {							\
 	if (!btrfs_test_opt(root, opt))			\
@@ -2118,6 +2138,49 @@ struct btrfs_ioctl_defrag_range_args {
 }
 
 /*
+ * Requests for changes that need to be done during transaction commit.
+ *
+ * Internal mount options that are used for special handling of the real
+ * mount options (eg. cannot be set during remount and have to be set during
+ * transaction commit)
+ */
+
+#define BTRFS_PENDING_SET_INODE_MAP_CACHE	(0)
+#define BTRFS_PENDING_CLEAR_INODE_MAP_CACHE	(1)
+#define BTRFS_PENDING_COMMIT			(2)
+
+#define btrfs_test_pending(info, opt)	\
+	test_bit(BTRFS_PENDING_##opt, &(info)->pending_changes)
+#define btrfs_set_pending(info, opt)	\
+	set_bit(BTRFS_PENDING_##opt, &(info)->pending_changes)
+#define btrfs_clear_pending(info, opt)	\
+	clear_bit(BTRFS_PENDING_##opt, &(info)->pending_changes)
+
+/*
+ * Helpers for setting pending mount option changes.
+ *
+ * Expects corresponding macros
+ * BTRFS_PENDING_SET_ and CLEAR_ + short mount option name
+ */
+#define btrfs_set_pending_and_info(info, opt, fmt, args...)		\
+do {									\
+	if (!btrfs_raw_test_opt((info)->mount_opt, opt)) {		\
+		btrfs_info((info), fmt, ##args);			\
+		btrfs_set_pending((info), SET_##opt);			\
+		btrfs_clear_pending((info), CLEAR_##opt);		\
+	}								\
+} while(0)
+
+#define btrfs_clear_pending_and_info(info, opt, fmt, args...)		\
+do {									\
+	if (btrfs_raw_test_opt((info)->mount_opt, opt)) {		\
+		btrfs_info((info), fmt, ##args);			\
+		btrfs_set_pending((info), CLEAR_##opt);			\
+		btrfs_clear_pending((info), SET_##opt);			\
+	}								\
+} while(0)
+
+/*
 * Inode flags
 */
 #define BTRFS_INODE_NODATASUM		(1 << 0)
@@ -3351,7 +3414,8 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 			   u64 type, u64 chunk_objectid, u64 chunk_offset,
 			   u64 size);
 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
-			     struct btrfs_root *root, u64 group_start);
+			     struct btrfs_root *root, u64 group_start,
+			     struct extent_map *em);
 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
 				       struct btrfs_root *root);
@@ -3427,8 +3491,8 @@ int btrfs_init_space_info(struct btrfs_fs_info *fs_info);
 int btrfs_delayed_refs_qgroup_accounting(struct btrfs_trans_handle *trans,
 					 struct btrfs_fs_info *fs_info);
 int __get_raid_index(u64 flags);
-int btrfs_start_nocow_write(struct btrfs_root *root);
-void btrfs_end_nocow_write(struct btrfs_root *root);
+int btrfs_start_write_no_snapshoting(struct btrfs_root *root);
+void btrfs_end_write_no_snapshoting(struct btrfs_root *root);
 /* ctree.c */
 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
 		     int level, int *slot);
@@ -3686,6 +3750,10 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
 int verify_dir_item(struct btrfs_root *root,
 		    struct extent_buffer *leaf,
 		    struct btrfs_dir_item *dir_item);
+struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
+						 struct btrfs_path *path,
+						 const char *name,
+						 int name_len);
 
 /* orphan.c */
 int btrfs_insert_orphan_item(struct btrfs_trans_handle *trans,
@@ -3857,6 +3925,7 @@ int btrfs_prealloc_file_range_trans(struct inode *inode,
 				    struct btrfs_trans_handle *trans, int mode,
 				    u64 start, u64 num_bytes, u64 min_size,
 				    loff_t actual_len, u64 *alloc_hint);
+int btrfs_inode_check_errors(struct inode *inode);
 extern const struct dentry_operations btrfs_dentry_operations;
 
 /* ioctl.c */
@@ -3901,6 +3970,7 @@ int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
 		      struct page **pages, size_t num_pages,
 		      loff_t pos, size_t write_bytes,
 		      struct extent_state **cached);
+int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end);
 
 /* tree-defrag.c */
 int btrfs_defrag_leaves(struct btrfs_trans_handle *trans,
@@ -4097,7 +4167,12 @@ int btrfs_scrub_progress(struct btrfs_root *root, u64 devid,
 /* dev-replace.c */
 void btrfs_bio_counter_inc_blocked(struct btrfs_fs_info *fs_info);
 void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info);
-void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info);
+void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount);
+
+static inline void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info)
+{
+	btrfs_bio_counter_sub(fs_info, 1);
+}
 
 /* reada.c */
 struct reada_control {
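
The pending-changes machinery added to ctree.h above lets remount record
an option flip that only becomes safe to apply at transaction commit; the
SET_/CLEAR_ pairs cancel each other, so the last request wins. A sketch
of the intended use for the inode map cache option -- the apply side here
is illustrative, the real one lives in btrfs_apply_pending_changes():

	/* remount path: request the change, don't touch mount_opt yet */
	btrfs_set_pending_and_info(info, INODE_MAP_CACHE,
				   "enabling inode map caching");

	/* commit (or early mount): apply whatever was requested */
	if (btrfs_test_pending(info, SET_INODE_MAP_CACHE)) {
		btrfs_set_opt(info->mount_opt, INODE_MAP_CACHE);
		btrfs_clear_pending(info, SET_INODE_MAP_CACHE);
	}
	if (btrfs_test_pending(info, CLEAR_INODE_MAP_CACHE)) {
		btrfs_clear_opt(info->mount_opt, INODE_MAP_CACHE);
		btrfs_clear_pending(info, CLEAR_INODE_MAP_CACHE);
	}

This replaces the old one-off BTRFS_MOUNT_CHANGE_INODE_CACHE flag removed
above with a mechanism any deferred mount option can reuse.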
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 6f662b34ba0e..ca6a3a3b6b6c 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -316,11 +316,6 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
 	struct btrfs_device *tgt_device = NULL;
 	struct btrfs_device *src_device = NULL;
 
-	if (btrfs_fs_incompat(fs_info, RAID56)) {
-		btrfs_warn(fs_info, "dev_replace cannot yet handle RAID5/RAID6");
-		return -EOPNOTSUPP;
-	}
-
 	switch (args->start.cont_reading_from_srcdev_mode) {
 	case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_ALWAYS:
 	case BTRFS_IOCTL_DEV_REPLACE_CONT_READING_FROM_SRCDEV_MODE_AVOID:
@@ -422,9 +417,15 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
 			      &dev_replace->scrub_progress, 0, 1);
 
 	ret = btrfs_dev_replace_finishing(root->fs_info, ret);
-	WARN_ON(ret);
+	/* don't warn if EINPROGRESS, someone else might be running scrub */
+	if (ret == -EINPROGRESS) {
+		args->result = BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS;
+		ret = 0;
+	} else {
+		WARN_ON(ret);
+	}
 
-	return 0;
+	return ret;
 
 leave:
 	dev_replace->srcdev = NULL;
@@ -542,7 +543,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 		btrfs_destroy_dev_replace_tgtdev(fs_info, tgt_device);
 		mutex_unlock(&dev_replace->lock_finishing_cancel_unmount);
 
-		return 0;
+		return scrub_ret;
 	}
 
 	printk_in_rcu(KERN_INFO
@@ -571,15 +572,11 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 	list_add(&tgt_device->dev_alloc_list, &fs_info->fs_devices->alloc_list);
 	fs_info->fs_devices->rw_devices++;
 
-	/* replace the sysfs entry */
-	btrfs_kobj_rm_device(fs_info, src_device);
-	btrfs_kobj_add_device(fs_info, tgt_device);
-
 	btrfs_dev_replace_unlock(dev_replace);
 
 	btrfs_rm_dev_replace_blocked(fs_info);
 
-	btrfs_rm_dev_replace_srcdev(fs_info, src_device);
+	btrfs_rm_dev_replace_remove_srcdev(fs_info, src_device);
 
 	btrfs_rm_dev_replace_unblocked(fs_info);
 
@@ -594,6 +591,11 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
 	mutex_unlock(&uuid_mutex);
 
+	/* replace the sysfs entry */
+	btrfs_kobj_rm_device(fs_info, src_device);
+	btrfs_kobj_add_device(fs_info, tgt_device);
+	btrfs_rm_dev_replace_free_srcdev(fs_info, src_device);
+
 	/* write back the superblocks */
 	trans = btrfs_start_transaction(root, 0);
 	if (!IS_ERR(trans))
@@ -920,9 +922,9 @@ void btrfs_bio_counter_inc_noblocked(struct btrfs_fs_info *fs_info)
 	percpu_counter_inc(&fs_info->bio_counter);
 }
 
-void btrfs_bio_counter_dec(struct btrfs_fs_info *fs_info)
+void btrfs_bio_counter_sub(struct btrfs_fs_info *fs_info, s64 amount)
 {
-	percpu_counter_dec(&fs_info->bio_counter);
+	percpu_counter_sub(&fs_info->bio_counter, amount);
 
 	if (waitqueue_active(&fs_info->replace_wait))
 		wake_up(&fs_info->replace_wait);
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c
index fc8df866e919..1752625fb4dd 100644
--- a/fs/btrfs/dir-item.c
+++ b/fs/btrfs/dir-item.c
@@ -21,10 +21,6 @@
 #include "hash.h"
 #include "transaction.h"
 
-static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
-			      struct btrfs_path *path,
-			      const char *name, int name_len);
-
 /*
 * insert a name into a directory, doing overflow properly if there is a hash
 * collision. data_size indicates how big the item inserted should be. On
@@ -383,9 +379,9 @@ struct btrfs_dir_item *btrfs_lookup_xattr(struct btrfs_trans_handle *trans,
 * this walks through all the entries in a dir item and finds one
 * for a specific name.
 */
-static struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
-			      struct btrfs_path *path,
-			      const char *name, int name_len)
+struct btrfs_dir_item *btrfs_match_dir_item_name(struct btrfs_root *root,
+						 struct btrfs_path *path,
+						 const char *name, int name_len)
 {
 	struct btrfs_dir_item *dir_item;
 	unsigned long name_ptr;
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 1bf9f897065d..30965120772b 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2384,6 +2384,8 @@ int open_ctree(struct super_block *sb,
 	init_waitqueue_head(&fs_info->transaction_blocked_wait);
 	init_waitqueue_head(&fs_info->async_submit_wait);
 
+	INIT_LIST_HEAD(&fs_info->pinned_chunks);
+
 	ret = btrfs_alloc_stripe_hash_table(fs_info);
 	if (ret) {
 		err = ret;
@@ -2830,9 +2832,11 @@ retry_root_backup:
 		btrfs_set_opt(fs_info->mount_opt, SSD);
 	}
 
-	/* Set the real inode map cache flag */
-	if (btrfs_test_opt(tree_root, CHANGE_INODE_CACHE))
-		btrfs_set_opt(tree_root->fs_info->mount_opt, INODE_MAP_CACHE);
+	/*
+	 * Mount does not set all options immediatelly, we can do it now and do
+	 * not have to wait for transaction commit
+	 */
+	btrfs_apply_pending_changes(fs_info);
 
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
 	if (btrfs_test_opt(tree_root, CHECK_INTEGRITY)) {
@@ -3713,6 +3717,17 @@ void close_ctree(struct btrfs_root *root)
 
 	btrfs_free_block_rsv(root, root->orphan_block_rsv);
 	root->orphan_block_rsv = NULL;
+
+	lock_chunks(root);
+	while (!list_empty(&fs_info->pinned_chunks)) {
+		struct extent_map *em;
+
+		em = list_first_entry(&fs_info->pinned_chunks,
+				      struct extent_map, list);
+		list_del_init(&em->list);
+		free_extent_map(em);
+	}
+	unlock_chunks(root);
 }
 
 int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid,
@@ -3839,12 +3854,12 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
 	 */
 	if (!IS_ALIGNED(btrfs_super_root(sb), 4096))
 		printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
-				sb->root);
+				btrfs_super_root(sb));
 	if (!IS_ALIGNED(btrfs_super_chunk_root(sb), 4096))
-		printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
-				sb->chunk_root);
+		printk(KERN_WARNING "BTRFS: chunk_root block unaligned: %llu\n",
+				btrfs_super_chunk_root(sb));
 	if (!IS_ALIGNED(btrfs_super_log_root(sb), 4096))
-		printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
+		printk(KERN_WARNING "BTRFS: log_root block unaligned: %llu\n",
 				btrfs_super_log_root(sb));
 
 	if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) {
@@ -4129,6 +4144,25 @@ again:
 	return 0;
 }
 
+static void btrfs_free_pending_ordered(struct btrfs_transaction *cur_trans,
+				       struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_ordered_extent *ordered;
+
+	spin_lock(&fs_info->trans_lock);
+	while (!list_empty(&cur_trans->pending_ordered)) {
+		ordered = list_first_entry(&cur_trans->pending_ordered,
+					   struct btrfs_ordered_extent,
+					   trans_list);
+		list_del_init(&ordered->trans_list);
+		spin_unlock(&fs_info->trans_lock);
+
+		btrfs_put_ordered_extent(ordered);
+		spin_lock(&fs_info->trans_lock);
+	}
+	spin_unlock(&fs_info->trans_lock);
+}
+
 void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
 				   struct btrfs_root *root)
 {
@@ -4140,6 +4174,7 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
 	cur_trans->state = TRANS_STATE_UNBLOCKED;
 	wake_up(&root->fs_info->transaction_wait);
 
+	btrfs_free_pending_ordered(cur_trans, root->fs_info);
 	btrfs_destroy_delayed_inodes(root);
 	btrfs_assert_delayed_root_empty(root);
 
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 47c1ba141082..222d6aea4a8a 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -315,12 +315,6 @@ get_caching_control(struct btrfs_block_group_cache *cache)
 	struct btrfs_caching_control *ctl;
 
 	spin_lock(&cache->lock);
-	if (cache->cached != BTRFS_CACHE_STARTED) {
-		spin_unlock(&cache->lock);
-		return NULL;
-	}
-
-	/* We're loading it the fast way, so we don't have a caching_ctl. */
 	if (!cache->caching_ctl) {
 		spin_unlock(&cache->lock);
 		return NULL;
@@ -594,6 +588,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 	spin_unlock(&cache->lock);
 
 	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
+		mutex_lock(&caching_ctl->mutex);
 		ret = load_free_space_cache(fs_info, cache);
 
 		spin_lock(&cache->lock);
@@ -601,15 +596,19 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 			cache->caching_ctl = NULL;
 			cache->cached = BTRFS_CACHE_FINISHED;
 			cache->last_byte_to_unpin = (u64)-1;
+			caching_ctl->progress = (u64)-1;
 		} else {
 			if (load_cache_only) {
 				cache->caching_ctl = NULL;
 				cache->cached = BTRFS_CACHE_NO;
 			} else {
 				cache->cached = BTRFS_CACHE_STARTED;
+				cache->has_caching_ctl = 1;
 			}
 		}
 		spin_unlock(&cache->lock);
+		mutex_unlock(&caching_ctl->mutex);
+
 		wake_up(&caching_ctl->wait);
 		if (ret == 1) {
 			put_caching_control(caching_ctl);
@@ -627,6 +626,7 @@ static int cache_block_group(struct btrfs_block_group_cache *cache,
 			cache->cached = BTRFS_CACHE_NO;
 		} else {
 			cache->cached = BTRFS_CACHE_STARTED;
+			cache->has_caching_ctl = 1;
 		}
 		spin_unlock(&cache->lock);
 		wake_up(&caching_ctl->wait);
@@ -3162,7 +3162,19 @@ next_block_group(struct btrfs_root *root,
 		 struct btrfs_block_group_cache *cache)
 {
 	struct rb_node *node;
+
 	spin_lock(&root->fs_info->block_group_cache_lock);
+
+	/* If our block group was removed, we need a full search. */
+	if (RB_EMPTY_NODE(&cache->cache_node)) {
+		const u64 next_bytenr = cache->key.objectid + cache->key.offset;
+
+		spin_unlock(&root->fs_info->block_group_cache_lock);
+		btrfs_put_block_group(cache);
+		cache = btrfs_lookup_first_block_group(root->fs_info,
+						       next_bytenr);
+		return cache;
+	}
 	node = rb_next(&cache->cache_node);
 	btrfs_put_block_group(cache);
 	if (node) {
@@ -3504,6 +3516,7 @@ static int update_space_info(struct btrfs_fs_info *info, u64 flags,
 	found->chunk_alloc = 0;
 	found->flush = 0;
 	init_waitqueue_head(&found->wait);
+	INIT_LIST_HEAD(&found->ro_bgs);
 
 	ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
 				   info->space_info_kobj, "%s",
@@ -5425,7 +5438,17 @@ static int update_block_group(struct btrfs_root *root,
5425 spin_unlock(&cache->space_info->lock); 5438 spin_unlock(&cache->space_info->lock);
5426 } else { 5439 } else {
5427 old_val -= num_bytes; 5440 old_val -= num_bytes;
5441 btrfs_set_block_group_used(&cache->item, old_val);
5442 cache->pinned += num_bytes;
5443 cache->space_info->bytes_pinned += num_bytes;
5444 cache->space_info->bytes_used -= num_bytes;
5445 cache->space_info->disk_used -= num_bytes * factor;
5446 spin_unlock(&cache->lock);
5447 spin_unlock(&cache->space_info->lock);
5428 5448
5449 set_extent_dirty(info->pinned_extents,
5450 bytenr, bytenr + num_bytes - 1,
5451 GFP_NOFS | __GFP_NOFAIL);
5429 /* 5452 /*
5430 * No longer have used bytes in this block group, queue 5453 * No longer have used bytes in this block group, queue
5431 * it for deletion. 5454 * it for deletion.
@@ -5439,17 +5462,6 @@ static int update_block_group(struct btrfs_root *root,
5439 } 5462 }
5440 spin_unlock(&info->unused_bgs_lock); 5463 spin_unlock(&info->unused_bgs_lock);
5441 } 5464 }
5442 btrfs_set_block_group_used(&cache->item, old_val);
5443 cache->pinned += num_bytes;
5444 cache->space_info->bytes_pinned += num_bytes;
5445 cache->space_info->bytes_used -= num_bytes;
5446 cache->space_info->disk_used -= num_bytes * factor;
5447 spin_unlock(&cache->lock);
5448 spin_unlock(&cache->space_info->lock);
5449
5450 set_extent_dirty(info->pinned_extents,
5451 bytenr, bytenr + num_bytes - 1,
5452 GFP_NOFS | __GFP_NOFAIL);
5453 } 5465 }
5454 btrfs_put_block_group(cache); 5466 btrfs_put_block_group(cache);
5455 total -= num_bytes; 5467 total -= num_bytes;
@@ -8511,6 +8523,7 @@ static int set_block_group_ro(struct btrfs_block_group_cache *cache, int force)
8511 min_allocable_bytes <= sinfo->total_bytes) { 8523 min_allocable_bytes <= sinfo->total_bytes) {
8512 sinfo->bytes_readonly += num_bytes; 8524 sinfo->bytes_readonly += num_bytes;
8513 cache->ro = 1; 8525 cache->ro = 1;
8526 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
8514 ret = 0; 8527 ret = 0;
8515 } 8528 }
8516out: 8529out:
@@ -8565,15 +8578,20 @@ int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
8565 8578
8566/* 8579/*
8567 * helper to account the unused space of all the readonly block group in the 8580 * helper to account the unused space of all the readonly block group in the
8568 * list. takes mirrors into account. 8581 * space_info. takes mirrors into account.
8569 */ 8582 */
8570static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list) 8583u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8571{ 8584{
8572 struct btrfs_block_group_cache *block_group; 8585 struct btrfs_block_group_cache *block_group;
8573 u64 free_bytes = 0; 8586 u64 free_bytes = 0;
8574 int factor; 8587 int factor;
8575 8588
 8576 	list_for_each_entry(block_group, groups_list, list) {	 8589 	/* It's df, we don't care if it's racy */
8590 if (list_empty(&sinfo->ro_bgs))
8591 return 0;
8592
8593 spin_lock(&sinfo->lock);
8594 list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
8577 spin_lock(&block_group->lock); 8595 spin_lock(&block_group->lock);
8578 8596
8579 if (!block_group->ro) { 8597 if (!block_group->ro) {
@@ -8594,26 +8612,6 @@ static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
8594 8612
8595 spin_unlock(&block_group->lock); 8613 spin_unlock(&block_group->lock);
8596 } 8614 }
8597
8598 return free_bytes;
8599}
8600
8601/*
8602 * helper to account the unused space of all the readonly block group in the
8603 * space_info. takes mirrors into account.
8604 */
8605u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8606{
8607 int i;
8608 u64 free_bytes = 0;
8609
8610 spin_lock(&sinfo->lock);
8611
8612 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
8613 if (!list_empty(&sinfo->block_groups[i]))
8614 free_bytes += __btrfs_get_ro_block_group_free_space(
8615 &sinfo->block_groups[i]);
8616
8617 spin_unlock(&sinfo->lock); 8615 spin_unlock(&sinfo->lock);
8618 8616
8619 return free_bytes; 8617 return free_bytes;
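
The rewrite above replaces the per-RAID-type scan with a walk of the new sinfo->ro_bgs list. The arithmetic itself is unchanged: unused bytes of each read-only group, divided by the replication factor so mirrored space is not reported twice. A sketch of that calculation under assumed profile names (the kernel derives the factor from the block group flags):

#include <stddef.h>
#include <stdint.h>

enum raid_profile { PROFILE_SINGLE, PROFILE_DUP, PROFILE_RAID1, PROFILE_RAID10 };

struct ro_group {
        uint64_t total;          /* key.offset analog */
        uint64_t used;           /* btrfs_block_group_used() analog */
        uint64_t super;          /* bytes_super: reserved for superblock copies */
        enum raid_profile profile;
};

static unsigned int mirror_factor(enum raid_profile p)
{
        switch (p) {
        case PROFILE_DUP:
        case PROFILE_RAID1:
        case PROFILE_RAID10:
                return 2;        /* two copies of every byte */
        default:
                return 1;
        }
}

uint64_t ro_free_space(const struct ro_group *g, size_t n)
{
        uint64_t free_bytes = 0;
        size_t i;

        for (i = 0; i < n; i++)
                free_bytes += (g[i].total - g[i].used - g[i].super) /
                              mirror_factor(g[i].profile);
        return free_bytes;
}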
@@ -8633,6 +8631,7 @@ void btrfs_set_block_group_rw(struct btrfs_root *root,
8633 cache->bytes_super - btrfs_block_group_used(&cache->item); 8631 cache->bytes_super - btrfs_block_group_used(&cache->item);
8634 sinfo->bytes_readonly -= num_bytes; 8632 sinfo->bytes_readonly -= num_bytes;
8635 cache->ro = 0; 8633 cache->ro = 0;
8634 list_del_init(&cache->ro_list);
8636 spin_unlock(&cache->lock); 8635 spin_unlock(&cache->lock);
8637 spin_unlock(&sinfo->lock); 8636 spin_unlock(&sinfo->lock);
8638} 8637}
@@ -9002,7 +9001,9 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
9002 INIT_LIST_HEAD(&cache->list); 9001 INIT_LIST_HEAD(&cache->list);
9003 INIT_LIST_HEAD(&cache->cluster_list); 9002 INIT_LIST_HEAD(&cache->cluster_list);
9004 INIT_LIST_HEAD(&cache->bg_list); 9003 INIT_LIST_HEAD(&cache->bg_list);
9004 INIT_LIST_HEAD(&cache->ro_list);
9005 btrfs_init_free_space_ctl(cache); 9005 btrfs_init_free_space_ctl(cache);
9006 atomic_set(&cache->trimming, 0);
9006 9007
9007 return cache; 9008 return cache;
9008} 9009}
@@ -9195,9 +9196,8 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9195 int ret = 0; 9196 int ret = 0;
9196 9197
9197 list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) { 9198 list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
9198 list_del_init(&block_group->bg_list);
9199 if (ret) 9199 if (ret)
9200 continue; 9200 goto next;
9201 9201
9202 spin_lock(&block_group->lock); 9202 spin_lock(&block_group->lock);
9203 memcpy(&item, &block_group->item, sizeof(item)); 9203 memcpy(&item, &block_group->item, sizeof(item));
@@ -9212,6 +9212,8 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9212 key.objectid, key.offset); 9212 key.objectid, key.offset);
9213 if (ret) 9213 if (ret)
9214 btrfs_abort_transaction(trans, extent_root, ret); 9214 btrfs_abort_transaction(trans, extent_root, ret);
9215next:
9216 list_del_init(&block_group->bg_list);
9215 } 9217 }
9216} 9218}
9217 9219
@@ -9304,7 +9306,8 @@ static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
9304} 9306}
9305 9307
9306int btrfs_remove_block_group(struct btrfs_trans_handle *trans, 9308int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9307 struct btrfs_root *root, u64 group_start) 9309 struct btrfs_root *root, u64 group_start,
9310 struct extent_map *em)
9308{ 9311{
9309 struct btrfs_path *path; 9312 struct btrfs_path *path;
9310 struct btrfs_block_group_cache *block_group; 9313 struct btrfs_block_group_cache *block_group;
@@ -9316,6 +9319,8 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9316 int ret; 9319 int ret;
9317 int index; 9320 int index;
9318 int factor; 9321 int factor;
9322 struct btrfs_caching_control *caching_ctl = NULL;
9323 bool remove_em;
9319 9324
9320 root = root->fs_info->extent_root; 9325 root = root->fs_info->extent_root;
9321 9326
@@ -9400,6 +9405,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9400 spin_lock(&root->fs_info->block_group_cache_lock); 9405 spin_lock(&root->fs_info->block_group_cache_lock);
9401 rb_erase(&block_group->cache_node, 9406 rb_erase(&block_group->cache_node,
9402 &root->fs_info->block_group_cache_tree); 9407 &root->fs_info->block_group_cache_tree);
9408 RB_CLEAR_NODE(&block_group->cache_node);
9403 9409
9404 if (root->fs_info->first_logical_byte == block_group->key.objectid) 9410 if (root->fs_info->first_logical_byte == block_group->key.objectid)
9405 root->fs_info->first_logical_byte = (u64)-1; 9411 root->fs_info->first_logical_byte = (u64)-1;
@@ -9411,6 +9417,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9411 * are still on the list after taking the semaphore 9417 * are still on the list after taking the semaphore
9412 */ 9418 */
9413 list_del_init(&block_group->list); 9419 list_del_init(&block_group->list);
9420 list_del_init(&block_group->ro_list);
9414 if (list_empty(&block_group->space_info->block_groups[index])) { 9421 if (list_empty(&block_group->space_info->block_groups[index])) {
9415 kobj = block_group->space_info->block_group_kobjs[index]; 9422 kobj = block_group->space_info->block_group_kobjs[index];
9416 block_group->space_info->block_group_kobjs[index] = NULL; 9423 block_group->space_info->block_group_kobjs[index] = NULL;
@@ -9422,8 +9429,32 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9422 kobject_put(kobj); 9429 kobject_put(kobj);
9423 } 9430 }
9424 9431
9432 if (block_group->has_caching_ctl)
9433 caching_ctl = get_caching_control(block_group);
9425 if (block_group->cached == BTRFS_CACHE_STARTED) 9434 if (block_group->cached == BTRFS_CACHE_STARTED)
9426 wait_block_group_cache_done(block_group); 9435 wait_block_group_cache_done(block_group);
9436 if (block_group->has_caching_ctl) {
9437 down_write(&root->fs_info->commit_root_sem);
9438 if (!caching_ctl) {
9439 struct btrfs_caching_control *ctl;
9440
9441 list_for_each_entry(ctl,
9442 &root->fs_info->caching_block_groups, list)
9443 if (ctl->block_group == block_group) {
9444 caching_ctl = ctl;
9445 atomic_inc(&caching_ctl->count);
9446 break;
9447 }
9448 }
9449 if (caching_ctl)
9450 list_del_init(&caching_ctl->list);
9451 up_write(&root->fs_info->commit_root_sem);
9452 if (caching_ctl) {
9453 /* Once for the caching bgs list and once for us. */
9454 put_caching_control(caching_ctl);
9455 put_caching_control(caching_ctl);
9456 }
9457 }
9427 9458
9428 btrfs_remove_free_space_cache(block_group); 9459 btrfs_remove_free_space_cache(block_group);
9429 9460
@@ -9435,6 +9466,71 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
9435 9466
9436 memcpy(&key, &block_group->key, sizeof(key)); 9467 memcpy(&key, &block_group->key, sizeof(key));
9437 9468
9469 lock_chunks(root);
9470 if (!list_empty(&em->list)) {
9471 /* We're in the transaction->pending_chunks list. */
9472 free_extent_map(em);
9473 }
9474 spin_lock(&block_group->lock);
9475 block_group->removed = 1;
9476 /*
9477 * At this point trimming can't start on this block group, because we
9478 * removed the block group from the tree fs_info->block_group_cache_tree
 9479 	 * so no one can find it anymore, and even if someone already got this
9480 * block group before we removed it from the rbtree, they have already
9481 * incremented block_group->trimming - if they didn't, they won't find
9482 * any free space entries because we already removed them all when we
9483 * called btrfs_remove_free_space_cache().
9484 *
9485 * And we must not remove the extent map from the fs_info->mapping_tree
9486 * to prevent the same logical address range and physical device space
9487 * ranges from being reused for a new block group. This is because our
9488 * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
9489 * completely transactionless, so while it is trimming a range the
9490 * currently running transaction might finish and a new one start,
9491 * allowing for new block groups to be created that can reuse the same
9492 * physical device locations unless we take this special care.
9493 */
9494 remove_em = (atomic_read(&block_group->trimming) == 0);
9495 /*
9496 * Make sure a trimmer task always sees the em in the pinned_chunks list
9497 * if it sees block_group->removed == 1 (needs to lock block_group->lock
9498 * before checking block_group->removed).
9499 */
9500 if (!remove_em) {
9501 /*
9502 * Our em might be in trans->transaction->pending_chunks which
9503 * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
9504 * and so is the fs_info->pinned_chunks list.
9505 *
9506 * So at this point we must be holding the chunk_mutex to avoid
9507 * any races with chunk allocation (more specifically at
9508 * volumes.c:contains_pending_extent()), to ensure it always
9509 * sees the em, either in the pending_chunks list or in the
9510 * pinned_chunks list.
9511 */
9512 list_move_tail(&em->list, &root->fs_info->pinned_chunks);
9513 }
9514 spin_unlock(&block_group->lock);
9515
9516 if (remove_em) {
9517 struct extent_map_tree *em_tree;
9518
9519 em_tree = &root->fs_info->mapping_tree.map_tree;
9520 write_lock(&em_tree->lock);
9521 /*
9522 * The em might be in the pending_chunks list, so make sure the
9523 * chunk mutex is locked, since remove_extent_mapping() will
9524 * delete us from that list.
9525 */
9526 remove_extent_mapping(em_tree, em);
9527 write_unlock(&em_tree->lock);
9528 /* once for the tree */
9529 free_extent_map(em);
9530 }
9531
9532 unlock_chunks(root);
9533
9438 btrfs_put_block_group(block_group); 9534 btrfs_put_block_group(block_group);
9439 btrfs_put_block_group(block_group); 9535 btrfs_put_block_group(block_group);
9440 9536
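
The remove_em logic above boils down to one decision made under block_group->lock with the chunk mutex held: drop the extent map only if no trimmer is inside the group, otherwise pin it for the last trimmer. A hypothetical sketch of just that decision (locking elided, names invented):

#include <stdatomic.h>
#include <stdbool.h>

struct bg_sim {
        atomic_int trimming;     /* trims currently inside this group */
        bool removed;            /* written under block_group->lock */
};

/* Caller is assumed to hold the group lock and the chunk mutex. */
static bool mark_removed_should_drop_em(struct bg_sim *bg)
{
        bg->removed = true;
        /* remove_em = (atomic_read(&block_group->trimming) == 0) */
        return atomic_load(&bg->trimming) == 0;
}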
@@ -9523,10 +9619,18 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
9523 */ 9619 */
9524 start = block_group->key.objectid; 9620 start = block_group->key.objectid;
9525 end = start + block_group->key.offset - 1; 9621 end = start + block_group->key.offset - 1;
9526 clear_extent_bits(&fs_info->freed_extents[0], start, end, 9622 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
9527 EXTENT_DIRTY, GFP_NOFS); 9623 EXTENT_DIRTY, GFP_NOFS);
9528 clear_extent_bits(&fs_info->freed_extents[1], start, end, 9624 if (ret) {
9625 btrfs_set_block_group_rw(root, block_group);
9626 goto end_trans;
9627 }
9628 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
9529 EXTENT_DIRTY, GFP_NOFS); 9629 EXTENT_DIRTY, GFP_NOFS);
9630 if (ret) {
9631 btrfs_set_block_group_rw(root, block_group);
9632 goto end_trans;
9633 }
9530 9634
9531 /* Reset pinned so btrfs_put_block_group doesn't complain */ 9635 /* Reset pinned so btrfs_put_block_group doesn't complain */
9532 block_group->pinned = 0; 9636 block_group->pinned = 0;
@@ -9537,6 +9641,7 @@ void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
9537 */ 9641 */
9538 ret = btrfs_remove_chunk(trans, root, 9642 ret = btrfs_remove_chunk(trans, root,
9539 block_group->key.objectid); 9643 block_group->key.objectid);
9644end_trans:
9540 btrfs_end_transaction(trans, root); 9645 btrfs_end_transaction(trans, root);
9541next: 9646next:
9542 btrfs_put_block_group(block_group); 9647 btrfs_put_block_group(block_group);
@@ -9657,12 +9762,14 @@ int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
9657} 9762}
9658 9763
9659/* 9764/*
9660 * btrfs_{start,end}_write() is similar to mnt_{want, drop}_write(), 9765 * btrfs_{start,end}_write_no_snapshoting() are similar to
9661 * they are used to prevent the some tasks writing data into the page cache 9766 * mnt_{want,drop}_write(), they are used to prevent some tasks from writing
9662 * by nocow before the subvolume is snapshoted, but flush the data into 9767 * data into the page cache through nocow before the subvolume is snapshoted,
 9663 	 * the disk after the snapshot creation.	 9768 	 * but flush the data to disk after the snapshot creation, or to prevent
	 9769 	 * operations that, while snapshoting is ongoing, would make the snapshot
	 9770 	 * inconsistent (writes followed by expanding truncates, for example).
9664 */ 9771 */
9665void btrfs_end_nocow_write(struct btrfs_root *root) 9772void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
9666{ 9773{
9667 percpu_counter_dec(&root->subv_writers->counter); 9774 percpu_counter_dec(&root->subv_writers->counter);
9668 /* 9775 /*
@@ -9674,7 +9781,7 @@ void btrfs_end_nocow_write(struct btrfs_root *root)
9674 wake_up(&root->subv_writers->wait); 9781 wake_up(&root->subv_writers->wait);
9675} 9782}
9676 9783
9677int btrfs_start_nocow_write(struct btrfs_root *root) 9784int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
9678{ 9785{
9679 if (atomic_read(&root->will_be_snapshoted)) 9786 if (atomic_read(&root->will_be_snapshoted))
9680 return 0; 9787 return 0;
@@ -9685,7 +9792,7 @@ int btrfs_start_nocow_write(struct btrfs_root *root)
9685 */ 9792 */
9686 smp_mb(); 9793 smp_mb();
9687 if (atomic_read(&root->will_be_snapshoted)) { 9794 if (atomic_read(&root->will_be_snapshoted)) {
9688 btrfs_end_nocow_write(root); 9795 btrfs_end_write_no_snapshoting(root);
9689 return 0; 9796 return 0;
9690 } 9797 }
9691 return 1; 9798 return 1;
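
The renamed pair btrfs_start_write_no_snapshoting()/btrfs_end_write_no_snapshoting() keeps its original protocol: check the flag, enter by bumping a counter, then re-check the flag and back out if a snapshot started in between. A compressed sketch of that check-enter-recheck shape (plain C11 atomics instead of the kernel's percpu counter and barriers; illustrative only):

#include <stdatomic.h>

static atomic_int writers;
static atomic_int snapshot_pending;

static void end_write(void)
{
        atomic_fetch_sub(&writers, 1);
        /* the kernel additionally wakes waiters when the count drops */
}

static int start_write(void)
{
        if (atomic_load(&snapshot_pending))
                return 0;                /* caller falls back to COW */
        atomic_fetch_add(&writers, 1);
        /* recheck: pairs with the snapshot side setting the flag */
        if (atomic_load(&snapshot_pending)) {
                end_write();
                return 0;
        }
        return 1;
}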
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index bf3f424e0013..4ebabd237153 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -595,9 +595,14 @@ int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
595 clear = 1; 595 clear = 1;
596again: 596again:
597 if (!prealloc && (mask & __GFP_WAIT)) { 597 if (!prealloc && (mask & __GFP_WAIT)) {
598 /*
599 * Don't care for allocation failure here because we might end
600 * up not needing the pre-allocated extent state at all, which
601 * is the case if we only have in the tree extent states that
 602 	 * cover our input range and don't cover any other range.
603 * If we end up needing a new extent state we allocate it later.
604 */
598 prealloc = alloc_extent_state(mask); 605 prealloc = alloc_extent_state(mask);
599 if (!prealloc)
600 return -ENOMEM;
601 } 606 }
602 607
603 spin_lock(&tree->lock); 608 spin_lock(&tree->lock);
@@ -796,17 +801,25 @@ static void set_state_bits(struct extent_io_tree *tree,
796 state->state |= bits_to_set; 801 state->state |= bits_to_set;
797} 802}
798 803
799static void cache_state(struct extent_state *state, 804static void cache_state_if_flags(struct extent_state *state,
800 struct extent_state **cached_ptr) 805 struct extent_state **cached_ptr,
806 const u64 flags)
801{ 807{
802 if (cached_ptr && !(*cached_ptr)) { 808 if (cached_ptr && !(*cached_ptr)) {
803 if (state->state & (EXTENT_IOBITS | EXTENT_BOUNDARY)) { 809 if (!flags || (state->state & flags)) {
804 *cached_ptr = state; 810 *cached_ptr = state;
805 atomic_inc(&state->refs); 811 atomic_inc(&state->refs);
806 } 812 }
807 } 813 }
808} 814}
809 815
816static void cache_state(struct extent_state *state,
817 struct extent_state **cached_ptr)
818{
819 return cache_state_if_flags(state, cached_ptr,
820 EXTENT_IOBITS | EXTENT_BOUNDARY);
821}
822
810/* 823/*
811 * set some bits on a range in the tree. This may require allocations or 824 * set some bits on a range in the tree. This may require allocations or
812 * sleeping, so the gfp mask is used to indicate what is allowed. 825 * sleeping, so the gfp mask is used to indicate what is allowed.
@@ -1058,13 +1071,21 @@ int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
1058 int err = 0; 1071 int err = 0;
1059 u64 last_start; 1072 u64 last_start;
1060 u64 last_end; 1073 u64 last_end;
1074 bool first_iteration = true;
1061 1075
1062 btrfs_debug_check_extent_io_range(tree, start, end); 1076 btrfs_debug_check_extent_io_range(tree, start, end);
1063 1077
1064again: 1078again:
1065 if (!prealloc && (mask & __GFP_WAIT)) { 1079 if (!prealloc && (mask & __GFP_WAIT)) {
1080 /*
1081 * Best effort, don't worry if extent state allocation fails
1082 * here for the first iteration. We might have a cached state
1083 * that matches exactly the target range, in which case no
1084 * extent state allocations are needed. We'll only know this
1085 * after locking the tree.
1086 */
1066 prealloc = alloc_extent_state(mask); 1087 prealloc = alloc_extent_state(mask);
1067 if (!prealloc) 1088 if (!prealloc && !first_iteration)
1068 return -ENOMEM; 1089 return -ENOMEM;
1069 } 1090 }
1070 1091
@@ -1234,6 +1255,7 @@ search_again:
1234 spin_unlock(&tree->lock); 1255 spin_unlock(&tree->lock);
1235 if (mask & __GFP_WAIT) 1256 if (mask & __GFP_WAIT)
1236 cond_resched(); 1257 cond_resched();
1258 first_iteration = false;
1237 goto again; 1259 goto again;
1238} 1260}
1239 1261
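
Both prealloc changes in this file follow the same "best effort first pass" rule spelled out in the comments above: tolerate a failed allocation on the first iteration, insist on one afterwards. A small standalone sketch of that control flow (the split condition is a placeholder; -1 stands in for -ENOMEM):

#include <stdbool.h>
#include <stdlib.h>

int convert_range_sim(void)
{
        void *prealloc = NULL;
        bool first_iteration = true;

again:
        if (!prealloc) {
                prealloc = malloc(64);
                /* only the first pass may proceed without one */
                if (!prealloc && !first_iteration)
                        return -1;
        }

        /* ...lock the tree; only now do we learn whether a split is needed... */
        bool need_split = false;         /* placeholder outcome */

        if (need_split && !prealloc) {
                first_iteration = false; /* the retry must allocate */
                goto again;
        }

        free(prealloc);
        return 0;
}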
@@ -1482,7 +1504,7 @@ int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1482 state = find_first_extent_bit_state(tree, start, bits); 1504 state = find_first_extent_bit_state(tree, start, bits);
1483got_it: 1505got_it:
1484 if (state) { 1506 if (state) {
1485 cache_state(state, cached_state); 1507 cache_state_if_flags(state, cached_state, 0);
1486 *start_ret = state->start; 1508 *start_ret = state->start;
1487 *end_ret = state->end; 1509 *end_ret = state->end;
1488 ret = 0; 1510 ret = 0;
@@ -1746,6 +1768,9 @@ int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1746 if (page_ops == 0) 1768 if (page_ops == 0)
1747 return 0; 1769 return 0;
1748 1770
1771 if ((page_ops & PAGE_SET_ERROR) && nr_pages > 0)
1772 mapping_set_error(inode->i_mapping, -EIO);
1773
1749 while (nr_pages > 0) { 1774 while (nr_pages > 0) {
1750 ret = find_get_pages_contig(inode->i_mapping, index, 1775 ret = find_get_pages_contig(inode->i_mapping, index,
1751 min_t(unsigned long, 1776 min_t(unsigned long,
@@ -1763,6 +1788,8 @@ int extent_clear_unlock_delalloc(struct inode *inode, u64 start, u64 end,
1763 clear_page_dirty_for_io(pages[i]); 1788 clear_page_dirty_for_io(pages[i]);
1764 if (page_ops & PAGE_SET_WRITEBACK) 1789 if (page_ops & PAGE_SET_WRITEBACK)
1765 set_page_writeback(pages[i]); 1790 set_page_writeback(pages[i]);
1791 if (page_ops & PAGE_SET_ERROR)
1792 SetPageError(pages[i]);
1766 if (page_ops & PAGE_END_WRITEBACK) 1793 if (page_ops & PAGE_END_WRITEBACK)
1767 end_page_writeback(pages[i]); 1794 end_page_writeback(pages[i]);
1768 if (page_ops & PAGE_UNLOCK) 1795 if (page_ops & PAGE_UNLOCK)
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 6d4b938be986..ece9ce87edff 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -49,6 +49,7 @@
49#define PAGE_SET_WRITEBACK (1 << 2) 49#define PAGE_SET_WRITEBACK (1 << 2)
50#define PAGE_END_WRITEBACK (1 << 3) 50#define PAGE_END_WRITEBACK (1 << 3)
51#define PAGE_SET_PRIVATE2 (1 << 4) 51#define PAGE_SET_PRIVATE2 (1 << 4)
52#define PAGE_SET_ERROR (1 << 5)
52 53
53/* 54/*
54 * page->private values. Every page that is controlled by the extent 55 * page->private values. Every page that is controlled by the extent
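
PAGE_SET_ERROR slots into the existing page_ops bitmask, so callers can OR it into any combination, as extent_clear_unlock_delalloc() now does on the error paths. A toy dispatch showing how the bits compose (the two lowest bit values are assumed, since they fall outside this hunk; the puts() calls stand in for the page operations):

#include <stdio.h>

#define PAGE_UNLOCK        (1 << 0)  /* assumed value */
#define PAGE_CLEAR_DIRTY   (1 << 1)  /* assumed value */
#define PAGE_SET_WRITEBACK (1 << 2)
#define PAGE_END_WRITEBACK (1 << 3)
#define PAGE_SET_PRIVATE2  (1 << 4)
#define PAGE_SET_ERROR     (1 << 5)

static void apply_page_ops(unsigned long page_ops)
{
        if (page_ops & PAGE_CLEAR_DIRTY)
                puts("clear_page_dirty_for_io()");
        if (page_ops & PAGE_SET_WRITEBACK)
                puts("set_page_writeback()");
        if (page_ops & PAGE_SET_ERROR)
                puts("SetPageError()");
        if (page_ops & PAGE_END_WRITEBACK)
                puts("end_page_writeback()");
        if (page_ops & PAGE_UNLOCK)
                puts("unlock_page()");
}

int main(void)
{
        apply_page_ops(PAGE_UNLOCK | PAGE_END_WRITEBACK | PAGE_SET_ERROR);
        return 0;
}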
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 225302b39afb..6a98bddd8f33 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -287,8 +287,6 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
287 if (!em) 287 if (!em)
288 goto out; 288 goto out;
289 289
290 if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
291 list_move(&em->list, &tree->modified_extents);
292 em->generation = gen; 290 em->generation = gen;
293 clear_bit(EXTENT_FLAG_PINNED, &em->flags); 291 clear_bit(EXTENT_FLAG_PINNED, &em->flags);
294 em->mod_start = em->start; 292 em->mod_start = em->start;
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index a18ceabd99a8..e4090259569b 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -1428,7 +1428,7 @@ static noinline int check_can_nocow(struct inode *inode, loff_t pos,
1428 u64 num_bytes; 1428 u64 num_bytes;
1429 int ret; 1429 int ret;
1430 1430
1431 ret = btrfs_start_nocow_write(root); 1431 ret = btrfs_start_write_no_snapshoting(root);
1432 if (!ret) 1432 if (!ret)
1433 return -ENOSPC; 1433 return -ENOSPC;
1434 1434
@@ -1451,7 +1451,7 @@ static noinline int check_can_nocow(struct inode *inode, loff_t pos,
1451 ret = can_nocow_extent(inode, lockstart, &num_bytes, NULL, NULL, NULL); 1451 ret = can_nocow_extent(inode, lockstart, &num_bytes, NULL, NULL, NULL);
1452 if (ret <= 0) { 1452 if (ret <= 0) {
1453 ret = 0; 1453 ret = 0;
1454 btrfs_end_nocow_write(root); 1454 btrfs_end_write_no_snapshoting(root);
1455 } else { 1455 } else {
1456 *write_bytes = min_t(size_t, *write_bytes , 1456 *write_bytes = min_t(size_t, *write_bytes ,
1457 num_bytes - pos + lockstart); 1457 num_bytes - pos + lockstart);
@@ -1543,7 +1543,7 @@ static noinline ssize_t __btrfs_buffered_write(struct file *file,
1543 btrfs_free_reserved_data_space(inode, 1543 btrfs_free_reserved_data_space(inode,
1544 reserve_bytes); 1544 reserve_bytes);
1545 else 1545 else
1546 btrfs_end_nocow_write(root); 1546 btrfs_end_write_no_snapshoting(root);
1547 break; 1547 break;
1548 } 1548 }
1549 1549
@@ -1632,7 +1632,7 @@ again:
1632 1632
1633 release_bytes = 0; 1633 release_bytes = 0;
1634 if (only_release_metadata) 1634 if (only_release_metadata)
1635 btrfs_end_nocow_write(root); 1635 btrfs_end_write_no_snapshoting(root);
1636 1636
1637 if (only_release_metadata && copied > 0) { 1637 if (only_release_metadata && copied > 0) {
1638 u64 lockstart = round_down(pos, root->sectorsize); 1638 u64 lockstart = round_down(pos, root->sectorsize);
@@ -1661,7 +1661,7 @@ again:
1661 1661
1662 if (release_bytes) { 1662 if (release_bytes) {
1663 if (only_release_metadata) { 1663 if (only_release_metadata) {
1664 btrfs_end_nocow_write(root); 1664 btrfs_end_write_no_snapshoting(root);
1665 btrfs_delalloc_release_metadata(inode, release_bytes); 1665 btrfs_delalloc_release_metadata(inode, release_bytes);
1666 } else { 1666 } else {
1667 btrfs_delalloc_release_space(inode, release_bytes); 1667 btrfs_delalloc_release_space(inode, release_bytes);
@@ -1676,6 +1676,7 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
1676 loff_t pos) 1676 loff_t pos)
1677{ 1677{
1678 struct file *file = iocb->ki_filp; 1678 struct file *file = iocb->ki_filp;
1679 struct inode *inode = file_inode(file);
1679 ssize_t written; 1680 ssize_t written;
1680 ssize_t written_buffered; 1681 ssize_t written_buffered;
1681 loff_t endbyte; 1682 loff_t endbyte;
@@ -1692,8 +1693,15 @@ static ssize_t __btrfs_direct_write(struct kiocb *iocb,
1692 err = written_buffered; 1693 err = written_buffered;
1693 goto out; 1694 goto out;
1694 } 1695 }
1696 /*
1697 * Ensure all data is persisted. We want the next direct IO read to be
1698 * able to read what was just written.
1699 */
1695 endbyte = pos + written_buffered - 1; 1700 endbyte = pos + written_buffered - 1;
1696 err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte); 1701 err = btrfs_fdatawrite_range(inode, pos, endbyte);
1702 if (err)
1703 goto out;
1704 err = filemap_fdatawait_range(inode->i_mapping, pos, endbyte);
1697 if (err) 1705 if (err)
1698 goto out; 1706 goto out;
1699 written += written_buffered; 1707 written += written_buffered;
@@ -1854,10 +1862,7 @@ static int start_ordered_ops(struct inode *inode, loff_t start, loff_t end)
1854 int ret; 1862 int ret;
1855 1863
1856 atomic_inc(&BTRFS_I(inode)->sync_writers); 1864 atomic_inc(&BTRFS_I(inode)->sync_writers);
1857 ret = filemap_fdatawrite_range(inode->i_mapping, start, end); 1865 ret = btrfs_fdatawrite_range(inode, start, end);
1858 if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1859 &BTRFS_I(inode)->runtime_flags))
1860 ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
1861 atomic_dec(&BTRFS_I(inode)->sync_writers); 1866 atomic_dec(&BTRFS_I(inode)->sync_writers);
1862 1867
1863 return ret; 1868 return ret;
@@ -2810,3 +2815,29 @@ int btrfs_auto_defrag_init(void)
2810 2815
2811 return 0; 2816 return 0;
2812} 2817}
2818
2819int btrfs_fdatawrite_range(struct inode *inode, loff_t start, loff_t end)
2820{
2821 int ret;
2822
2823 /*
 2824 	 * So with compression we will find and lock a dirty page, clear its
 2825 	 * dirty flag, set up an async extent, and immediately return with the
 2826 	 * entire range locked but with nobody actually marked with
2827 * writeback. So we can't just filemap_write_and_wait_range() and
2828 * expect it to work since it will just kick off a thread to do the
2829 * actual work. So we need to call filemap_fdatawrite_range _again_
2830 * since it will wait on the page lock, which won't be unlocked until
2831 * after the pages have been marked as writeback and so we're good to go
2832 * from there. We have to do this otherwise we'll miss the ordered
2833 * extents and that results in badness. Please Josef, do not think you
2834 * know better and pull this out at some point in the future, it is
2835 * right and you are wrong.
2836 */
2837 ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
2838 if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
2839 &BTRFS_I(inode)->runtime_flags))
2840 ret = filemap_fdatawrite_range(inode->i_mapping, start, end);
2841
2842 return ret;
2843}
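
The long comment above is the whole story of btrfs_fdatawrite_range(): with compression, the first writeback pass may only queue async work, so a second pass is issued to block on the page locks the compressors still hold. The shape of it, reduced to a sketch (the flag and helpers are stand-ins, not kernel calls):

#include <stdbool.h>

struct inode_sim {
        bool has_async_extent;   /* BTRFS_INODE_HAS_ASYNC_EXTENT analog */
};

static int kick_writeback(struct inode_sim *inode)
{
        /* may return after merely queueing async compression work */
        return 0;
}

static int fdatawrite_range_sim(struct inode_sim *inode)
{
        int ret = kick_writeback(inode);

        /* second pass waits on pages the async path holds locked */
        if (!ret && inode->has_async_extent)
                ret = kick_writeback(inode);
        return ret;
}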
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 33848196550e..030847bf7cec 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -27,10 +27,17 @@
27#include "disk-io.h" 27#include "disk-io.h"
28#include "extent_io.h" 28#include "extent_io.h"
29#include "inode-map.h" 29#include "inode-map.h"
30#include "volumes.h"
30 31
31#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) 32#define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8)
32#define MAX_CACHE_BYTES_PER_GIG (32 * 1024) 33#define MAX_CACHE_BYTES_PER_GIG (32 * 1024)
33 34
35struct btrfs_trim_range {
36 u64 start;
37 u64 bytes;
38 struct list_head list;
39};
40
34static int link_free_space(struct btrfs_free_space_ctl *ctl, 41static int link_free_space(struct btrfs_free_space_ctl *ctl,
35 struct btrfs_free_space *info); 42 struct btrfs_free_space *info);
36static void unlink_free_space(struct btrfs_free_space_ctl *ctl, 43static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
@@ -881,6 +888,7 @@ int write_cache_extent_entries(struct io_ctl *io_ctl,
881 int ret; 888 int ret;
882 struct btrfs_free_cluster *cluster = NULL; 889 struct btrfs_free_cluster *cluster = NULL;
883 struct rb_node *node = rb_first(&ctl->free_space_offset); 890 struct rb_node *node = rb_first(&ctl->free_space_offset);
891 struct btrfs_trim_range *trim_entry;
884 892
885 /* Get the cluster for this block_group if it exists */ 893 /* Get the cluster for this block_group if it exists */
886 if (block_group && !list_empty(&block_group->cluster_list)) { 894 if (block_group && !list_empty(&block_group->cluster_list)) {
@@ -916,6 +924,21 @@ int write_cache_extent_entries(struct io_ctl *io_ctl,
916 cluster = NULL; 924 cluster = NULL;
917 } 925 }
918 } 926 }
927
928 /*
929 * Make sure we don't miss any range that was removed from our rbtree
930 * because trimming is running. Otherwise after a umount+mount (or crash
931 * after committing the transaction) we would leak free space and get
932 * an inconsistent free space cache report from fsck.
933 */
934 list_for_each_entry(trim_entry, &ctl->trimming_ranges, list) {
935 ret = io_ctl_add_entry(io_ctl, trim_entry->start,
936 trim_entry->bytes, NULL);
937 if (ret)
938 goto fail;
939 *entries += 1;
940 }
941
919 return 0; 942 return 0;
920fail: 943fail:
921 return -ENOSPC; 944 return -ENOSPC;
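
The new loop above writes every in-flight trim range into the cache alongside the rbtree entries; a trimmed range is unlinked from the free-space tree while the discard runs, and without this it would simply vanish from the persisted cache. A simplified analog of the write-out step (singly linked list instead of list_head, printf instead of io_ctl_add_entry()):

#include <stdint.h>
#include <stdio.h>

struct trim_range {
        uint64_t start;
        uint64_t bytes;
        struct trim_range *next;
};

static int write_entry(uint64_t start, uint64_t bytes, int *entries)
{
        printf("free space entry: %llu +%llu\n",
               (unsigned long long)start, (unsigned long long)bytes);
        (*entries)++;
        return 0;
}

/* Caller is assumed to hold the cache_writeout_mutex analog. */
static int write_trimming_ranges(const struct trim_range *head, int *entries)
{
        for (; head; head = head->next) {
                int ret = write_entry(head->start, head->bytes, entries);

                if (ret)
                        return ret;
        }
        return 0;
}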
@@ -1135,12 +1158,15 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
1135 1158
1136 io_ctl_set_generation(&io_ctl, trans->transid); 1159 io_ctl_set_generation(&io_ctl, trans->transid);
1137 1160
1161 mutex_lock(&ctl->cache_writeout_mutex);
1138 /* Write out the extent entries in the free space cache */ 1162 /* Write out the extent entries in the free space cache */
1139 ret = write_cache_extent_entries(&io_ctl, ctl, 1163 ret = write_cache_extent_entries(&io_ctl, ctl,
1140 block_group, &entries, &bitmaps, 1164 block_group, &entries, &bitmaps,
1141 &bitmap_list); 1165 &bitmap_list);
1142 if (ret) 1166 if (ret) {
1167 mutex_unlock(&ctl->cache_writeout_mutex);
1143 goto out_nospc; 1168 goto out_nospc;
1169 }
1144 1170
1145 /* 1171 /*
1146 * Some spaces that are freed in the current transaction are pinned, 1172 * Some spaces that are freed in the current transaction are pinned,
@@ -1148,11 +1174,18 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
1148 * committed, we shouldn't lose them. 1174 * committed, we shouldn't lose them.
1149 */ 1175 */
1150 ret = write_pinned_extent_entries(root, block_group, &io_ctl, &entries); 1176 ret = write_pinned_extent_entries(root, block_group, &io_ctl, &entries);
1151 if (ret) 1177 if (ret) {
1178 mutex_unlock(&ctl->cache_writeout_mutex);
1152 goto out_nospc; 1179 goto out_nospc;
1180 }
1153 1181
1154 /* At last, we write out all the bitmaps. */ 1182 /*
1183 * At last, we write out all the bitmaps and keep cache_writeout_mutex
1184 * locked while doing it because a concurrent trim can be manipulating
1185 * or freeing the bitmap.
1186 */
1155 ret = write_bitmap_entries(&io_ctl, &bitmap_list); 1187 ret = write_bitmap_entries(&io_ctl, &bitmap_list);
1188 mutex_unlock(&ctl->cache_writeout_mutex);
1156 if (ret) 1189 if (ret)
1157 goto out_nospc; 1190 goto out_nospc;
1158 1191
@@ -2295,6 +2328,8 @@ void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
2295 ctl->start = block_group->key.objectid; 2328 ctl->start = block_group->key.objectid;
2296 ctl->private = block_group; 2329 ctl->private = block_group;
2297 ctl->op = &free_space_op; 2330 ctl->op = &free_space_op;
2331 INIT_LIST_HEAD(&ctl->trimming_ranges);
2332 mutex_init(&ctl->cache_writeout_mutex);
2298 2333
2299 /* 2334 /*
2300 * we only want to have 32k of ram per block group for keeping 2335 * we only want to have 32k of ram per block group for keeping
@@ -2911,10 +2946,12 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
2911 2946
2912static int do_trimming(struct btrfs_block_group_cache *block_group, 2947static int do_trimming(struct btrfs_block_group_cache *block_group,
2913 u64 *total_trimmed, u64 start, u64 bytes, 2948 u64 *total_trimmed, u64 start, u64 bytes,
2914 u64 reserved_start, u64 reserved_bytes) 2949 u64 reserved_start, u64 reserved_bytes,
2950 struct btrfs_trim_range *trim_entry)
2915{ 2951{
2916 struct btrfs_space_info *space_info = block_group->space_info; 2952 struct btrfs_space_info *space_info = block_group->space_info;
2917 struct btrfs_fs_info *fs_info = block_group->fs_info; 2953 struct btrfs_fs_info *fs_info = block_group->fs_info;
2954 struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
2918 int ret; 2955 int ret;
2919 int update = 0; 2956 int update = 0;
2920 u64 trimmed = 0; 2957 u64 trimmed = 0;
@@ -2934,7 +2971,10 @@ static int do_trimming(struct btrfs_block_group_cache *block_group,
2934 if (!ret) 2971 if (!ret)
2935 *total_trimmed += trimmed; 2972 *total_trimmed += trimmed;
2936 2973
2974 mutex_lock(&ctl->cache_writeout_mutex);
2937 btrfs_add_free_space(block_group, reserved_start, reserved_bytes); 2975 btrfs_add_free_space(block_group, reserved_start, reserved_bytes);
2976 list_del(&trim_entry->list);
2977 mutex_unlock(&ctl->cache_writeout_mutex);
2938 2978
2939 if (update) { 2979 if (update) {
2940 spin_lock(&space_info->lock); 2980 spin_lock(&space_info->lock);
@@ -2962,16 +3002,21 @@ static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
2962 u64 bytes; 3002 u64 bytes;
2963 3003
2964 while (start < end) { 3004 while (start < end) {
3005 struct btrfs_trim_range trim_entry;
3006
3007 mutex_lock(&ctl->cache_writeout_mutex);
2965 spin_lock(&ctl->tree_lock); 3008 spin_lock(&ctl->tree_lock);
2966 3009
2967 if (ctl->free_space < minlen) { 3010 if (ctl->free_space < minlen) {
2968 spin_unlock(&ctl->tree_lock); 3011 spin_unlock(&ctl->tree_lock);
3012 mutex_unlock(&ctl->cache_writeout_mutex);
2969 break; 3013 break;
2970 } 3014 }
2971 3015
2972 entry = tree_search_offset(ctl, start, 0, 1); 3016 entry = tree_search_offset(ctl, start, 0, 1);
2973 if (!entry) { 3017 if (!entry) {
2974 spin_unlock(&ctl->tree_lock); 3018 spin_unlock(&ctl->tree_lock);
3019 mutex_unlock(&ctl->cache_writeout_mutex);
2975 break; 3020 break;
2976 } 3021 }
2977 3022
@@ -2980,6 +3025,7 @@ static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
2980 node = rb_next(&entry->offset_index); 3025 node = rb_next(&entry->offset_index);
2981 if (!node) { 3026 if (!node) {
2982 spin_unlock(&ctl->tree_lock); 3027 spin_unlock(&ctl->tree_lock);
3028 mutex_unlock(&ctl->cache_writeout_mutex);
2983 goto out; 3029 goto out;
2984 } 3030 }
2985 entry = rb_entry(node, struct btrfs_free_space, 3031 entry = rb_entry(node, struct btrfs_free_space,
@@ -2988,6 +3034,7 @@ static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
2988 3034
2989 if (entry->offset >= end) { 3035 if (entry->offset >= end) {
2990 spin_unlock(&ctl->tree_lock); 3036 spin_unlock(&ctl->tree_lock);
3037 mutex_unlock(&ctl->cache_writeout_mutex);
2991 break; 3038 break;
2992 } 3039 }
2993 3040
@@ -2997,6 +3044,7 @@ static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
2997 bytes = min(extent_start + extent_bytes, end) - start; 3044 bytes = min(extent_start + extent_bytes, end) - start;
2998 if (bytes < minlen) { 3045 if (bytes < minlen) {
2999 spin_unlock(&ctl->tree_lock); 3046 spin_unlock(&ctl->tree_lock);
3047 mutex_unlock(&ctl->cache_writeout_mutex);
3000 goto next; 3048 goto next;
3001 } 3049 }
3002 3050
@@ -3004,9 +3052,13 @@ static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
3004 kmem_cache_free(btrfs_free_space_cachep, entry); 3052 kmem_cache_free(btrfs_free_space_cachep, entry);
3005 3053
3006 spin_unlock(&ctl->tree_lock); 3054 spin_unlock(&ctl->tree_lock);
3055 trim_entry.start = extent_start;
3056 trim_entry.bytes = extent_bytes;
3057 list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
3058 mutex_unlock(&ctl->cache_writeout_mutex);
3007 3059
3008 ret = do_trimming(block_group, total_trimmed, start, bytes, 3060 ret = do_trimming(block_group, total_trimmed, start, bytes,
3009 extent_start, extent_bytes); 3061 extent_start, extent_bytes, &trim_entry);
3010 if (ret) 3062 if (ret)
3011 break; 3063 break;
3012next: 3064next:
@@ -3035,17 +3087,21 @@ static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
3035 3087
3036 while (offset < end) { 3088 while (offset < end) {
3037 bool next_bitmap = false; 3089 bool next_bitmap = false;
3090 struct btrfs_trim_range trim_entry;
3038 3091
3092 mutex_lock(&ctl->cache_writeout_mutex);
3039 spin_lock(&ctl->tree_lock); 3093 spin_lock(&ctl->tree_lock);
3040 3094
3041 if (ctl->free_space < minlen) { 3095 if (ctl->free_space < minlen) {
3042 spin_unlock(&ctl->tree_lock); 3096 spin_unlock(&ctl->tree_lock);
3097 mutex_unlock(&ctl->cache_writeout_mutex);
3043 break; 3098 break;
3044 } 3099 }
3045 3100
3046 entry = tree_search_offset(ctl, offset, 1, 0); 3101 entry = tree_search_offset(ctl, offset, 1, 0);
3047 if (!entry) { 3102 if (!entry) {
3048 spin_unlock(&ctl->tree_lock); 3103 spin_unlock(&ctl->tree_lock);
3104 mutex_unlock(&ctl->cache_writeout_mutex);
3049 next_bitmap = true; 3105 next_bitmap = true;
3050 goto next; 3106 goto next;
3051 } 3107 }
@@ -3054,6 +3110,7 @@ static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
3054 ret2 = search_bitmap(ctl, entry, &start, &bytes); 3110 ret2 = search_bitmap(ctl, entry, &start, &bytes);
3055 if (ret2 || start >= end) { 3111 if (ret2 || start >= end) {
3056 spin_unlock(&ctl->tree_lock); 3112 spin_unlock(&ctl->tree_lock);
3113 mutex_unlock(&ctl->cache_writeout_mutex);
3057 next_bitmap = true; 3114 next_bitmap = true;
3058 goto next; 3115 goto next;
3059 } 3116 }
@@ -3061,6 +3118,7 @@ static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
3061 bytes = min(bytes, end - start); 3118 bytes = min(bytes, end - start);
3062 if (bytes < minlen) { 3119 if (bytes < minlen) {
3063 spin_unlock(&ctl->tree_lock); 3120 spin_unlock(&ctl->tree_lock);
3121 mutex_unlock(&ctl->cache_writeout_mutex);
3064 goto next; 3122 goto next;
3065 } 3123 }
3066 3124
@@ -3069,9 +3127,13 @@ static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
3069 free_bitmap(ctl, entry); 3127 free_bitmap(ctl, entry);
3070 3128
3071 spin_unlock(&ctl->tree_lock); 3129 spin_unlock(&ctl->tree_lock);
3130 trim_entry.start = start;
3131 trim_entry.bytes = bytes;
3132 list_add_tail(&trim_entry.list, &ctl->trimming_ranges);
3133 mutex_unlock(&ctl->cache_writeout_mutex);
3072 3134
3073 ret = do_trimming(block_group, total_trimmed, start, bytes, 3135 ret = do_trimming(block_group, total_trimmed, start, bytes,
3074 start, bytes); 3136 start, bytes, &trim_entry);
3075 if (ret) 3137 if (ret)
3076 break; 3138 break;
3077next: 3139next:
@@ -3101,11 +3163,52 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
3101 3163
3102 *trimmed = 0; 3164 *trimmed = 0;
3103 3165
3166 spin_lock(&block_group->lock);
3167 if (block_group->removed) {
3168 spin_unlock(&block_group->lock);
3169 return 0;
3170 }
3171 atomic_inc(&block_group->trimming);
3172 spin_unlock(&block_group->lock);
3173
3104 ret = trim_no_bitmap(block_group, trimmed, start, end, minlen); 3174 ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
3105 if (ret) 3175 if (ret)
3106 return ret; 3176 goto out;
3107 3177
3108 ret = trim_bitmaps(block_group, trimmed, start, end, minlen); 3178 ret = trim_bitmaps(block_group, trimmed, start, end, minlen);
3179out:
3180 spin_lock(&block_group->lock);
3181 if (atomic_dec_and_test(&block_group->trimming) &&
3182 block_group->removed) {
3183 struct extent_map_tree *em_tree;
3184 struct extent_map *em;
3185
3186 spin_unlock(&block_group->lock);
3187
3188 em_tree = &block_group->fs_info->mapping_tree.map_tree;
3189 write_lock(&em_tree->lock);
3190 em = lookup_extent_mapping(em_tree, block_group->key.objectid,
3191 1);
3192 BUG_ON(!em); /* logic error, can't happen */
3193 remove_extent_mapping(em_tree, em);
3194 write_unlock(&em_tree->lock);
3195
3196 lock_chunks(block_group->fs_info->chunk_root);
3197 list_del_init(&em->list);
3198 unlock_chunks(block_group->fs_info->chunk_root);
3199
3200 /* once for us and once for the tree */
3201 free_extent_map(em);
3202 free_extent_map(em);
3203
3204 /*
3205 * We've left one free space entry and other tasks trimming
 3206 		 * this block group have each left one entry. Free them.
3207 */
3208 __btrfs_remove_free_space_cache(block_group->free_space_ctl);
3209 } else {
3210 spin_unlock(&block_group->lock);
3211 }
3109 3212
3110 return ret; 3213 return ret;
3111} 3214}
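
This is the trimmer's half of the handshake introduced in btrfs_remove_block_group(): enter by raising the trimming count, and if the decrement observes both "last one out" and removed == 1, do the deferred extent map and cache teardown. A condensed sketch (the group-lock acquire/release around the checks is elided; puts() marks the cleanup):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct bg_sim {
        atomic_int trimming;
        bool removed;            /* set by the remover, under the group lock */
};

static void trim_group(struct bg_sim *bg)
{
        if (bg->removed)         /* checked under the group lock in btrfs */
                return;
        atomic_fetch_add(&bg->trimming, 1);

        /* ... discard free ranges ... */

        /* atomic_dec_and_test() analog: previous value 1 means now zero */
        if (atomic_fetch_sub(&bg->trimming, 1) == 1 && bg->removed)
                puts("last trimmer out: drop extent map + free space cache");
}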
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h
index 0cf4977ef70d..88b2238a0aed 100644
--- a/fs/btrfs/free-space-cache.h
+++ b/fs/btrfs/free-space-cache.h
@@ -38,6 +38,8 @@ struct btrfs_free_space_ctl {
38 u64 start; 38 u64 start;
39 struct btrfs_free_space_op *op; 39 struct btrfs_free_space_op *op;
40 void *private; 40 void *private;
41 struct mutex cache_writeout_mutex;
42 struct list_head trimming_ranges;
41}; 43};
42 44
43struct btrfs_free_space_op { 45struct btrfs_free_space_op {
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c
index 83d646bd2e4b..74faea3a516e 100644
--- a/fs/btrfs/inode-map.c
+++ b/fs/btrfs/inode-map.c
@@ -178,7 +178,7 @@ static void start_caching(struct btrfs_root *root)
178 root->root_key.objectid); 178 root->root_key.objectid);
179 if (IS_ERR(tsk)) { 179 if (IS_ERR(tsk)) {
180 btrfs_warn(root->fs_info, "failed to start inode caching task"); 180 btrfs_warn(root->fs_info, "failed to start inode caching task");
181 btrfs_clear_and_info(root, CHANGE_INODE_CACHE, 181 btrfs_clear_pending_and_info(root->fs_info, INODE_MAP_CACHE,
182 "disabling inode map caching"); 182 "disabling inode map caching");
183 } 183 }
184} 184}
@@ -364,6 +364,8 @@ void btrfs_init_free_ino_ctl(struct btrfs_root *root)
364 ctl->start = 0; 364 ctl->start = 0;
365 ctl->private = NULL; 365 ctl->private = NULL;
366 ctl->op = &free_ino_op; 366 ctl->op = &free_ino_op;
367 INIT_LIST_HEAD(&ctl->trimming_ranges);
368 mutex_init(&ctl->cache_writeout_mutex);
367 369
368 /* 370 /*
369 * Initially we allow to use 16K of ram to cache chunks of 371 * Initially we allow to use 16K of ram to cache chunks of
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index ff0dcc016b71..e687bb0dc73a 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -382,7 +382,7 @@ static inline int inode_need_compress(struct inode *inode)
382 * are written in the same order that the flusher thread sent them 382 * are written in the same order that the flusher thread sent them
383 * down. 383 * down.
384 */ 384 */
385static noinline int compress_file_range(struct inode *inode, 385static noinline void compress_file_range(struct inode *inode,
386 struct page *locked_page, 386 struct page *locked_page,
387 u64 start, u64 end, 387 u64 start, u64 end,
388 struct async_cow *async_cow, 388 struct async_cow *async_cow,
@@ -411,14 +411,6 @@ static noinline int compress_file_range(struct inode *inode,
411 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size)) 411 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
412 btrfs_add_inode_defrag(NULL, inode); 412 btrfs_add_inode_defrag(NULL, inode);
413 413
414 /*
 415 	 * skip compression for a small file range (<= blocksize) that
 416 	 * isn't an inline extent, since it doesn't save disk space at all.
417 */
418 if ((end - start + 1) <= blocksize &&
419 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
420 goto cleanup_and_bail_uncompressed;
421
422 actual_end = min_t(u64, isize, end + 1); 414 actual_end = min_t(u64, isize, end + 1);
423again: 415again:
424 will_compress = 0; 416 will_compress = 0;
@@ -440,6 +432,14 @@ again:
440 432
441 total_compressed = actual_end - start; 433 total_compressed = actual_end - start;
442 434
435 /*
	 436 	 * skip compression for a small file range (<= blocksize) that
	 437 	 * isn't an inline extent, since it doesn't save disk space at all.
438 */
439 if (total_compressed <= blocksize &&
440 (start > 0 || end + 1 < BTRFS_I(inode)->disk_i_size))
441 goto cleanup_and_bail_uncompressed;
442
443 /* we want to make sure that amount of ram required to uncompress 443 /* we want to make sure that amount of ram required to uncompress
444 * an extent is reasonable, so we limit the total size in ram 444 * an extent is reasonable, so we limit the total size in ram
445 * of a compressed extent to 128k. This is a crucial number 445 * of a compressed extent to 128k. This is a crucial number
@@ -527,7 +527,10 @@ cont:
527 if (ret <= 0) { 527 if (ret <= 0) {
528 unsigned long clear_flags = EXTENT_DELALLOC | 528 unsigned long clear_flags = EXTENT_DELALLOC |
529 EXTENT_DEFRAG; 529 EXTENT_DEFRAG;
530 unsigned long page_error_op;
531
530 clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0; 532 clear_flags |= (ret < 0) ? EXTENT_DO_ACCOUNTING : 0;
533 page_error_op = ret < 0 ? PAGE_SET_ERROR : 0;
531 534
532 /* 535 /*
533 * inline extent creation worked or returned error, 536 * inline extent creation worked or returned error,
@@ -538,6 +541,7 @@ cont:
538 clear_flags, PAGE_UNLOCK | 541 clear_flags, PAGE_UNLOCK |
539 PAGE_CLEAR_DIRTY | 542 PAGE_CLEAR_DIRTY |
540 PAGE_SET_WRITEBACK | 543 PAGE_SET_WRITEBACK |
544 page_error_op |
541 PAGE_END_WRITEBACK); 545 PAGE_END_WRITEBACK);
542 goto free_pages_out; 546 goto free_pages_out;
543 } 547 }
@@ -620,8 +624,7 @@ cleanup_and_bail_uncompressed:
620 *num_added += 1; 624 *num_added += 1;
621 } 625 }
622 626
623out: 627 return;
624 return ret;
625 628
626free_pages_out: 629free_pages_out:
627 for (i = 0; i < nr_pages_ret; i++) { 630 for (i = 0; i < nr_pages_ret; i++) {
@@ -629,8 +632,22 @@ free_pages_out:
629 page_cache_release(pages[i]); 632 page_cache_release(pages[i]);
630 } 633 }
631 kfree(pages); 634 kfree(pages);
635}
632 636
633 goto out; 637static void free_async_extent_pages(struct async_extent *async_extent)
638{
639 int i;
640
641 if (!async_extent->pages)
642 return;
643
644 for (i = 0; i < async_extent->nr_pages; i++) {
645 WARN_ON(async_extent->pages[i]->mapping);
646 page_cache_release(async_extent->pages[i]);
647 }
648 kfree(async_extent->pages);
649 async_extent->nr_pages = 0;
650 async_extent->pages = NULL;
634} 651}
635 652
636/* 653/*
@@ -639,7 +656,7 @@ free_pages_out:
639 * queued. We walk all the async extents created by compress_file_range 656 * queued. We walk all the async extents created by compress_file_range
640 * and send them down to the disk. 657 * and send them down to the disk.
641 */ 658 */
642static noinline int submit_compressed_extents(struct inode *inode, 659static noinline void submit_compressed_extents(struct inode *inode,
643 struct async_cow *async_cow) 660 struct async_cow *async_cow)
644{ 661{
645 struct async_extent *async_extent; 662 struct async_extent *async_extent;
@@ -651,9 +668,6 @@ static noinline int submit_compressed_extents(struct inode *inode,
651 struct extent_io_tree *io_tree; 668 struct extent_io_tree *io_tree;
652 int ret = 0; 669 int ret = 0;
653 670
654 if (list_empty(&async_cow->extents))
655 return 0;
656
657again: 671again:
658 while (!list_empty(&async_cow->extents)) { 672 while (!list_empty(&async_cow->extents)) {
659 async_extent = list_entry(async_cow->extents.next, 673 async_extent = list_entry(async_cow->extents.next,
@@ -709,15 +723,7 @@ retry:
709 async_extent->compressed_size, 723 async_extent->compressed_size,
710 0, alloc_hint, &ins, 1, 1); 724 0, alloc_hint, &ins, 1, 1);
711 if (ret) { 725 if (ret) {
712 int i; 726 free_async_extent_pages(async_extent);
713
714 for (i = 0; i < async_extent->nr_pages; i++) {
715 WARN_ON(async_extent->pages[i]->mapping);
716 page_cache_release(async_extent->pages[i]);
717 }
718 kfree(async_extent->pages);
719 async_extent->nr_pages = 0;
720 async_extent->pages = NULL;
721 727
722 if (ret == -ENOSPC) { 728 if (ret == -ENOSPC) {
723 unlock_extent(io_tree, async_extent->start, 729 unlock_extent(io_tree, async_extent->start,
@@ -814,15 +820,26 @@ retry:
814 ins.objectid, 820 ins.objectid,
815 ins.offset, async_extent->pages, 821 ins.offset, async_extent->pages,
816 async_extent->nr_pages); 822 async_extent->nr_pages);
823 if (ret) {
824 struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
825 struct page *p = async_extent->pages[0];
826 const u64 start = async_extent->start;
827 const u64 end = start + async_extent->ram_size - 1;
828
829 p->mapping = inode->i_mapping;
830 tree->ops->writepage_end_io_hook(p, start, end,
831 NULL, 0);
832 p->mapping = NULL;
833 extent_clear_unlock_delalloc(inode, start, end, NULL, 0,
834 PAGE_END_WRITEBACK |
835 PAGE_SET_ERROR);
836 free_async_extent_pages(async_extent);
837 }
817 alloc_hint = ins.objectid + ins.offset; 838 alloc_hint = ins.objectid + ins.offset;
818 kfree(async_extent); 839 kfree(async_extent);
819 if (ret)
820 goto out;
821 cond_resched(); 840 cond_resched();
822 } 841 }
823 ret = 0; 842 return;
824out:
825 return ret;
826out_free_reserve: 843out_free_reserve:
827 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1); 844 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
828out_free: 845out_free:
@@ -832,7 +849,9 @@ out_free:
832 NULL, EXTENT_LOCKED | EXTENT_DELALLOC | 849 NULL, EXTENT_LOCKED | EXTENT_DELALLOC |
833 EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING, 850 EXTENT_DEFRAG | EXTENT_DO_ACCOUNTING,
834 PAGE_UNLOCK | PAGE_CLEAR_DIRTY | 851 PAGE_UNLOCK | PAGE_CLEAR_DIRTY |
835 PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK); 852 PAGE_SET_WRITEBACK | PAGE_END_WRITEBACK |
853 PAGE_SET_ERROR);
854 free_async_extent_pages(async_extent);
836 kfree(async_extent); 855 kfree(async_extent);
837 goto again; 856 goto again;
838} 857}
@@ -1318,7 +1337,7 @@ next_slot:
1318 * we fall into common COW way. 1337 * we fall into common COW way.
1319 */ 1338 */
1320 if (!nolock) { 1339 if (!nolock) {
1321 err = btrfs_start_nocow_write(root); 1340 err = btrfs_start_write_no_snapshoting(root);
1322 if (!err) 1341 if (!err)
1323 goto out_check; 1342 goto out_check;
1324 } 1343 }
@@ -1342,7 +1361,7 @@ out_check:
1342 if (extent_end <= start) { 1361 if (extent_end <= start) {
1343 path->slots[0]++; 1362 path->slots[0]++;
1344 if (!nolock && nocow) 1363 if (!nolock && nocow)
1345 btrfs_end_nocow_write(root); 1364 btrfs_end_write_no_snapshoting(root);
1346 goto next_slot; 1365 goto next_slot;
1347 } 1366 }
1348 if (!nocow) { 1367 if (!nocow) {
@@ -1362,7 +1381,7 @@ out_check:
1362 page_started, nr_written, 1); 1381 page_started, nr_written, 1);
1363 if (ret) { 1382 if (ret) {
1364 if (!nolock && nocow) 1383 if (!nolock && nocow)
1365 btrfs_end_nocow_write(root); 1384 btrfs_end_write_no_snapshoting(root);
1366 goto error; 1385 goto error;
1367 } 1386 }
1368 cow_start = (u64)-1; 1387 cow_start = (u64)-1;
@@ -1413,7 +1432,7 @@ out_check:
1413 num_bytes); 1432 num_bytes);
1414 if (ret) { 1433 if (ret) {
1415 if (!nolock && nocow) 1434 if (!nolock && nocow)
1416 btrfs_end_nocow_write(root); 1435 btrfs_end_write_no_snapshoting(root);
1417 goto error; 1436 goto error;
1418 } 1437 }
1419 } 1438 }
@@ -1424,7 +1443,7 @@ out_check:
1424 EXTENT_DELALLOC, PAGE_UNLOCK | 1443 EXTENT_DELALLOC, PAGE_UNLOCK |
1425 PAGE_SET_PRIVATE2); 1444 PAGE_SET_PRIVATE2);
1426 if (!nolock && nocow) 1445 if (!nolock && nocow)
1427 btrfs_end_nocow_write(root); 1446 btrfs_end_write_no_snapshoting(root);
1428 cur_offset = extent_end; 1447 cur_offset = extent_end;
1429 if (cur_offset > end) 1448 if (cur_offset > end)
1430 break; 1449 break;
@@ -4580,6 +4599,26 @@ next:
4580 return err; 4599 return err;
4581} 4600}
4582 4601
4602static int wait_snapshoting_atomic_t(atomic_t *a)
4603{
4604 schedule();
4605 return 0;
4606}
4607
4608static void wait_for_snapshot_creation(struct btrfs_root *root)
4609{
4610 while (true) {
4611 int ret;
4612
4613 ret = btrfs_start_write_no_snapshoting(root);
4614 if (ret)
4615 break;
4616 wait_on_atomic_t(&root->will_be_snapshoted,
4617 wait_snapshoting_atomic_t,
4618 TASK_UNINTERRUPTIBLE);
4619 }
4620}
4621
4583static int btrfs_setsize(struct inode *inode, struct iattr *attr) 4622static int btrfs_setsize(struct inode *inode, struct iattr *attr)
4584{ 4623{
4585 struct btrfs_root *root = BTRFS_I(inode)->root; 4624 struct btrfs_root *root = BTRFS_I(inode)->root;
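
wait_for_snapshot_creation() above loops: try to enter the no-snapshot write section, and if a snapshot is pending, sleep until woken and try again. A userspace rendering of the same loop, with a condition variable taking the place of wait_on_atomic_t() (names and synchronization primitives are substitutions, not the kernel's):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static bool snapshot_pending;
static int writers;

static void wait_then_start_write(void)
{
        pthread_mutex_lock(&lk);
        while (snapshot_pending)
                pthread_cond_wait(&cv, &lk);     /* snapshot side broadcasts */
        writers++;               /* safe to do the expanding truncate now */
        pthread_mutex_unlock(&lk);
}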
@@ -4604,17 +4643,30 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)
4604 4643
4605 if (newsize > oldsize) { 4644 if (newsize > oldsize) {
4606 truncate_pagecache(inode, newsize); 4645 truncate_pagecache(inode, newsize);
4646 /*
4647 * Don't do an expanding truncate while snapshotting is ongoing.
4648 * This is to ensure the snapshot captures a fully consistent
4649 * state of this file - if the snapshot captures this expanding
4650 * truncation, it must capture all writes that happened before
4651 * this truncation.
4652 */
4653 wait_for_snapshot_creation(root);
4607 ret = btrfs_cont_expand(inode, oldsize, newsize); 4654 ret = btrfs_cont_expand(inode, oldsize, newsize);
4608 if (ret) 4655 if (ret) {
4656 btrfs_end_write_no_snapshoting(root);
4609 return ret; 4657 return ret;
4658 }
4610 4659
4611 trans = btrfs_start_transaction(root, 1); 4660 trans = btrfs_start_transaction(root, 1);
4612 if (IS_ERR(trans)) 4661 if (IS_ERR(trans)) {
4662 btrfs_end_write_no_snapshoting(root);
4613 return PTR_ERR(trans); 4663 return PTR_ERR(trans);
4664 }
4614 4665
4615 i_size_write(inode, newsize); 4666 i_size_write(inode, newsize);
4616 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL); 4667 btrfs_ordered_update_i_size(inode, i_size_read(inode), NULL);
4617 ret = btrfs_update_inode(trans, root, inode); 4668 ret = btrfs_update_inode(trans, root, inode);
4669 btrfs_end_write_no_snapshoting(root);
4618 btrfs_end_transaction(trans, root); 4670 btrfs_end_transaction(trans, root);
4619 } else { 4671 } else {
4620 4672
@@ -7000,9 +7052,12 @@ static int lock_extent_direct(struct inode *inode, u64 lockstart, u64 lockend,
7000 btrfs_put_ordered_extent(ordered); 7052 btrfs_put_ordered_extent(ordered);
7001 } else { 7053 } else {
7002 /* Screw you mmap */ 7054 /* Screw you mmap */
7003 ret = filemap_write_and_wait_range(inode->i_mapping, 7055 ret = btrfs_fdatawrite_range(inode, lockstart, lockend);
7004 lockstart, 7056 if (ret)
7005 lockend); 7057 break;
7058 ret = filemap_fdatawait_range(inode->i_mapping,
7059 lockstart,
7060 lockend);
7006 if (ret) 7061 if (ret)
7007 break; 7062 break;
7008 7063
@@ -9442,6 +9497,21 @@ out_inode:
9442 9497
9443} 9498}
9444 9499
9500/* Inspired by filemap_check_errors() */
9501int btrfs_inode_check_errors(struct inode *inode)
9502{
9503 int ret = 0;
9504
9505 if (test_bit(AS_ENOSPC, &inode->i_mapping->flags) &&
9506 test_and_clear_bit(AS_ENOSPC, &inode->i_mapping->flags))
9507 ret = -ENOSPC;
9508 if (test_bit(AS_EIO, &inode->i_mapping->flags) &&
9509 test_and_clear_bit(AS_EIO, &inode->i_mapping->flags))
9510 ret = -EIO;
9511
9512 return ret;
9513}
9514
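
btrfs_inode_check_errors() above peeks at the mapping's sticky error bits
with a plain test_bit() before paying for the atomic test_and_clear_bit(),
and assigns -EIO last so it takes precedence when both bits are pending.
A minimal userspace sketch of the same read-then-clear pattern, using C11
atomics (the bit values are hypothetical stand-ins for AS_ENOSPC/AS_EIO):

#include <errno.h>
#include <stdatomic.h>

#define AS_ENOSPC_BIT (1u << 0)	/* stand-in for AS_ENOSPC */
#define AS_EIO_BIT    (1u << 1)	/* stand-in for AS_EIO */

static int check_mapping_errors(atomic_uint *flags)
{
	int ret = 0;

	/* Cheap load first; atomic clear only when the bit is set. */
	if ((atomic_load(flags) & AS_ENOSPC_BIT) &&
	    (atomic_fetch_and(flags, ~AS_ENOSPC_BIT) & AS_ENOSPC_BIT))
		ret = -ENOSPC;
	if ((atomic_load(flags) & AS_EIO_BIT) &&
	    (atomic_fetch_and(flags, ~AS_EIO_BIT) & AS_EIO_BIT))
		ret = -EIO;

	return ret;
}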
9445static const struct inode_operations btrfs_dir_inode_operations = { 9515static const struct inode_operations btrfs_dir_inode_operations = {
9446 .getattr = btrfs_getattr, 9516 .getattr = btrfs_getattr,
9447 .lookup = btrfs_lookup, 9517 .lookup = btrfs_lookup,
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 080fe66c0349..d49fe8a0f6b5 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -617,7 +617,7 @@ fail:
617 return ret; 617 return ret;
618} 618}
619 619
620static void btrfs_wait_nocow_write(struct btrfs_root *root) 620static void btrfs_wait_for_no_snapshoting_writes(struct btrfs_root *root)
621{ 621{
622 s64 writers; 622 s64 writers;
623 DEFINE_WAIT(wait); 623 DEFINE_WAIT(wait);
@@ -649,7 +649,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
649 649
650 atomic_inc(&root->will_be_snapshoted); 650 atomic_inc(&root->will_be_snapshoted);
651 smp_mb__after_atomic(); 651 smp_mb__after_atomic();
652 btrfs_wait_nocow_write(root); 652 btrfs_wait_for_no_snapshoting_writes(root);
653 653
654 ret = btrfs_start_delalloc_inodes(root, 0); 654 ret = btrfs_start_delalloc_inodes(root, 0);
655 if (ret) 655 if (ret)
@@ -717,35 +717,6 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir,
717 if (ret) 717 if (ret)
718 goto fail; 718 goto fail;
719 719
720 /*
721 * If orphan cleanup did remove any orphans, it means the tree was
722 * modified and therefore the commit root is not the same as the
723 * current root anymore. This is a problem, because send uses the
724 * commit root and therefore can see inode items that don't exist
725 * in the current root anymore, and for example make calls to
726 * btrfs_iget, which will do tree lookups based on the current root
727 * and not on the commit root. Those lookups will fail, returning a
728 * -ESTALE error, and making send fail with that error. So make sure
729 * a send does not see any orphans we have just removed, and that it
730 * will see the same inodes regardless of whether a transaction
731 * commit happened before it started (meaning that the commit root
732 * will be the same as the current root) or not.
733 */
734 if (readonly && pending_snapshot->snap->node !=
735 pending_snapshot->snap->commit_root) {
736 trans = btrfs_join_transaction(pending_snapshot->snap);
737 if (IS_ERR(trans) && PTR_ERR(trans) != -ENOENT) {
738 ret = PTR_ERR(trans);
739 goto fail;
740 }
741 if (!IS_ERR(trans)) {
742 ret = btrfs_commit_transaction(trans,
743 pending_snapshot->snap);
744 if (ret)
745 goto fail;
746 }
747 }
748
749 inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry); 720 inode = btrfs_lookup_dentry(dentry->d_parent->d_inode, dentry);
750 if (IS_ERR(inode)) { 721 if (IS_ERR(inode)) {
751 ret = PTR_ERR(inode); 722 ret = PTR_ERR(inode);
@@ -761,7 +732,8 @@ fail:
761free: 732free:
762 kfree(pending_snapshot); 733 kfree(pending_snapshot);
763out: 734out:
764 atomic_dec(&root->will_be_snapshoted); 735 if (atomic_dec_and_test(&root->will_be_snapshoted))
736 wake_up_atomic_t(&root->will_be_snapshoted);
765 return ret; 737 return ret;
766} 738}
767 739
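
The inode.c and ioctl.c hunks above form one small synchronization
protocol: an expanding truncate backs off while root->will_be_snapshoted
is non-zero, and the snapshot path now uses atomic_dec_and_test() so the
final decrement can wake every waiter blocked in wait_on_atomic_t(). A
minimal userspace sketch of that pattern, with a pthread condition
variable standing in for wait_on_atomic_t()/wake_up_atomic_t() (all names
below are hypothetical):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

struct root_sync {
	atomic_int will_be_snapshoted;	/* pending snapshot count */
	pthread_mutex_t lock;
	pthread_cond_t zero;		/* signalled when count hits 0 */
};

/* Writer side: only proceed when no snapshot is pending. */
static bool start_write_no_snapshotting(struct root_sync *r)
{
	return atomic_load(&r->will_be_snapshoted) == 0;
}

/* Models wait_for_snapshot_creation(): block until writes may run. */
static void wait_for_snapshot(struct root_sync *r)
{
	pthread_mutex_lock(&r->lock);
	while (!start_write_no_snapshotting(r))
		pthread_cond_wait(&r->zero, &r->lock);
	pthread_mutex_unlock(&r->lock);
}

/* Snapshot side: the final decrement wakes every blocked writer. */
static void snapshot_done(struct root_sync *r)
{
	if (atomic_fetch_sub(&r->will_be_snapshoted, 1) == 1) {
		pthread_mutex_lock(&r->lock);
		pthread_cond_broadcast(&r->zero);
		pthread_mutex_unlock(&r->lock);
	}
}

The kernel variant keeps the retry in the caller - wait_for_snapshot_creation()
loops, re-trying btrfs_start_write_no_snapshoting() after every wakeup - which
is why it is written as a while (true) loop rather than a single wait.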
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index ac734ec4cc20..534544e08f76 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -220,6 +220,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
220 INIT_LIST_HEAD(&entry->work_list); 220 INIT_LIST_HEAD(&entry->work_list);
221 init_completion(&entry->completion); 221 init_completion(&entry->completion);
222 INIT_LIST_HEAD(&entry->log_list); 222 INIT_LIST_HEAD(&entry->log_list);
223 INIT_LIST_HEAD(&entry->trans_list);
223 224
224 trace_btrfs_ordered_extent_add(inode, entry); 225 trace_btrfs_ordered_extent_add(inode, entry);
225 226
@@ -431,19 +432,31 @@ out:
431 432
432/* Needs to either be called under a log transaction or the log_mutex */ 433/* Needs to either be called under a log transaction or the log_mutex */
433void btrfs_get_logged_extents(struct inode *inode, 434void btrfs_get_logged_extents(struct inode *inode,
434 struct list_head *logged_list) 435 struct list_head *logged_list,
436 const loff_t start,
437 const loff_t end)
435{ 438{
436 struct btrfs_ordered_inode_tree *tree; 439 struct btrfs_ordered_inode_tree *tree;
437 struct btrfs_ordered_extent *ordered; 440 struct btrfs_ordered_extent *ordered;
438 struct rb_node *n; 441 struct rb_node *n;
442 struct rb_node *prev;
439 443
440 tree = &BTRFS_I(inode)->ordered_tree; 444 tree = &BTRFS_I(inode)->ordered_tree;
441 spin_lock_irq(&tree->lock); 445 spin_lock_irq(&tree->lock);
442 for (n = rb_first(&tree->tree); n; n = rb_next(n)) { 446 n = __tree_search(&tree->tree, end, &prev);
447 if (!n)
448 n = prev;
449 for (; n; n = rb_prev(n)) {
443 ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node); 450 ordered = rb_entry(n, struct btrfs_ordered_extent, rb_node);
451 if (ordered->file_offset > end)
452 continue;
453 if (entry_end(ordered) <= start)
454 break;
444 if (!list_empty(&ordered->log_list)) 455 if (!list_empty(&ordered->log_list))
445 continue; 456 continue;
446 list_add_tail(&ordered->log_list, logged_list); 457 if (test_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
458 continue;
459 list_add(&ordered->log_list, logged_list);
447 atomic_inc(&ordered->refs); 460 atomic_inc(&ordered->refs);
448 } 461 }
449 spin_unlock_irq(&tree->lock); 462 spin_unlock_irq(&tree->lock);
@@ -472,7 +485,8 @@ void btrfs_submit_logged_extents(struct list_head *logged_list,
472 spin_unlock_irq(&log->log_extents_lock[index]); 485 spin_unlock_irq(&log->log_extents_lock[index]);
473} 486}
474 487
475void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid) 488void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
489 struct btrfs_root *log, u64 transid)
476{ 490{
477 struct btrfs_ordered_extent *ordered; 491 struct btrfs_ordered_extent *ordered;
478 int index = transid % 2; 492 int index = transid % 2;
@@ -497,7 +511,8 @@ void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
497 wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE, 511 wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
498 &ordered->flags)); 512 &ordered->flags));
499 513
500 btrfs_put_ordered_extent(ordered); 514 if (!test_and_set_bit(BTRFS_ORDERED_LOGGED, &ordered->flags))
515 list_add_tail(&ordered->trans_list, &trans->ordered);
501 spin_lock_irq(&log->log_extents_lock[index]); 516 spin_lock_irq(&log->log_extents_lock[index]);
502 } 517 }
503 spin_unlock_irq(&log->log_extents_lock[index]); 518 spin_unlock_irq(&log->log_extents_lock[index]);
@@ -725,30 +740,10 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
725 /* start IO across the range first to instantiate any delalloc 740 /* start IO across the range first to instantiate any delalloc
726 * extents 741 * extents
727 */ 742 */
728 ret = filemap_fdatawrite_range(inode->i_mapping, start, orig_end); 743 ret = btrfs_fdatawrite_range(inode, start, orig_end);
729 if (ret) 744 if (ret)
730 return ret; 745 return ret;
731 /* 746
732 * So with compression we will find and lock a dirty page and clear the
733 * first one as dirty, setup an async extent, and immediately return
734 * with the entire range locked but with nobody actually marked with
735 * writeback. So we can't just filemap_write_and_wait_range() and
736 * expect it to work since it will just kick off a thread to do the
737 * actual work. So we need to call filemap_fdatawrite_range _again_
738 * since it will wait on the page lock, which won't be unlocked until
739 * after the pages have been marked as writeback and so we're good to go
740 * from there. We have to do this otherwise we'll miss the ordered
741 * extents and that results in badness. Please Josef, do not think you
742 * know better and pull this out at some point in the future, it is
743 * right and you are wrong.
744 */
745 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
746 &BTRFS_I(inode)->runtime_flags)) {
747 ret = filemap_fdatawrite_range(inode->i_mapping, start,
748 orig_end);
749 if (ret)
750 return ret;
751 }
752 ret = filemap_fdatawait_range(inode->i_mapping, start, orig_end); 747 ret = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
753 if (ret) 748 if (ret)
754 return ret; 749 return ret;
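
The deleted comment explained why the write-out had to be issued twice:
with compression, the first filemap_fdatawrite_range() can return while an
async extent still holds the range locked with nothing marked writeback,
so a second call is needed to block on the page locks. The patch moves
that rule behind btrfs_fdatawrite_range() so this caller and
lock_extent_direct() above both inherit it. A sketch of what such a
wrapper has to do, assuming a flag pointer equivalent to the inode's real
runtime_flags:

/* Sketch only; mirrors the double write-out rule described above. */
static int fdatawrite_range_sketch(struct address_space *mapping,
				   unsigned long *runtime_flags,
				   loff_t start, loff_t end)
{
	int ret;

	ret = filemap_fdatawrite_range(mapping, start, end);
	/*
	 * Compression may return with the range locked but no pages
	 * tagged writeback yet; a second pass waits on the page locks
	 * until the async work has tagged them, so a later
	 * filemap_fdatawait_range() actually sees the I/O.
	 */
	if (!ret && test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT, runtime_flags))
		ret = filemap_fdatawrite_range(mapping, start, end);
	return ret;
}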
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index d81a274d621e..e96cd4ccd805 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -71,6 +71,8 @@ struct btrfs_ordered_sum {
71 ordered extent */ 71 ordered extent */
72#define BTRFS_ORDERED_TRUNCATED 9 /* Set when we have to truncate an extent */ 72#define BTRFS_ORDERED_TRUNCATED 9 /* Set when we have to truncate an extent */
73 73
74#define BTRFS_ORDERED_LOGGED 10 /* Set when we've waited on this ordered extent
75 * in the logging code. */
74struct btrfs_ordered_extent { 76struct btrfs_ordered_extent {
75 /* logical offset in the file */ 77 /* logical offset in the file */
76 u64 file_offset; 78 u64 file_offset;
@@ -121,6 +123,9 @@ struct btrfs_ordered_extent {
121 /* If we need to wait on this to be done */ 123 /* If we need to wait on this to be done */
122 struct list_head log_list; 124 struct list_head log_list;
123 125
126 /* If the transaction needs to wait on this ordered extent */
127 struct list_head trans_list;
128
124 /* used to wait for the BTRFS_ORDERED_COMPLETE bit */ 129 /* used to wait for the BTRFS_ORDERED_COMPLETE bit */
125 wait_queue_head_t wait; 130 wait_queue_head_t wait;
126 131
@@ -193,11 +198,14 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
193int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr); 198int btrfs_wait_ordered_extents(struct btrfs_root *root, int nr);
194void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr); 199void btrfs_wait_ordered_roots(struct btrfs_fs_info *fs_info, int nr);
195void btrfs_get_logged_extents(struct inode *inode, 200void btrfs_get_logged_extents(struct inode *inode,
196 struct list_head *logged_list); 201 struct list_head *logged_list,
202 const loff_t start,
203 const loff_t end);
197void btrfs_put_logged_extents(struct list_head *logged_list); 204void btrfs_put_logged_extents(struct list_head *logged_list);
198void btrfs_submit_logged_extents(struct list_head *logged_list, 205void btrfs_submit_logged_extents(struct list_head *logged_list,
199 struct btrfs_root *log); 206 struct btrfs_root *log);
200void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid); 207void btrfs_wait_logged_extents(struct btrfs_trans_handle *trans,
208 struct btrfs_root *log, u64 transid);
201void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid); 209void btrfs_free_logged_extents(struct btrfs_root *log, u64 transid);
202int __init ordered_data_init(void); 210int __init ordered_data_init(void);
203void ordered_data_exit(void); 211void ordered_data_exit(void);
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 6a41631cb959..8ab2a17bbba8 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -58,9 +58,23 @@
58 */ 58 */
59#define RBIO_CACHE_READY_BIT 3 59#define RBIO_CACHE_READY_BIT 3
60 60
61/*
62 * bbio and raid_map are managed by the caller, so we shouldn't free
63 * them here. Besides that, rbios with this flag set must not be
64 * cached, because we need raid_map to check whether two rbios cover
65 * the same stripe, and the caller has very likely freed raid_map
66 * already, so don't cache those rbios.
67 */
68#define RBIO_HOLD_BBIO_MAP_BIT 4
61 69
62#define RBIO_CACHE_SIZE 1024 70#define RBIO_CACHE_SIZE 1024
63 71
72enum btrfs_rbio_ops {
73 BTRFS_RBIO_WRITE = 0,
74 BTRFS_RBIO_READ_REBUILD = 1,
75 BTRFS_RBIO_PARITY_SCRUB = 2,
76};
77
64struct btrfs_raid_bio { 78struct btrfs_raid_bio {
65 struct btrfs_fs_info *fs_info; 79 struct btrfs_fs_info *fs_info;
66 struct btrfs_bio *bbio; 80 struct btrfs_bio *bbio;
@@ -117,13 +131,16 @@ struct btrfs_raid_bio {
117 /* number of data stripes (no p/q) */ 131 /* number of data stripes (no p/q) */
118 int nr_data; 132 int nr_data;
119 133
134 int real_stripes;
135
136 int stripe_npages;
120 /* 137 /*
121 * set if we're doing a parity rebuild 138 * set if we're doing a parity rebuild
122 * for a read from higher up, which is handled 139 * for a read from higher up, which is handled
123 * differently from a parity rebuild as part of 140 * differently from a parity rebuild as part of
124 * rmw 141 * rmw
125 */ 142 */
126 int read_rebuild; 143 enum btrfs_rbio_ops operation;
127 144
128 /* first bad stripe */ 145 /* first bad stripe */
129 int faila; 146 int faila;
@@ -131,6 +148,7 @@ struct btrfs_raid_bio {
131 /* second bad stripe (for raid6 use) */ 148 /* second bad stripe (for raid6 use) */
132 int failb; 149 int failb;
133 150
151 int scrubp;
134 /* 152 /*
135 * number of pages needed to represent the full 153 * number of pages needed to represent the full
136 * stripe 154 * stripe
@@ -144,8 +162,13 @@ struct btrfs_raid_bio {
144 */ 162 */
145 int bio_list_bytes; 163 int bio_list_bytes;
146 164
165 int generic_bio_cnt;
166
147 atomic_t refs; 167 atomic_t refs;
148 168
169 atomic_t stripes_pending;
170
171 atomic_t error;
149 /* 172 /*
150 * these are two arrays of pointers. We allocate the 173 * these are two arrays of pointers. We allocate the
151 * rbio big enough to hold them both and setup their 174 * rbio big enough to hold them both and setup their
@@ -162,6 +185,11 @@ struct btrfs_raid_bio {
162 * here for faster lookup 185 * here for faster lookup
163 */ 186 */
164 struct page **bio_pages; 187 struct page **bio_pages;
188
189 /*
190 * bitmap to record which horizontal stripe has data
191 */
192 unsigned long *dbitmap;
165}; 193};
166 194
167static int __raid56_parity_recover(struct btrfs_raid_bio *rbio); 195static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
@@ -176,6 +204,10 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio);
176static void index_rbio_pages(struct btrfs_raid_bio *rbio); 204static void index_rbio_pages(struct btrfs_raid_bio *rbio);
177static int alloc_rbio_pages(struct btrfs_raid_bio *rbio); 205static int alloc_rbio_pages(struct btrfs_raid_bio *rbio);
178 206
207static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
208 int need_check);
209static void async_scrub_parity(struct btrfs_raid_bio *rbio);
210
179/* 211/*
180 * the stripe hash table is used for locking, and to collect 212 * the stripe hash table is used for locking, and to collect
181 * bios in hopes of making a full stripe 213 * bios in hopes of making a full stripe
@@ -324,6 +356,7 @@ static void merge_rbio(struct btrfs_raid_bio *dest,
324{ 356{
325 bio_list_merge(&dest->bio_list, &victim->bio_list); 357 bio_list_merge(&dest->bio_list, &victim->bio_list);
326 dest->bio_list_bytes += victim->bio_list_bytes; 358 dest->bio_list_bytes += victim->bio_list_bytes;
359 dest->generic_bio_cnt += victim->generic_bio_cnt;
327 bio_list_init(&victim->bio_list); 360 bio_list_init(&victim->bio_list);
328} 361}
329 362
@@ -577,11 +610,20 @@ static int rbio_can_merge(struct btrfs_raid_bio *last,
577 cur->raid_map[0]) 610 cur->raid_map[0])
578 return 0; 611 return 0;
579 612
580 /* reads can't merge with writes */ 613 /* we can't merge with different operations */
581 if (last->read_rebuild != 614 if (last->operation != cur->operation)
582 cur->read_rebuild) { 615 return 0;
616 /*
617 * A parity scrub needs to read the full stripe from the drive,
618 * then check and repair the parity and write back the new results.
619 *
620 * We're not allowed to add any new bios to the
621 * bio list here, anyone else that wants to
622 * change this stripe needs to do their own rmw.
623 */
624 if (last->operation == BTRFS_RBIO_PARITY_SCRUB ||
625 cur->operation == BTRFS_RBIO_PARITY_SCRUB)
583 return 0; 626 return 0;
584 }
585 627
586 return 1; 628 return 1;
587} 629}
@@ -601,7 +643,7 @@ static struct page *rbio_pstripe_page(struct btrfs_raid_bio *rbio, int index)
601 */ 643 */
602static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index) 644static struct page *rbio_qstripe_page(struct btrfs_raid_bio *rbio, int index)
603{ 645{
604 if (rbio->nr_data + 1 == rbio->bbio->num_stripes) 646 if (rbio->nr_data + 1 == rbio->real_stripes)
605 return NULL; 647 return NULL;
606 648
607 index += ((rbio->nr_data + 1) * rbio->stripe_len) >> 649 index += ((rbio->nr_data + 1) * rbio->stripe_len) >>
@@ -772,11 +814,14 @@ static noinline void unlock_stripe(struct btrfs_raid_bio *rbio)
772 spin_unlock(&rbio->bio_list_lock); 814 spin_unlock(&rbio->bio_list_lock);
773 spin_unlock_irqrestore(&h->lock, flags); 815 spin_unlock_irqrestore(&h->lock, flags);
774 816
775 if (next->read_rebuild) 817 if (next->operation == BTRFS_RBIO_READ_REBUILD)
776 async_read_rebuild(next); 818 async_read_rebuild(next);
777 else { 819 else if (next->operation == BTRFS_RBIO_WRITE) {
778 steal_rbio(rbio, next); 820 steal_rbio(rbio, next);
779 async_rmw_stripe(next); 821 async_rmw_stripe(next);
822 } else if (next->operation == BTRFS_RBIO_PARITY_SCRUB) {
823 steal_rbio(rbio, next);
824 async_scrub_parity(next);
780 } 825 }
781 826
782 goto done_nolock; 827 goto done_nolock;
@@ -796,6 +841,21 @@ done_nolock:
796 remove_rbio_from_cache(rbio); 841 remove_rbio_from_cache(rbio);
797} 842}
798 843
844static inline void
845__free_bbio_and_raid_map(struct btrfs_bio *bbio, u64 *raid_map, int need)
846{
847 if (need) {
848 kfree(raid_map);
849 kfree(bbio);
850 }
851}
852
853static inline void free_bbio_and_raid_map(struct btrfs_raid_bio *rbio)
854{
855 __free_bbio_and_raid_map(rbio->bbio, rbio->raid_map,
856 !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags));
857}
858
799static void __free_raid_bio(struct btrfs_raid_bio *rbio) 859static void __free_raid_bio(struct btrfs_raid_bio *rbio)
800{ 860{
801 int i; 861 int i;
@@ -814,8 +874,9 @@ static void __free_raid_bio(struct btrfs_raid_bio *rbio)
814 rbio->stripe_pages[i] = NULL; 874 rbio->stripe_pages[i] = NULL;
815 } 875 }
816 } 876 }
817 kfree(rbio->raid_map); 877
818 kfree(rbio->bbio); 878 free_bbio_and_raid_map(rbio);
879
819 kfree(rbio); 880 kfree(rbio);
820} 881}
821 882
@@ -833,6 +894,10 @@ static void rbio_orig_end_io(struct btrfs_raid_bio *rbio, int err, int uptodate)
833{ 894{
834 struct bio *cur = bio_list_get(&rbio->bio_list); 895 struct bio *cur = bio_list_get(&rbio->bio_list);
835 struct bio *next; 896 struct bio *next;
897
898 if (rbio->generic_bio_cnt)
899 btrfs_bio_counter_sub(rbio->fs_info, rbio->generic_bio_cnt);
900
836 free_raid_bio(rbio); 901 free_raid_bio(rbio);
837 902
838 while (cur) { 903 while (cur) {
@@ -858,13 +923,13 @@ static void raid_write_end_io(struct bio *bio, int err)
858 923
859 bio_put(bio); 924 bio_put(bio);
860 925
861 if (!atomic_dec_and_test(&rbio->bbio->stripes_pending)) 926 if (!atomic_dec_and_test(&rbio->stripes_pending))
862 return; 927 return;
863 928
864 err = 0; 929 err = 0;
865 930
866 /* OK, we have read all the stripes we need to. */ 931 /* OK, we have read all the stripes we need to. */
867 if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors) 932 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
868 err = -EIO; 933 err = -EIO;
869 934
870 rbio_orig_end_io(rbio, err, 0); 935 rbio_orig_end_io(rbio, err, 0);
@@ -925,16 +990,16 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
925{ 990{
926 struct btrfs_raid_bio *rbio; 991 struct btrfs_raid_bio *rbio;
927 int nr_data = 0; 992 int nr_data = 0;
928 int num_pages = rbio_nr_pages(stripe_len, bbio->num_stripes); 993 int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
994 int num_pages = rbio_nr_pages(stripe_len, real_stripes);
995 int stripe_npages = DIV_ROUND_UP(stripe_len, PAGE_SIZE);
929 void *p; 996 void *p;
930 997
931 rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2, 998 rbio = kzalloc(sizeof(*rbio) + num_pages * sizeof(struct page *) * 2 +
999 DIV_ROUND_UP(stripe_npages, BITS_PER_LONG / 8),
932 GFP_NOFS); 1000 GFP_NOFS);
933 if (!rbio) { 1001 if (!rbio)
934 kfree(raid_map);
935 kfree(bbio);
936 return ERR_PTR(-ENOMEM); 1002 return ERR_PTR(-ENOMEM);
937 }
938 1003
939 bio_list_init(&rbio->bio_list); 1004 bio_list_init(&rbio->bio_list);
940 INIT_LIST_HEAD(&rbio->plug_list); 1005 INIT_LIST_HEAD(&rbio->plug_list);
@@ -946,9 +1011,13 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
946 rbio->fs_info = root->fs_info; 1011 rbio->fs_info = root->fs_info;
947 rbio->stripe_len = stripe_len; 1012 rbio->stripe_len = stripe_len;
948 rbio->nr_pages = num_pages; 1013 rbio->nr_pages = num_pages;
1014 rbio->real_stripes = real_stripes;
1015 rbio->stripe_npages = stripe_npages;
949 rbio->faila = -1; 1016 rbio->faila = -1;
950 rbio->failb = -1; 1017 rbio->failb = -1;
951 atomic_set(&rbio->refs, 1); 1018 atomic_set(&rbio->refs, 1);
1019 atomic_set(&rbio->error, 0);
1020 atomic_set(&rbio->stripes_pending, 0);
952 1021
953 /* 1022 /*
954 * the stripe_pages and bio_pages array point to the extra 1023 * the stripe_pages and bio_pages array point to the extra
@@ -957,11 +1026,12 @@ static struct btrfs_raid_bio *alloc_rbio(struct btrfs_root *root,
957 p = rbio + 1; 1026 p = rbio + 1;
958 rbio->stripe_pages = p; 1027 rbio->stripe_pages = p;
959 rbio->bio_pages = p + sizeof(struct page *) * num_pages; 1028 rbio->bio_pages = p + sizeof(struct page *) * num_pages;
1029 rbio->dbitmap = p + sizeof(struct page *) * num_pages * 2;
960 1030
961 if (raid_map[bbio->num_stripes - 1] == RAID6_Q_STRIPE) 1031 if (raid_map[real_stripes - 1] == RAID6_Q_STRIPE)
962 nr_data = bbio->num_stripes - 2; 1032 nr_data = real_stripes - 2;
963 else 1033 else
964 nr_data = bbio->num_stripes - 1; 1034 nr_data = real_stripes - 1;
965 1035
966 rbio->nr_data = nr_data; 1036 rbio->nr_data = nr_data;
967 return rbio; 1037 return rbio;
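
alloc_rbio() above packs the rbio itself, both page-pointer arrays, and
now the dbitmap into a single kzalloc(), then carves the arrays out of the
tail. The same carving idiom in a self-contained userspace sketch (this
version rounds the bitmap up to whole longs; sizes are illustrative):

#include <limits.h>
#include <stdlib.h>

struct rbio_sketch {
	void **stripe_pages;	/* num_pages entries */
	void **bio_pages;	/* num_pages entries */
	unsigned long *dbitmap;	/* stripe_npages bits */
};

static struct rbio_sketch *alloc_rbio_sketch(int num_pages, int stripe_npages)
{
	size_t bits_per_long = CHAR_BIT * sizeof(unsigned long);
	size_t bitmap_bytes = (stripe_npages + bits_per_long - 1) /
			      bits_per_long * sizeof(unsigned long);
	struct rbio_sketch *r;
	char *p;

	/* One allocation: struct, two pointer arrays, then the bitmap. */
	r = calloc(1, sizeof(*r) + 2 * num_pages * sizeof(void *) +
		      bitmap_bytes);
	if (!r)
		return NULL;

	p = (char *)(r + 1);
	r->stripe_pages = (void **)p;
	r->bio_pages = (void **)(p + num_pages * sizeof(void *));
	r->dbitmap = (unsigned long *)(p + 2 * num_pages * sizeof(void *));
	return r;
}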
@@ -1073,7 +1143,7 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1073static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio) 1143static void validate_rbio_for_rmw(struct btrfs_raid_bio *rbio)
1074{ 1144{
1075 if (rbio->faila >= 0 || rbio->failb >= 0) { 1145 if (rbio->faila >= 0 || rbio->failb >= 0) {
1076 BUG_ON(rbio->faila == rbio->bbio->num_stripes - 1); 1146 BUG_ON(rbio->faila == rbio->real_stripes - 1);
1077 __raid56_parity_recover(rbio); 1147 __raid56_parity_recover(rbio);
1078 } else { 1148 } else {
1079 finish_rmw(rbio); 1149 finish_rmw(rbio);
@@ -1134,7 +1204,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1134static noinline void finish_rmw(struct btrfs_raid_bio *rbio) 1204static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1135{ 1205{
1136 struct btrfs_bio *bbio = rbio->bbio; 1206 struct btrfs_bio *bbio = rbio->bbio;
1137 void *pointers[bbio->num_stripes]; 1207 void *pointers[rbio->real_stripes];
1138 int stripe_len = rbio->stripe_len; 1208 int stripe_len = rbio->stripe_len;
1139 int nr_data = rbio->nr_data; 1209 int nr_data = rbio->nr_data;
1140 int stripe; 1210 int stripe;
@@ -1148,11 +1218,11 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1148 1218
1149 bio_list_init(&bio_list); 1219 bio_list_init(&bio_list);
1150 1220
1151 if (bbio->num_stripes - rbio->nr_data == 1) { 1221 if (rbio->real_stripes - rbio->nr_data == 1) {
1152 p_stripe = bbio->num_stripes - 1; 1222 p_stripe = rbio->real_stripes - 1;
1153 } else if (bbio->num_stripes - rbio->nr_data == 2) { 1223 } else if (rbio->real_stripes - rbio->nr_data == 2) {
1154 p_stripe = bbio->num_stripes - 2; 1224 p_stripe = rbio->real_stripes - 2;
1155 q_stripe = bbio->num_stripes - 1; 1225 q_stripe = rbio->real_stripes - 1;
1156 } else { 1226 } else {
1157 BUG(); 1227 BUG();
1158 } 1228 }
@@ -1169,7 +1239,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1169 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); 1239 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1170 spin_unlock_irq(&rbio->bio_list_lock); 1240 spin_unlock_irq(&rbio->bio_list_lock);
1171 1241
1172 atomic_set(&rbio->bbio->error, 0); 1242 atomic_set(&rbio->error, 0);
1173 1243
1174 /* 1244 /*
1175 * now that we've set rmw_locked, run through the 1245 * now that we've set rmw_locked, run through the
@@ -1209,7 +1279,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1209 SetPageUptodate(p); 1279 SetPageUptodate(p);
1210 pointers[stripe++] = kmap(p); 1280 pointers[stripe++] = kmap(p);
1211 1281
1212 raid6_call.gen_syndrome(bbio->num_stripes, PAGE_SIZE, 1282 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
1213 pointers); 1283 pointers);
1214 } else { 1284 } else {
1215 /* raid5 */ 1285 /* raid5 */
@@ -1218,7 +1288,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1218 } 1288 }
1219 1289
1220 1290
1221 for (stripe = 0; stripe < bbio->num_stripes; stripe++) 1291 for (stripe = 0; stripe < rbio->real_stripes; stripe++)
1222 kunmap(page_in_rbio(rbio, stripe, pagenr, 0)); 1292 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
1223 } 1293 }
1224 1294
@@ -1227,7 +1297,7 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1227 * higher layers (the bio_list in our rbio) and our p/q. Ignore 1297 * higher layers (the bio_list in our rbio) and our p/q. Ignore
1228 * everything else. 1298 * everything else.
1229 */ 1299 */
1230 for (stripe = 0; stripe < bbio->num_stripes; stripe++) { 1300 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1231 for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) { 1301 for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
1232 struct page *page; 1302 struct page *page;
1233 if (stripe < rbio->nr_data) { 1303 if (stripe < rbio->nr_data) {
@@ -1245,8 +1315,34 @@ static noinline void finish_rmw(struct btrfs_raid_bio *rbio)
1245 } 1315 }
1246 } 1316 }
1247 1317
1248 atomic_set(&bbio->stripes_pending, bio_list_size(&bio_list)); 1318 if (likely(!bbio->num_tgtdevs))
1249 BUG_ON(atomic_read(&bbio->stripes_pending) == 0); 1319 goto write_data;
1320
1321 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1322 if (!bbio->tgtdev_map[stripe])
1323 continue;
1324
1325 for (pagenr = 0; pagenr < pages_per_stripe; pagenr++) {
1326 struct page *page;
1327 if (stripe < rbio->nr_data) {
1328 page = page_in_rbio(rbio, stripe, pagenr, 1);
1329 if (!page)
1330 continue;
1331 } else {
1332 page = rbio_stripe_page(rbio, stripe, pagenr);
1333 }
1334
1335 ret = rbio_add_io_page(rbio, &bio_list, page,
1336 rbio->bbio->tgtdev_map[stripe],
1337 pagenr, rbio->stripe_len);
1338 if (ret)
1339 goto cleanup;
1340 }
1341 }
1342
1343write_data:
1344 atomic_set(&rbio->stripes_pending, bio_list_size(&bio_list));
1345 BUG_ON(atomic_read(&rbio->stripes_pending) == 0);
1250 1346
1251 while (1) { 1347 while (1) {
1252 bio = bio_list_pop(&bio_list); 1348 bio = bio_list_pop(&bio_list);
@@ -1283,7 +1379,8 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1283 stripe = &rbio->bbio->stripes[i]; 1379 stripe = &rbio->bbio->stripes[i];
1284 stripe_start = stripe->physical; 1380 stripe_start = stripe->physical;
1285 if (physical >= stripe_start && 1381 if (physical >= stripe_start &&
1286 physical < stripe_start + rbio->stripe_len) { 1382 physical < stripe_start + rbio->stripe_len &&
1383 bio->bi_bdev == stripe->dev->bdev) {
1287 return i; 1384 return i;
1288 } 1385 }
1289 } 1386 }
@@ -1331,11 +1428,11 @@ static int fail_rbio_index(struct btrfs_raid_bio *rbio, int failed)
1331 if (rbio->faila == -1) { 1428 if (rbio->faila == -1) {
1332 /* first failure on this rbio */ 1429 /* first failure on this rbio */
1333 rbio->faila = failed; 1430 rbio->faila = failed;
1334 atomic_inc(&rbio->bbio->error); 1431 atomic_inc(&rbio->error);
1335 } else if (rbio->failb == -1) { 1432 } else if (rbio->failb == -1) {
1336 /* second failure on this rbio */ 1433 /* second failure on this rbio */
1337 rbio->failb = failed; 1434 rbio->failb = failed;
1338 atomic_inc(&rbio->bbio->error); 1435 atomic_inc(&rbio->error);
1339 } else { 1436 } else {
1340 ret = -EIO; 1437 ret = -EIO;
1341 } 1438 }
@@ -1394,11 +1491,11 @@ static void raid_rmw_end_io(struct bio *bio, int err)
1394 1491
1395 bio_put(bio); 1492 bio_put(bio);
1396 1493
1397 if (!atomic_dec_and_test(&rbio->bbio->stripes_pending)) 1494 if (!atomic_dec_and_test(&rbio->stripes_pending))
1398 return; 1495 return;
1399 1496
1400 err = 0; 1497 err = 0;
1401 if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors) 1498 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
1402 goto cleanup; 1499 goto cleanup;
1403 1500
1404 /* 1501 /*
@@ -1439,7 +1536,6 @@ static void async_read_rebuild(struct btrfs_raid_bio *rbio)
1439static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio) 1536static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1440{ 1537{
1441 int bios_to_read = 0; 1538 int bios_to_read = 0;
1442 struct btrfs_bio *bbio = rbio->bbio;
1443 struct bio_list bio_list; 1539 struct bio_list bio_list;
1444 int ret; 1540 int ret;
1445 int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE); 1541 int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
@@ -1455,7 +1551,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1455 1551
1456 index_rbio_pages(rbio); 1552 index_rbio_pages(rbio);
1457 1553
1458 atomic_set(&rbio->bbio->error, 0); 1554 atomic_set(&rbio->error, 0);
1459 /* 1555 /*
1460 * build a list of bios to read all the missing parts of this 1556 * build a list of bios to read all the missing parts of this
1461 * stripe 1557 * stripe
@@ -1503,7 +1599,7 @@ static int raid56_rmw_stripe(struct btrfs_raid_bio *rbio)
1503 * the bbio may be freed once we submit the last bio. Make sure 1599 * the bbio may be freed once we submit the last bio. Make sure
1504 * not to touch it after that 1600 * not to touch it after that
1505 */ 1601 */
1506 atomic_set(&bbio->stripes_pending, bios_to_read); 1602 atomic_set(&rbio->stripes_pending, bios_to_read);
1507 while (1) { 1603 while (1) {
1508 bio = bio_list_pop(&bio_list); 1604 bio = bio_list_pop(&bio_list);
1509 if (!bio) 1605 if (!bio)
@@ -1686,19 +1782,30 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
1686 struct btrfs_raid_bio *rbio; 1782 struct btrfs_raid_bio *rbio;
1687 struct btrfs_plug_cb *plug = NULL; 1783 struct btrfs_plug_cb *plug = NULL;
1688 struct blk_plug_cb *cb; 1784 struct blk_plug_cb *cb;
1785 int ret;
1689 1786
1690 rbio = alloc_rbio(root, bbio, raid_map, stripe_len); 1787 rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
1691 if (IS_ERR(rbio)) 1788 if (IS_ERR(rbio)) {
1789 __free_bbio_and_raid_map(bbio, raid_map, 1);
1692 return PTR_ERR(rbio); 1790 return PTR_ERR(rbio);
1791 }
1693 bio_list_add(&rbio->bio_list, bio); 1792 bio_list_add(&rbio->bio_list, bio);
1694 rbio->bio_list_bytes = bio->bi_iter.bi_size; 1793 rbio->bio_list_bytes = bio->bi_iter.bi_size;
1794 rbio->operation = BTRFS_RBIO_WRITE;
1795
1796 btrfs_bio_counter_inc_noblocked(root->fs_info);
1797 rbio->generic_bio_cnt = 1;
1695 1798
1696 /* 1799 /*
1697 * don't plug on full rbios, just get them out the door 1800 * don't plug on full rbios, just get them out the door
1698 * as quickly as we can 1801 * as quickly as we can
1699 */ 1802 */
1700 if (rbio_is_full(rbio)) 1803 if (rbio_is_full(rbio)) {
1701 return full_stripe_write(rbio); 1804 ret = full_stripe_write(rbio);
1805 if (ret)
1806 btrfs_bio_counter_dec(root->fs_info);
1807 return ret;
1808 }
1702 1809
1703 cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info, 1810 cb = blk_check_plugged(btrfs_raid_unplug, root->fs_info,
1704 sizeof(*plug)); 1811 sizeof(*plug));
@@ -1709,10 +1816,13 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
1709 INIT_LIST_HEAD(&plug->rbio_list); 1816 INIT_LIST_HEAD(&plug->rbio_list);
1710 } 1817 }
1711 list_add_tail(&rbio->plug_list, &plug->rbio_list); 1818 list_add_tail(&rbio->plug_list, &plug->rbio_list);
1819 ret = 0;
1712 } else { 1820 } else {
1713 return __raid56_parity_write(rbio); 1821 ret = __raid56_parity_write(rbio);
1822 if (ret)
1823 btrfs_bio_counter_dec(root->fs_info);
1714 } 1824 }
1715 return 0; 1825 return ret;
1716} 1826}
1717 1827
1718/* 1828/*
@@ -1730,7 +1840,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1730 int err; 1840 int err;
1731 int i; 1841 int i;
1732 1842
1733 pointers = kzalloc(rbio->bbio->num_stripes * sizeof(void *), 1843 pointers = kzalloc(rbio->real_stripes * sizeof(void *),
1734 GFP_NOFS); 1844 GFP_NOFS);
1735 if (!pointers) { 1845 if (!pointers) {
1736 err = -ENOMEM; 1846 err = -ENOMEM;
@@ -1740,7 +1850,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1740 faila = rbio->faila; 1850 faila = rbio->faila;
1741 failb = rbio->failb; 1851 failb = rbio->failb;
1742 1852
1743 if (rbio->read_rebuild) { 1853 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
1744 spin_lock_irq(&rbio->bio_list_lock); 1854 spin_lock_irq(&rbio->bio_list_lock);
1745 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags); 1855 set_bit(RBIO_RMW_LOCKED_BIT, &rbio->flags);
1746 spin_unlock_irq(&rbio->bio_list_lock); 1856 spin_unlock_irq(&rbio->bio_list_lock);
@@ -1749,15 +1859,23 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1749 index_rbio_pages(rbio); 1859 index_rbio_pages(rbio);
1750 1860
1751 for (pagenr = 0; pagenr < nr_pages; pagenr++) { 1861 for (pagenr = 0; pagenr < nr_pages; pagenr++) {
1862 /*
1863 * Now we just use bitmap to mark the horizontal stripes in
1864 * which we have data when doing parity scrub.
1865 */
1866 if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB &&
1867 !test_bit(pagenr, rbio->dbitmap))
1868 continue;
1869
1752 /* setup our array of pointers with pages 1870 /* setup our array of pointers with pages
1753 * from each stripe 1871 * from each stripe
1754 */ 1872 */
1755 for (stripe = 0; stripe < rbio->bbio->num_stripes; stripe++) { 1873 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1756 /* 1874 /*
1757 * if we're rebuilding a read, we have to use 1875 * if we're rebuilding a read, we have to use
1758 * pages from the bio list 1876 * pages from the bio list
1759 */ 1877 */
1760 if (rbio->read_rebuild && 1878 if (rbio->operation == BTRFS_RBIO_READ_REBUILD &&
1761 (stripe == faila || stripe == failb)) { 1879 (stripe == faila || stripe == failb)) {
1762 page = page_in_rbio(rbio, stripe, pagenr, 0); 1880 page = page_in_rbio(rbio, stripe, pagenr, 0);
1763 } else { 1881 } else {
@@ -1767,7 +1885,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1767 } 1885 }
1768 1886
1769 /* all raid6 handling here */ 1887 /* all raid6 handling here */
1770 if (rbio->raid_map[rbio->bbio->num_stripes - 1] == 1888 if (rbio->raid_map[rbio->real_stripes - 1] ==
1771 RAID6_Q_STRIPE) { 1889 RAID6_Q_STRIPE) {
1772 1890
1773 /* 1891 /*
@@ -1817,10 +1935,10 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
1817 } 1935 }
1818 1936
1819 if (rbio->raid_map[failb] == RAID5_P_STRIPE) { 1937 if (rbio->raid_map[failb] == RAID5_P_STRIPE) {
1820 raid6_datap_recov(rbio->bbio->num_stripes, 1938 raid6_datap_recov(rbio->real_stripes,
1821 PAGE_SIZE, faila, pointers); 1939 PAGE_SIZE, faila, pointers);
1822 } else { 1940 } else {
1823 raid6_2data_recov(rbio->bbio->num_stripes, 1941 raid6_2data_recov(rbio->real_stripes,
1824 PAGE_SIZE, faila, failb, 1942 PAGE_SIZE, faila, failb,
1825 pointers); 1943 pointers);
1826 } 1944 }
@@ -1850,7 +1968,7 @@ pstripe:
1850 * know they can be trusted. If this was a read reconstruction, 1968 * know they can be trusted. If this was a read reconstruction,
1851 * other endio functions will fiddle the uptodate bits 1969 * other endio functions will fiddle the uptodate bits
1852 */ 1970 */
1853 if (!rbio->read_rebuild) { 1971 if (rbio->operation == BTRFS_RBIO_WRITE) {
1854 for (i = 0; i < nr_pages; i++) { 1972 for (i = 0; i < nr_pages; i++) {
1855 if (faila != -1) { 1973 if (faila != -1) {
1856 page = rbio_stripe_page(rbio, faila, i); 1974 page = rbio_stripe_page(rbio, faila, i);
@@ -1862,12 +1980,12 @@ pstripe:
1862 } 1980 }
1863 } 1981 }
1864 } 1982 }
1865 for (stripe = 0; stripe < rbio->bbio->num_stripes; stripe++) { 1983 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1866 /* 1984 /*
1867 * if we're rebuilding a read, we have to use 1985 * if we're rebuilding a read, we have to use
1868 * pages from the bio list 1986 * pages from the bio list
1869 */ 1987 */
1870 if (rbio->read_rebuild && 1988 if (rbio->operation == BTRFS_RBIO_READ_REBUILD &&
1871 (stripe == faila || stripe == failb)) { 1989 (stripe == faila || stripe == failb)) {
1872 page = page_in_rbio(rbio, stripe, pagenr, 0); 1990 page = page_in_rbio(rbio, stripe, pagenr, 0);
1873 } else { 1991 } else {
@@ -1882,9 +2000,9 @@ cleanup:
1882 kfree(pointers); 2000 kfree(pointers);
1883 2001
1884cleanup_io: 2002cleanup_io:
1885 2003 if (rbio->operation == BTRFS_RBIO_READ_REBUILD) {
1886 if (rbio->read_rebuild) { 2004 if (err == 0 &&
1887 if (err == 0) 2005 !test_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags))
1888 cache_rbio_pages(rbio); 2006 cache_rbio_pages(rbio);
1889 else 2007 else
1890 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags); 2008 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
@@ -1893,7 +2011,13 @@ cleanup_io:
1893 } else if (err == 0) { 2011 } else if (err == 0) {
1894 rbio->faila = -1; 2012 rbio->faila = -1;
1895 rbio->failb = -1; 2013 rbio->failb = -1;
1896 finish_rmw(rbio); 2014
2015 if (rbio->operation == BTRFS_RBIO_WRITE)
2016 finish_rmw(rbio);
2017 else if (rbio->operation == BTRFS_RBIO_PARITY_SCRUB)
2018 finish_parity_scrub(rbio, 0);
2019 else
2020 BUG();
1897 } else { 2021 } else {
1898 rbio_orig_end_io(rbio, err, 0); 2022 rbio_orig_end_io(rbio, err, 0);
1899 } 2023 }
@@ -1917,10 +2041,10 @@ static void raid_recover_end_io(struct bio *bio, int err)
1917 set_bio_pages_uptodate(bio); 2041 set_bio_pages_uptodate(bio);
1918 bio_put(bio); 2042 bio_put(bio);
1919 2043
1920 if (!atomic_dec_and_test(&rbio->bbio->stripes_pending)) 2044 if (!atomic_dec_and_test(&rbio->stripes_pending))
1921 return; 2045 return;
1922 2046
1923 if (atomic_read(&rbio->bbio->error) > rbio->bbio->max_errors) 2047 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
1924 rbio_orig_end_io(rbio, -EIO, 0); 2048 rbio_orig_end_io(rbio, -EIO, 0);
1925 else 2049 else
1926 __raid_recover_end_io(rbio); 2050 __raid_recover_end_io(rbio);
@@ -1937,7 +2061,6 @@ static void raid_recover_end_io(struct bio *bio, int err)
1937static int __raid56_parity_recover(struct btrfs_raid_bio *rbio) 2061static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
1938{ 2062{
1939 int bios_to_read = 0; 2063 int bios_to_read = 0;
1940 struct btrfs_bio *bbio = rbio->bbio;
1941 struct bio_list bio_list; 2064 struct bio_list bio_list;
1942 int ret; 2065 int ret;
1943 int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE); 2066 int nr_pages = DIV_ROUND_UP(rbio->stripe_len, PAGE_CACHE_SIZE);
@@ -1951,16 +2074,16 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
1951 if (ret) 2074 if (ret)
1952 goto cleanup; 2075 goto cleanup;
1953 2076
1954 atomic_set(&rbio->bbio->error, 0); 2077 atomic_set(&rbio->error, 0);
1955 2078
1956 /* 2079 /*
1957 * read everything that hasn't failed. Thanks to the 2080 * read everything that hasn't failed. Thanks to the
1958 * stripe cache, it is possible that some or all of these 2081 * stripe cache, it is possible that some or all of these
1959 * pages are going to be uptodate. 2082 * pages are going to be uptodate.
1960 */ 2083 */
1961 for (stripe = 0; stripe < bbio->num_stripes; stripe++) { 2084 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
1962 if (rbio->faila == stripe || rbio->failb == stripe) { 2085 if (rbio->faila == stripe || rbio->failb == stripe) {
1963 atomic_inc(&rbio->bbio->error); 2086 atomic_inc(&rbio->error);
1964 continue; 2087 continue;
1965 } 2088 }
1966 2089
@@ -1990,7 +2113,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
1990 * were up to date, or we might have no bios to read because 2113 * were up to date, or we might have no bios to read because
1991 * the devices were gone. 2114 * the devices were gone.
1992 */ 2115 */
1993 if (atomic_read(&rbio->bbio->error) <= rbio->bbio->max_errors) { 2116 if (atomic_read(&rbio->error) <= rbio->bbio->max_errors) {
1994 __raid_recover_end_io(rbio); 2117 __raid_recover_end_io(rbio);
1995 goto out; 2118 goto out;
1996 } else { 2119 } else {
@@ -2002,7 +2125,7 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
2002 * the bbio may be freed once we submit the last bio. Make sure 2125 * the bbio may be freed once we submit the last bio. Make sure
2003 * not to touch it after that 2126 * not to touch it after that
2004 */ 2127 */
2005 atomic_set(&bbio->stripes_pending, bios_to_read); 2128 atomic_set(&rbio->stripes_pending, bios_to_read);
2006 while (1) { 2129 while (1) {
2007 bio = bio_list_pop(&bio_list); 2130 bio = bio_list_pop(&bio_list);
2008 if (!bio) 2131 if (!bio)
@@ -2021,7 +2144,7 @@ out:
2021 return 0; 2144 return 0;
2022 2145
2023cleanup: 2146cleanup:
2024 if (rbio->read_rebuild) 2147 if (rbio->operation == BTRFS_RBIO_READ_REBUILD)
2025 rbio_orig_end_io(rbio, -EIO, 0); 2148 rbio_orig_end_io(rbio, -EIO, 0);
2026 return -EIO; 2149 return -EIO;
2027} 2150}
@@ -2034,34 +2157,42 @@ cleanup:
2034 */ 2157 */
2035int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, 2158int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
2036 struct btrfs_bio *bbio, u64 *raid_map, 2159 struct btrfs_bio *bbio, u64 *raid_map,
2037 u64 stripe_len, int mirror_num) 2160 u64 stripe_len, int mirror_num, int generic_io)
2038{ 2161{
2039 struct btrfs_raid_bio *rbio; 2162 struct btrfs_raid_bio *rbio;
2040 int ret; 2163 int ret;
2041 2164
2042 rbio = alloc_rbio(root, bbio, raid_map, stripe_len); 2165 rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
2043 if (IS_ERR(rbio)) 2166 if (IS_ERR(rbio)) {
2167 __free_bbio_and_raid_map(bbio, raid_map, generic_io);
2044 return PTR_ERR(rbio); 2168 return PTR_ERR(rbio);
2169 }
2045 2170
2046 rbio->read_rebuild = 1; 2171 rbio->operation = BTRFS_RBIO_READ_REBUILD;
2047 bio_list_add(&rbio->bio_list, bio); 2172 bio_list_add(&rbio->bio_list, bio);
2048 rbio->bio_list_bytes = bio->bi_iter.bi_size; 2173 rbio->bio_list_bytes = bio->bi_iter.bi_size;
2049 2174
2050 rbio->faila = find_logical_bio_stripe(rbio, bio); 2175 rbio->faila = find_logical_bio_stripe(rbio, bio);
2051 if (rbio->faila == -1) { 2176 if (rbio->faila == -1) {
2052 BUG(); 2177 BUG();
2053 kfree(raid_map); 2178 __free_bbio_and_raid_map(bbio, raid_map, generic_io);
2054 kfree(bbio);
2055 kfree(rbio); 2179 kfree(rbio);
2056 return -EIO; 2180 return -EIO;
2057 } 2181 }
2058 2182
2183 if (generic_io) {
2184 btrfs_bio_counter_inc_noblocked(root->fs_info);
2185 rbio->generic_bio_cnt = 1;
2186 } else {
2187 set_bit(RBIO_HOLD_BBIO_MAP_BIT, &rbio->flags);
2188 }
2189
2059 /* 2190 /*
2060 * reconstruct from the q stripe if they are 2191 * reconstruct from the q stripe if they are
2061 * asking for mirror 3 2192 * asking for mirror 3
2062 */ 2193 */
2063 if (mirror_num == 3) 2194 if (mirror_num == 3)
2064 rbio->failb = bbio->num_stripes - 2; 2195 rbio->failb = rbio->real_stripes - 2;
2065 2196
2066 ret = lock_stripe_add(rbio); 2197 ret = lock_stripe_add(rbio);
2067 2198
@@ -2098,3 +2229,483 @@ static void read_rebuild_work(struct btrfs_work *work)
2098 rbio = container_of(work, struct btrfs_raid_bio, work); 2229 rbio = container_of(work, struct btrfs_raid_bio, work);
2099 __raid56_parity_recover(rbio); 2230 __raid56_parity_recover(rbio);
2100} 2231}
2232
2233/*
2234 * The following code is used to scrub/replace the parity stripe
2235 *
2236 * Note: We need to make sure that all the pages added to the scrub/replace
2237 * raid bio are correct and do not change during the scrub/replace. That
2238 * is, the pages hold only metadata or file data covered by a checksum.
2239 */
2240
2241struct btrfs_raid_bio *
2242raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
2243 struct btrfs_bio *bbio, u64 *raid_map,
2244 u64 stripe_len, struct btrfs_device *scrub_dev,
2245 unsigned long *dbitmap, int stripe_nsectors)
2246{
2247 struct btrfs_raid_bio *rbio;
2248 int i;
2249
2250 rbio = alloc_rbio(root, bbio, raid_map, stripe_len);
2251 if (IS_ERR(rbio))
2252 return NULL;
2253 bio_list_add(&rbio->bio_list, bio);
2254 /*
2255 * This is a special bio which is used to hold the completion handler
2256 * and make the scrub rbio behave like the other rbio types
2257 */
2258 ASSERT(!bio->bi_iter.bi_size);
2259 rbio->operation = BTRFS_RBIO_PARITY_SCRUB;
2260
2261 for (i = 0; i < rbio->real_stripes; i++) {
2262 if (bbio->stripes[i].dev == scrub_dev) {
2263 rbio->scrubp = i;
2264 break;
2265 }
2266 }
2267
2268 /* For now we only support a sectorsize equal to the page size */
2269 ASSERT(root->sectorsize == PAGE_SIZE);
2270 ASSERT(rbio->stripe_npages == stripe_nsectors);
2271 bitmap_copy(rbio->dbitmap, dbitmap, stripe_nsectors);
2272
2273 return rbio;
2274}
2275
2276void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio,
2277 struct page *page, u64 logical)
2278{
2279 int stripe_offset;
2280 int index;
2281
2282 ASSERT(logical >= rbio->raid_map[0]);
2283 ASSERT(logical + PAGE_SIZE <= rbio->raid_map[0] +
2284 rbio->stripe_len * rbio->nr_data);
2285 stripe_offset = (int)(logical - rbio->raid_map[0]);
2286 index = stripe_offset >> PAGE_CACHE_SHIFT;
2287 rbio->bio_pages[index] = page;
2288}
2289
2290/*
2291 * We only scrub the parity of the horizontal stripes for which we have
2292 * correct data, so we don't need to allocate pages for every stripe.
2293 */
2294static int alloc_rbio_essential_pages(struct btrfs_raid_bio *rbio)
2295{
2296 int i;
2297 int bit;
2298 int index;
2299 struct page *page;
2300
2301 for_each_set_bit(bit, rbio->dbitmap, rbio->stripe_npages) {
2302 for (i = 0; i < rbio->real_stripes; i++) {
2303 index = i * rbio->stripe_npages + bit;
2304 if (rbio->stripe_pages[index])
2305 continue;
2306
2307 page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2308 if (!page)
2309 return -ENOMEM;
2310 rbio->stripe_pages[index] = page;
2311 ClearPageUptodate(page);
2312 }
2313 }
2314 return 0;
2315}
2316
2317/*
2318 * end io function used by finish_parity_scrub. When we finally
2319 * get here, we've written the repaired parity of a full stripe
2320 */
2321static void raid_write_parity_end_io(struct bio *bio, int err)
2322{
2323 struct btrfs_raid_bio *rbio = bio->bi_private;
2324
2325 if (err)
2326 fail_bio_stripe(rbio, bio);
2327
2328 bio_put(bio);
2329
2330 if (!atomic_dec_and_test(&rbio->stripes_pending))
2331 return;
2332
2333 err = 0;
2334
2335 if (atomic_read(&rbio->error))
2336 err = -EIO;
2337
2338 rbio_orig_end_io(rbio, err, 0);
2339}
2340
2341static noinline void finish_parity_scrub(struct btrfs_raid_bio *rbio,
2342 int need_check)
2343{
2344 struct btrfs_bio *bbio = rbio->bbio;
2345 void *pointers[rbio->real_stripes];
2346 DECLARE_BITMAP(pbitmap, rbio->stripe_npages);
2347 int nr_data = rbio->nr_data;
2348 int stripe;
2349 int pagenr;
2350 int p_stripe = -1;
2351 int q_stripe = -1;
2352 struct page *p_page = NULL;
2353 struct page *q_page = NULL;
2354 struct bio_list bio_list;
2355 struct bio *bio;
2356 int is_replace = 0;
2357 int ret;
2358
2359 bio_list_init(&bio_list);
2360
2361 if (rbio->real_stripes - rbio->nr_data == 1) {
2362 p_stripe = rbio->real_stripes - 1;
2363 } else if (rbio->real_stripes - rbio->nr_data == 2) {
2364 p_stripe = rbio->real_stripes - 2;
2365 q_stripe = rbio->real_stripes - 1;
2366 } else {
2367 BUG();
2368 }
2369
2370 if (bbio->num_tgtdevs && bbio->tgtdev_map[rbio->scrubp]) {
2371 is_replace = 1;
2372 bitmap_copy(pbitmap, rbio->dbitmap, rbio->stripe_npages);
2373 }
2374
2375 /*
2376 * The higher layers (the scrubber) are unlikely to use
2377 * this area of the disk again soon, so don't cache
2378 * it.
2379 */
2380 clear_bit(RBIO_CACHE_READY_BIT, &rbio->flags);
2381
2382 if (!need_check)
2383 goto writeback;
2384
2385 p_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2386 if (!p_page)
2387 goto cleanup;
2388 SetPageUptodate(p_page);
2389
2390 if (q_stripe != -1) {
2391 q_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
2392 if (!q_page) {
2393 __free_page(p_page);
2394 goto cleanup;
2395 }
2396 SetPageUptodate(q_page);
2397 }
2398
2399 atomic_set(&rbio->error, 0);
2400
2401 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2402 struct page *p;
2403 void *parity;
2404 /* first collect one page from each data stripe */
2405 for (stripe = 0; stripe < nr_data; stripe++) {
2406 p = page_in_rbio(rbio, stripe, pagenr, 0);
2407 pointers[stripe] = kmap(p);
2408 }
2409
2410 /* then add the parity stripe */
2411 pointers[stripe++] = kmap(p_page);
2412
2413 if (q_stripe != -1) {
2414
2415 /*
2416 * raid6, add the qstripe and call the
2417 * library function to fill in our p/q
2418 */
2419 pointers[stripe++] = kmap(q_page);
2420
2421 raid6_call.gen_syndrome(rbio->real_stripes, PAGE_SIZE,
2422 pointers);
2423 } else {
2424 /* raid5 */
2425 memcpy(pointers[nr_data], pointers[0], PAGE_SIZE);
2426 run_xor(pointers + 1, nr_data - 1, PAGE_CACHE_SIZE);
2427 }
2428
2429 /* Check the parity being scrubbed and repair it if needed */
2430 p = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2431 parity = kmap(p);
2432 if (memcmp(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE))
2433 memcpy(parity, pointers[rbio->scrubp], PAGE_CACHE_SIZE);
2434 else
2435 /* Parity is correct, no writeback needed */
2436 bitmap_clear(rbio->dbitmap, pagenr, 1);
2437 kunmap(p);
2438
2439 for (stripe = 0; stripe < rbio->real_stripes; stripe++)
2440 kunmap(page_in_rbio(rbio, stripe, pagenr, 0));
2441 }
2442
2443 __free_page(p_page);
2444 if (q_page)
2445 __free_page(q_page);
2446
2447writeback:
2448 /*
2449 * time to start writing. Make bios for everything from the
2450 * higher layers (the bio_list in our rbio) and our p/q. Ignore
2451 * everything else.
2452 */
2453 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2454 struct page *page;
2455
2456 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2457 ret = rbio_add_io_page(rbio, &bio_list,
2458 page, rbio->scrubp, pagenr, rbio->stripe_len);
2459 if (ret)
2460 goto cleanup;
2461 }
2462
2463 if (!is_replace)
2464 goto submit_write;
2465
2466 for_each_set_bit(pagenr, pbitmap, rbio->stripe_npages) {
2467 struct page *page;
2468
2469 page = rbio_stripe_page(rbio, rbio->scrubp, pagenr);
2470 ret = rbio_add_io_page(rbio, &bio_list, page,
2471 bbio->tgtdev_map[rbio->scrubp],
2472 pagenr, rbio->stripe_len);
2473 if (ret)
2474 goto cleanup;
2475 }
2476
2477submit_write:
2478 nr_data = bio_list_size(&bio_list);
2479 if (!nr_data) {
2480 /* Every parity is right */
2481 rbio_orig_end_io(rbio, 0, 0);
2482 return;
2483 }
2484
2485 atomic_set(&rbio->stripes_pending, nr_data);
2486
2487 while (1) {
2488 bio = bio_list_pop(&bio_list);
2489 if (!bio)
2490 break;
2491
2492 bio->bi_private = rbio;
2493 bio->bi_end_io = raid_write_parity_end_io;
2494 BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
2495 submit_bio(WRITE, bio);
2496 }
2497 return;
2498
2499cleanup:
2500 rbio_orig_end_io(rbio, -EIO, 0);
2501}
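
For RAID5 the check-and-repair loop in finish_parity_scrub() boils down
to: rebuild the parity page by XOR-ing the data pages, memcmp() it against
the parity read from disk, rewrite on mismatch, and clear the dbitmap bit
when the parity was already correct. A self-contained sketch of that core
(RAID6 would call raid6_call.gen_syndrome() instead of the XOR):

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

#define SKETCH_PAGE_SIZE 4096

/*
 * Returns true when the on-disk parity had to be repaired and must be
 * written back; returning false corresponds to clearing the dbitmap bit
 * so the page is skipped at writeback time.
 */
static bool scrub_raid5_parity(const unsigned char *data[], int nr_data,
			       unsigned char *parity_on_disk,
			       unsigned char expected[SKETCH_PAGE_SIZE])
{
	memcpy(expected, data[0], SKETCH_PAGE_SIZE);
	for (int i = 1; i < nr_data; i++)
		for (size_t b = 0; b < SKETCH_PAGE_SIZE; b++)
			expected[b] ^= data[i][b];

	if (memcmp(parity_on_disk, expected, SKETCH_PAGE_SIZE) == 0)
		return false;	/* parity correct, no writeback needed */

	memcpy(parity_on_disk, expected, SKETCH_PAGE_SIZE);
	return true;		/* repaired, queue for writeback */
}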
2502
2503static inline int is_data_stripe(struct btrfs_raid_bio *rbio, int stripe)
2504{
2505 if (stripe >= 0 && stripe < rbio->nr_data)
2506 return 1;
2507 return 0;
2508}
2509
2510/*
2511 * While we're doing the parity check and repair, we could have errors
2512 * in reading pages off the disk. This checks for errors and if we're
2513 * not able to read the page it'll trigger parity reconstruction. The
2514 * parity scrub will be finished after we've reconstructed the failed
2515 * stripes
2516 */
2517static void validate_rbio_for_parity_scrub(struct btrfs_raid_bio *rbio)
2518{
2519 if (atomic_read(&rbio->error) > rbio->bbio->max_errors)
2520 goto cleanup;
2521
2522 if (rbio->faila >= 0 || rbio->failb >= 0) {
2523 int dfail = 0, failp = -1;
2524
2525 if (is_data_stripe(rbio, rbio->faila))
2526 dfail++;
2527 else if (is_parity_stripe(rbio->faila))
2528 failp = rbio->faila;
2529
2530 if (is_data_stripe(rbio, rbio->failb))
2531 dfail++;
2532 else if (is_parity_stripe(rbio->failb))
2533 failp = rbio->failb;
2534
2535 /*
2536 * Because we cannot use the parity being scrubbed to repair
2537 * the data, our repair capability is reduced by one.
2538 * (In the case of RAID5, we cannot repair anything.)
2539 */
2540 if (dfail > rbio->bbio->max_errors - 1)
2541 goto cleanup;
2542
2543 /*
2544 * If all the data is good and only the parity is bad, just
2545 * repair the parity.
2546 */
2547 if (dfail == 0) {
2548 finish_parity_scrub(rbio, 0);
2549 return;
2550 }
2551
2552 /*
2553 * Getting here means we have one corrupted data stripe and
2554 * one corrupted parity on RAID6. If the corrupted parity is
2555 * the one being scrubbed, we can luckily use the other parity
2556 * to repair the data; otherwise we cannot repair the data stripe.
2557 */
2558 if (failp != rbio->scrubp)
2559 goto cleanup;
2560
2561 __raid_recover_end_io(rbio);
2562 } else {
2563 finish_parity_scrub(rbio, 1);
2564 }
2565 return;
2566
2567cleanup:
2568 rbio_orig_end_io(rbio, -EIO, 0);
2569}
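
validate_rbio_for_parity_scrub() above encodes a small decision table:
with the scrubbed parity out of play, at most max_errors - 1 failed data
stripes can be tolerated; a failure with no bad data stripe means only the
parity needs rewriting; and data can only be reconstructed when the failed
parity is the very one being scrubbed, leaving the other parity usable.
The same table as a standalone predicate (names hypothetical):

enum scrub_action { SCRUB_ABORT, SCRUB_WRITE_PARITY, SCRUB_REBUILD_DATA };

/*
 * dfail:      number of failed data stripes
 * failp:      index of a failed parity stripe, or -1 if none
 * scrubp:     index of the parity stripe being scrubbed
 * max_errors: 1 for RAID5, 2 for RAID6
 */
static enum scrub_action classify_scrub_failure(int dfail, int failp,
						int scrubp, int max_errors)
{
	if (dfail > max_errors - 1)
		return SCRUB_ABORT;		/* RAID5 repairs no data here */
	if (dfail == 0)
		return SCRUB_WRITE_PARITY;	/* only the parity was bad */
	if (failp != scrubp)
		return SCRUB_ABORT;		/* trusted parity lost too */
	return SCRUB_REBUILD_DATA;		/* recover, then finish scrub */
}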
2570
2571/*
2572 * end io for the read phase of the scrub cycle. All the bios here are physical
2573 * stripe bios we've read from the disk so we can recalculate the parity of the
2574 * stripe.
2575 *
2576 * This will usually kick off finish_parity_scrub once all the bios are read in,
2577 * but it may trigger parity reconstruction if we had any errors along the way
2578 */
2579static void raid56_parity_scrub_end_io(struct bio *bio, int err)
2580{
2581 struct btrfs_raid_bio *rbio = bio->bi_private;
2582
2583 if (err)
2584 fail_bio_stripe(rbio, bio);
2585 else
2586 set_bio_pages_uptodate(bio);
2587
2588 bio_put(bio);
2589
2590 if (!atomic_dec_and_test(&rbio->stripes_pending))
2591 return;
2592
2593 /*
2594 * this will normally call finish_parity_scrub to start our write
2595 * but if there are any failed stripes we'll reconstruct
2596 * from parity first
2597 */
2598 validate_rbio_for_parity_scrub(rbio);
2599}
2600
2601static void raid56_parity_scrub_stripe(struct btrfs_raid_bio *rbio)
2602{
2603 int bios_to_read = 0;
2604 struct bio_list bio_list;
2605 int ret;
2606 int pagenr;
2607 int stripe;
2608 struct bio *bio;
2609
2610 ret = alloc_rbio_essential_pages(rbio);
2611 if (ret)
2612 goto cleanup;
2613
2614 bio_list_init(&bio_list);
2615
2616 atomic_set(&rbio->error, 0);
2617 /*
2618 * build a list of bios to read all the missing parts of this
2619 * stripe
2620 */
2621 for (stripe = 0; stripe < rbio->real_stripes; stripe++) {
2622 for_each_set_bit(pagenr, rbio->dbitmap, rbio->stripe_npages) {
2623 struct page *page;
2624 /*
2625 * we want to find all the pages missing from
2626 * the rbio and read them from the disk. If
2627 * page_in_rbio finds a page in the bio list
2628 * we don't need to read it off the stripe.
2629 */
2630 page = page_in_rbio(rbio, stripe, pagenr, 1);
2631 if (page)
2632 continue;
2633
2634 page = rbio_stripe_page(rbio, stripe, pagenr);
2635 /*
2636 * the bio cache may have handed us an uptodate
2637 * page. If so, be happy and use it
2638 */
2639 if (PageUptodate(page))
2640 continue;
2641
2642 ret = rbio_add_io_page(rbio, &bio_list, page,
2643 stripe, pagenr, rbio->stripe_len);
2644 if (ret)
2645 goto cleanup;
2646 }
2647 }
2648
2649 bios_to_read = bio_list_size(&bio_list);
2650 if (!bios_to_read) {
2651		/*
2652		 * this can happen if other rbios have merged with
2653		 * us; it means there is nothing left to read.
2654		 * But if there are missing devices it may not be
2655		 * safe to do the full stripe write yet.
2656		 */
2657 goto finish;
2658 }
2659
2660 /*
2661 * the bbio may be freed once we submit the last bio. Make sure
2662 * not to touch it after that
2663 */
2664 atomic_set(&rbio->stripes_pending, bios_to_read);
2665 while (1) {
2666 bio = bio_list_pop(&bio_list);
2667 if (!bio)
2668 break;
2669
2670 bio->bi_private = rbio;
2671 bio->bi_end_io = raid56_parity_scrub_end_io;
2672
2673 btrfs_bio_wq_end_io(rbio->fs_info, bio,
2674 BTRFS_WQ_ENDIO_RAID56);
2675
2676 BUG_ON(!test_bit(BIO_UPTODATE, &bio->bi_flags));
2677 submit_bio(READ, bio);
2678 }
2679 /* the actual write will happen once the reads are done */
2680 return;
2681
2682cleanup:
2683 rbio_orig_end_io(rbio, -EIO, 0);
2684 return;
2685
2686finish:
2687 validate_rbio_for_parity_scrub(rbio);
2688}
2689
2690static void scrub_parity_work(struct btrfs_work *work)
2691{
2692 struct btrfs_raid_bio *rbio;
2693
2694 rbio = container_of(work, struct btrfs_raid_bio, work);
2695 raid56_parity_scrub_stripe(rbio);
2696}
2697
2698static void async_scrub_parity(struct btrfs_raid_bio *rbio)
2699{
2700 btrfs_init_work(&rbio->work, btrfs_rmw_helper,
2701 scrub_parity_work, NULL, NULL);
2702
2703 btrfs_queue_work(rbio->fs_info->rmw_workers,
2704 &rbio->work);
2705}
2706
2707void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio)
2708{
2709 if (!lock_stripe_add(rbio))
2710 async_scrub_parity(rbio);
2711}
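
Editorial note, inferred from the surrounding raid56 code rather than stated
in this patch: lock_stripe_add() appears to return 0 when this rbio acquired
the stripe lock itself, in which case the caller must queue the work, e.g.:

	if (!lock_stripe_add(rbio))
		async_scrub_parity(rbio);	/* we own the lock: queue work */
	/* otherwise the rbio was merged into the current lock holder,
	 * which will run it when the lock is released */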
diff --git a/fs/btrfs/raid56.h b/fs/btrfs/raid56.h
index ea5d73bfdfbe..31d4a157b5e3 100644
--- a/fs/btrfs/raid56.h
+++ b/fs/btrfs/raid56.h
@@ -39,13 +39,25 @@ static inline int nr_data_stripes(struct map_lookup *map)
39#define is_parity_stripe(x) (((x) == RAID5_P_STRIPE) || \ 39#define is_parity_stripe(x) (((x) == RAID5_P_STRIPE) || \
40 ((x) == RAID6_Q_STRIPE)) 40 ((x) == RAID6_Q_STRIPE))
41 41
42struct btrfs_raid_bio;
43struct btrfs_device;
44
42int raid56_parity_recover(struct btrfs_root *root, struct bio *bio, 45int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
43 struct btrfs_bio *bbio, u64 *raid_map, 46 struct btrfs_bio *bbio, u64 *raid_map,
44 u64 stripe_len, int mirror_num); 47 u64 stripe_len, int mirror_num, int generic_io);
45int raid56_parity_write(struct btrfs_root *root, struct bio *bio, 48int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
46 struct btrfs_bio *bbio, u64 *raid_map, 49 struct btrfs_bio *bbio, u64 *raid_map,
47 u64 stripe_len); 50 u64 stripe_len);
48 51
52struct btrfs_raid_bio *
53raid56_parity_alloc_scrub_rbio(struct btrfs_root *root, struct bio *bio,
54 struct btrfs_bio *bbio, u64 *raid_map,
55 u64 stripe_len, struct btrfs_device *scrub_dev,
56 unsigned long *dbitmap, int stripe_nsectors);
57void raid56_parity_add_scrub_pages(struct btrfs_raid_bio *rbio,
58 struct page *page, u64 logical);
59void raid56_parity_submit_scrub_rbio(struct btrfs_raid_bio *rbio);
60
49int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info); 61int btrfs_alloc_stripe_hash_table(struct btrfs_fs_info *info);
50void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info); 62void btrfs_free_stripe_hash_table(struct btrfs_fs_info *info);
51#endif 63#endif
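
A minimal sketch of how a caller is expected to drive the scrub interface
declared above, mirroring scrub_parity_check_and_repair() later in this
patch (error handling elided; the surrounding variables are assumed to
exist):

	rbio = raid56_parity_alloc_scrub_rbio(root, bio, bbio, raid_map,
					      stripe_len, scrub_dev,
					      dbitmap, stripe_nsectors);
	if (!rbio)
		goto out;
	/* hand over each data page whose parity should be verified */
	list_for_each_entry(spage, &spages, list)
		raid56_parity_add_scrub_pages(rbio, spage->page,
					      spage->logical);
	raid56_parity_submit_scrub_rbio(rbio);	/* completes via bio end_io */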
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index efa083113827..f2bb13a23f86 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -63,10 +63,18 @@ struct scrub_ctx;
63 */ 63 */
64#define SCRUB_MAX_PAGES_PER_BLOCK 16 /* 64k per node/leaf/sector */ 64#define SCRUB_MAX_PAGES_PER_BLOCK 16 /* 64k per node/leaf/sector */
65 65
66struct scrub_recover {
67 atomic_t refs;
68 struct btrfs_bio *bbio;
69 u64 *raid_map;
70 u64 map_length;
71};
72
66struct scrub_page { 73struct scrub_page {
67 struct scrub_block *sblock; 74 struct scrub_block *sblock;
68 struct page *page; 75 struct page *page;
69 struct btrfs_device *dev; 76 struct btrfs_device *dev;
77 struct list_head list;
70 u64 flags; /* extent flags */ 78 u64 flags; /* extent flags */
71 u64 generation; 79 u64 generation;
72 u64 logical; 80 u64 logical;
@@ -79,6 +87,8 @@ struct scrub_page {
79 unsigned int io_error:1; 87 unsigned int io_error:1;
80 }; 88 };
81 u8 csum[BTRFS_CSUM_SIZE]; 89 u8 csum[BTRFS_CSUM_SIZE];
90
91 struct scrub_recover *recover;
82}; 92};
83 93
84struct scrub_bio { 94struct scrub_bio {
@@ -105,14 +115,52 @@ struct scrub_block {
105 atomic_t outstanding_pages; 115 atomic_t outstanding_pages;
106 atomic_t ref_count; /* free mem on transition to zero */ 116 atomic_t ref_count; /* free mem on transition to zero */
107 struct scrub_ctx *sctx; 117 struct scrub_ctx *sctx;
118 struct scrub_parity *sparity;
108 struct { 119 struct {
109 unsigned int header_error:1; 120 unsigned int header_error:1;
110 unsigned int checksum_error:1; 121 unsigned int checksum_error:1;
111 unsigned int no_io_error_seen:1; 122 unsigned int no_io_error_seen:1;
112 unsigned int generation_error:1; /* also sets header_error */ 123 unsigned int generation_error:1; /* also sets header_error */
124
125 /* The following is for the data used in the parity check */
126 /* It applies only to data that has a checksum */
127 unsigned int data_corrected:1;
113 }; 128 };
114}; 129};
115 130
131/* Used for chunks with a parity stripe, such as RAID5/6 */
132struct scrub_parity {
133 struct scrub_ctx *sctx;
134
135 struct btrfs_device *scrub_dev;
136
137 u64 logic_start;
138
139 u64 logic_end;
140
141 int nsectors;
142
143 int stripe_len;
144
145 atomic_t ref_count;
146
147 struct list_head spages;
148
149 /* Work item for parity check and repair */
150 struct btrfs_work work;
151
152 /* Mark the parity blocks which have data */
153 unsigned long *dbitmap;
154
155 /*
156  * Mark the parity blocks which have data, but where errors
157  * happened while reading or checking that data
158  */
159 unsigned long *ebitmap;
160
161 unsigned long bitmap[0];
162};
163
116struct scrub_wr_ctx { 164struct scrub_wr_ctx {
117 struct scrub_bio *wr_curr_bio; 165 struct scrub_bio *wr_curr_bio;
118 struct btrfs_device *tgtdev; 166 struct btrfs_device *tgtdev;
@@ -196,7 +244,7 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
196static void scrub_recheck_block(struct btrfs_fs_info *fs_info, 244static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
197 struct scrub_block *sblock, int is_metadata, 245 struct scrub_block *sblock, int is_metadata,
198 int have_csum, u8 *csum, u64 generation, 246 int have_csum, u8 *csum, u64 generation,
199 u16 csum_size); 247 u16 csum_size, int retry_failed_mirror);
200static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info, 248static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
201 struct scrub_block *sblock, 249 struct scrub_block *sblock,
202 int is_metadata, int have_csum, 250 int is_metadata, int have_csum,
@@ -218,6 +266,8 @@ static void scrub_block_get(struct scrub_block *sblock);
218static void scrub_block_put(struct scrub_block *sblock); 266static void scrub_block_put(struct scrub_block *sblock);
219static void scrub_page_get(struct scrub_page *spage); 267static void scrub_page_get(struct scrub_page *spage);
220static void scrub_page_put(struct scrub_page *spage); 268static void scrub_page_put(struct scrub_page *spage);
269static void scrub_parity_get(struct scrub_parity *sparity);
270static void scrub_parity_put(struct scrub_parity *sparity);
221static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx, 271static int scrub_add_page_to_rd_bio(struct scrub_ctx *sctx,
222 struct scrub_page *spage); 272 struct scrub_page *spage);
223static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, 273static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
@@ -790,6 +840,20 @@ out:
790 scrub_pending_trans_workers_dec(sctx); 840 scrub_pending_trans_workers_dec(sctx);
791} 841}
792 842
843static inline void scrub_get_recover(struct scrub_recover *recover)
844{
845 atomic_inc(&recover->refs);
846}
847
848static inline void scrub_put_recover(struct scrub_recover *recover)
849{
850 if (atomic_dec_and_test(&recover->refs)) {
851 kfree(recover->bbio);
852 kfree(recover->raid_map);
853 kfree(recover);
854 }
855}
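
A sketch of the intended lifecycle of this refcount, matching what
scrub_setup_recheck_block() does further down (the loop is pseudo-code; one
reference is held per page that stores the pointer, plus the setup
reference):

	recover = kzalloc(sizeof(*recover), GFP_NOFS);
	atomic_set(&recover->refs, 1);		/* setup reference */
	for_each_recheck_page(page) {		/* hypothetical iterator */
		scrub_get_recover(recover);
		page->recover = recover;
	}
	scrub_put_recover(recover);		/* drop the setup reference */
	/* ... later, per-page teardown: */
	scrub_put_recover(page->recover);	/* last put frees bbio/raid_map */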
856
793/* 857/*
794 * scrub_handle_errored_block gets called when either verification of the 858 * scrub_handle_errored_block gets called when either verification of the
795 * pages failed or the bio failed to read, e.g. with EIO. In the latter 859 * pages failed or the bio failed to read, e.g. with EIO. In the latter
@@ -906,7 +970,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
906 970
907 /* build and submit the bios for the failed mirror, check checksums */ 971 /* build and submit the bios for the failed mirror, check checksums */
908 scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum, 972 scrub_recheck_block(fs_info, sblock_bad, is_metadata, have_csum,
909 csum, generation, sctx->csum_size); 973 csum, generation, sctx->csum_size, 1);
910 974
911 if (!sblock_bad->header_error && !sblock_bad->checksum_error && 975 if (!sblock_bad->header_error && !sblock_bad->checksum_error &&
912 sblock_bad->no_io_error_seen) { 976 sblock_bad->no_io_error_seen) {
@@ -920,6 +984,7 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
920 */ 984 */
921 spin_lock(&sctx->stat_lock); 985 spin_lock(&sctx->stat_lock);
922 sctx->stat.unverified_errors++; 986 sctx->stat.unverified_errors++;
987 sblock_to_check->data_corrected = 1;
923 spin_unlock(&sctx->stat_lock); 988 spin_unlock(&sctx->stat_lock);
924 989
925 if (sctx->is_dev_replace) 990 if (sctx->is_dev_replace)
@@ -1019,7 +1084,7 @@ nodatasum_case:
1019 /* build and submit the bios, check checksums */ 1084 /* build and submit the bios, check checksums */
1020 scrub_recheck_block(fs_info, sblock_other, is_metadata, 1085 scrub_recheck_block(fs_info, sblock_other, is_metadata,
1021 have_csum, csum, generation, 1086 have_csum, csum, generation,
1022 sctx->csum_size); 1087 sctx->csum_size, 0);
1023 1088
1024 if (!sblock_other->header_error && 1089 if (!sblock_other->header_error &&
1025 !sblock_other->checksum_error && 1090 !sblock_other->checksum_error &&
@@ -1169,7 +1234,7 @@ nodatasum_case:
1169 */ 1234 */
1170 scrub_recheck_block(fs_info, sblock_bad, 1235 scrub_recheck_block(fs_info, sblock_bad,
1171 is_metadata, have_csum, csum, 1236 is_metadata, have_csum, csum,
1172 generation, sctx->csum_size); 1237 generation, sctx->csum_size, 1);
1173 if (!sblock_bad->header_error && 1238 if (!sblock_bad->header_error &&
1174 !sblock_bad->checksum_error && 1239 !sblock_bad->checksum_error &&
1175 sblock_bad->no_io_error_seen) 1240 sblock_bad->no_io_error_seen)
@@ -1180,6 +1245,7 @@ nodatasum_case:
1180corrected_error: 1245corrected_error:
1181 spin_lock(&sctx->stat_lock); 1246 spin_lock(&sctx->stat_lock);
1182 sctx->stat.corrected_errors++; 1247 sctx->stat.corrected_errors++;
1248 sblock_to_check->data_corrected = 1;
1183 spin_unlock(&sctx->stat_lock); 1249 spin_unlock(&sctx->stat_lock);
1184 printk_ratelimited_in_rcu(KERN_ERR 1250 printk_ratelimited_in_rcu(KERN_ERR
1185 "BTRFS: fixed up error at logical %llu on dev %s\n", 1251 "BTRFS: fixed up error at logical %llu on dev %s\n",
@@ -1201,11 +1267,18 @@ out:
1201 mirror_index++) { 1267 mirror_index++) {
1202 struct scrub_block *sblock = sblocks_for_recheck + 1268 struct scrub_block *sblock = sblocks_for_recheck +
1203 mirror_index; 1269 mirror_index;
1270 struct scrub_recover *recover;
1204 int page_index; 1271 int page_index;
1205 1272
1206 for (page_index = 0; page_index < sblock->page_count; 1273 for (page_index = 0; page_index < sblock->page_count;
1207 page_index++) { 1274 page_index++) {
1208 sblock->pagev[page_index]->sblock = NULL; 1275 sblock->pagev[page_index]->sblock = NULL;
1276 recover = sblock->pagev[page_index]->recover;
1277 if (recover) {
1278 scrub_put_recover(recover);
1279 sblock->pagev[page_index]->recover =
1280 NULL;
1281 }
1209 scrub_page_put(sblock->pagev[page_index]); 1282 scrub_page_put(sblock->pagev[page_index]);
1210 } 1283 }
1211 } 1284 }
@@ -1215,14 +1288,63 @@ out:
1215 return 0; 1288 return 0;
1216} 1289}
1217 1290
1291static inline int scrub_nr_raid_mirrors(struct btrfs_bio *bbio, u64 *raid_map)
1292{
1293 if (raid_map) {
1294 if (raid_map[bbio->num_stripes - 1] == RAID6_Q_STRIPE)
1295 return 3;
1296 else
1297 return 2;
1298 } else {
1299 return (int)bbio->num_stripes;
1300 }
1301}
1302
1303static inline void scrub_stripe_index_and_offset(u64 logical, u64 *raid_map,
1304 u64 mapped_length,
1305 int nstripes, int mirror,
1306 int *stripe_index,
1307 u64 *stripe_offset)
1308{
1309 int i;
1310
1311 if (raid_map) {
1312 /* RAID5/6 */
1313 for (i = 0; i < nstripes; i++) {
1314 if (raid_map[i] == RAID6_Q_STRIPE ||
1315 raid_map[i] == RAID5_P_STRIPE)
1316 continue;
1317
1318 if (logical >= raid_map[i] &&
1319 logical < raid_map[i] + mapped_length)
1320 break;
1321 }
1322
1323 *stripe_index = i;
1324 *stripe_offset = logical - raid_map[i];
1325 } else {
1326 /* The other RAID type */
1327 *stripe_index = mirror;
1328 *stripe_offset = 0;
1329 }
1330}
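
A worked example of the lookup above, assuming a 64K stripe length on RAID5
with two data stripes plus parity:

	u64 raid_map[] = { 0, 65536, RAID5_P_STRIPE };
	int idx;
	u64 off;

	/* logical 70000 skips the parity slot and lands in stripe 1 */
	scrub_stripe_index_and_offset(70000, raid_map, 65536, 3, 0,
				      &idx, &off);
	/* idx == 1, off == 70000 - 65536 == 4464 */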
1331
1218static int scrub_setup_recheck_block(struct scrub_ctx *sctx, 1332static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
1219 struct btrfs_fs_info *fs_info, 1333 struct btrfs_fs_info *fs_info,
1220 struct scrub_block *original_sblock, 1334 struct scrub_block *original_sblock,
1221 u64 length, u64 logical, 1335 u64 length, u64 logical,
1222 struct scrub_block *sblocks_for_recheck) 1336 struct scrub_block *sblocks_for_recheck)
1223{ 1337{
1338 struct scrub_recover *recover;
1339 struct btrfs_bio *bbio;
1340 u64 *raid_map;
1341 u64 sublen;
1342 u64 mapped_length;
1343 u64 stripe_offset;
1344 int stripe_index;
1224 int page_index; 1345 int page_index;
1225 int mirror_index; 1346 int mirror_index;
1347 int nmirrors;
1226 int ret; 1348 int ret;
1227 1349
1228 /* 1350 /*
@@ -1233,23 +1355,39 @@ static int scrub_setup_recheck_block(struct scrub_ctx *sctx,
1233 1355
1234 page_index = 0; 1356 page_index = 0;
1235 while (length > 0) { 1357 while (length > 0) {
1236 u64 sublen = min_t(u64, length, PAGE_SIZE); 1358 sublen = min_t(u64, length, PAGE_SIZE);
1237 u64 mapped_length = sublen; 1359 mapped_length = sublen;
1238 struct btrfs_bio *bbio = NULL; 1360 bbio = NULL;
1361 raid_map = NULL;
1239 1362
1240 /* 1363 /*
1241 * with a length of PAGE_SIZE, each returned stripe 1364 * with a length of PAGE_SIZE, each returned stripe
1242 * represents one mirror 1365 * represents one mirror
1243 */ 1366 */
1244 ret = btrfs_map_block(fs_info, REQ_GET_READ_MIRRORS, logical, 1367 ret = btrfs_map_sblock(fs_info, REQ_GET_READ_MIRRORS, logical,
1245 &mapped_length, &bbio, 0); 1368 &mapped_length, &bbio, 0, &raid_map);
1246 if (ret || !bbio || mapped_length < sublen) { 1369 if (ret || !bbio || mapped_length < sublen) {
1247 kfree(bbio); 1370 kfree(bbio);
1371 kfree(raid_map);
1248 return -EIO; 1372 return -EIO;
1249 } 1373 }
1250 1374
1375 recover = kzalloc(sizeof(struct scrub_recover), GFP_NOFS);
1376 if (!recover) {
1377 kfree(bbio);
1378 kfree(raid_map);
1379 return -ENOMEM;
1380 }
1381
1382 atomic_set(&recover->refs, 1);
1383 recover->bbio = bbio;
1384 recover->raid_map = raid_map;
1385 recover->map_length = mapped_length;
1386
1251 BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO); 1387 BUG_ON(page_index >= SCRUB_PAGES_PER_RD_BIO);
1252 for (mirror_index = 0; mirror_index < (int)bbio->num_stripes; 1388
1389 nmirrors = scrub_nr_raid_mirrors(bbio, raid_map);
1390 for (mirror_index = 0; mirror_index < nmirrors;
1253 mirror_index++) { 1391 mirror_index++) {
1254 struct scrub_block *sblock; 1392 struct scrub_block *sblock;
1255 struct scrub_page *page; 1393 struct scrub_page *page;
@@ -1265,26 +1403,38 @@ leave_nomem:
1265 spin_lock(&sctx->stat_lock); 1403 spin_lock(&sctx->stat_lock);
1266 sctx->stat.malloc_errors++; 1404 sctx->stat.malloc_errors++;
1267 spin_unlock(&sctx->stat_lock); 1405 spin_unlock(&sctx->stat_lock);
1268 kfree(bbio); 1406 scrub_put_recover(recover);
1269 return -ENOMEM; 1407 return -ENOMEM;
1270 } 1408 }
1271 scrub_page_get(page); 1409 scrub_page_get(page);
1272 sblock->pagev[page_index] = page; 1410 sblock->pagev[page_index] = page;
1273 page->logical = logical; 1411 page->logical = logical;
1274 page->physical = bbio->stripes[mirror_index].physical; 1412
1413 scrub_stripe_index_and_offset(logical, raid_map,
1414 mapped_length,
1415 bbio->num_stripes,
1416 mirror_index,
1417 &stripe_index,
1418 &stripe_offset);
1419 page->physical = bbio->stripes[stripe_index].physical +
1420 stripe_offset;
1421 page->dev = bbio->stripes[stripe_index].dev;
1422
1275 BUG_ON(page_index >= original_sblock->page_count); 1423 BUG_ON(page_index >= original_sblock->page_count);
1276 page->physical_for_dev_replace = 1424 page->physical_for_dev_replace =
1277 original_sblock->pagev[page_index]-> 1425 original_sblock->pagev[page_index]->
1278 physical_for_dev_replace; 1426 physical_for_dev_replace;
1279 /* for missing devices, dev->bdev is NULL */ 1427 /* for missing devices, dev->bdev is NULL */
1280 page->dev = bbio->stripes[mirror_index].dev;
1281 page->mirror_num = mirror_index + 1; 1428 page->mirror_num = mirror_index + 1;
1282 sblock->page_count++; 1429 sblock->page_count++;
1283 page->page = alloc_page(GFP_NOFS); 1430 page->page = alloc_page(GFP_NOFS);
1284 if (!page->page) 1431 if (!page->page)
1285 goto leave_nomem; 1432 goto leave_nomem;
1433
1434 scrub_get_recover(recover);
1435 page->recover = recover;
1286 } 1436 }
1287 kfree(bbio); 1437 scrub_put_recover(recover);
1288 length -= sublen; 1438 length -= sublen;
1289 logical += sublen; 1439 logical += sublen;
1290 page_index++; 1440 page_index++;
@@ -1293,6 +1443,51 @@ leave_nomem:
1293 return 0; 1443 return 0;
1294} 1444}
1295 1445
1446struct scrub_bio_ret {
1447 struct completion event;
1448 int error;
1449};
1450
1451static void scrub_bio_wait_endio(struct bio *bio, int error)
1452{
1453 struct scrub_bio_ret *ret = bio->bi_private;
1454
1455 ret->error = error;
1456 complete(&ret->event);
1457}
1458
1459static inline int scrub_is_page_on_raid56(struct scrub_page *page)
1460{
1461 return page->recover && page->recover->raid_map;
1462}
1463
1464static int scrub_submit_raid56_bio_wait(struct btrfs_fs_info *fs_info,
1465 struct bio *bio,
1466 struct scrub_page *page)
1467{
1468 struct scrub_bio_ret done;
1469 int ret;
1470
1471 init_completion(&done.event);
1472 done.error = 0;
1473 bio->bi_iter.bi_sector = page->logical >> 9;
1474 bio->bi_private = &done;
1475 bio->bi_end_io = scrub_bio_wait_endio;
1476
1477 ret = raid56_parity_recover(fs_info->fs_root, bio, page->recover->bbio,
1478 page->recover->raid_map,
1479 page->recover->map_length,
1480 page->mirror_num, 0);
1481 if (ret)
1482 return ret;
1483
1484 wait_for_completion(&done.event);
1485 if (done.error)
1486 return -EIO;
1487
1488 return 0;
1489}
1490
1296/* 1491/*
1297 * this function will check the on disk data for checksum errors, header 1492 * this function will check the on disk data for checksum errors, header
1298 * errors and read I/O errors. If any I/O errors happen, the exact pages 1493 * errors and read I/O errors. If any I/O errors happen, the exact pages
@@ -1303,7 +1498,7 @@ leave_nomem:
1303static void scrub_recheck_block(struct btrfs_fs_info *fs_info, 1498static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1304 struct scrub_block *sblock, int is_metadata, 1499 struct scrub_block *sblock, int is_metadata,
1305 int have_csum, u8 *csum, u64 generation, 1500 int have_csum, u8 *csum, u64 generation,
1306 u16 csum_size) 1501 u16 csum_size, int retry_failed_mirror)
1307{ 1502{
1308 int page_num; 1503 int page_num;
1309 1504
@@ -1329,11 +1524,17 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1329 continue; 1524 continue;
1330 } 1525 }
1331 bio->bi_bdev = page->dev->bdev; 1526 bio->bi_bdev = page->dev->bdev;
1332 bio->bi_iter.bi_sector = page->physical >> 9;
1333 1527
1334 bio_add_page(bio, page->page, PAGE_SIZE, 0); 1528 bio_add_page(bio, page->page, PAGE_SIZE, 0);
1335 if (btrfsic_submit_bio_wait(READ, bio)) 1529 if (!retry_failed_mirror && scrub_is_page_on_raid56(page)) {
1336 sblock->no_io_error_seen = 0; 1530 if (scrub_submit_raid56_bio_wait(fs_info, bio, page))
1531 sblock->no_io_error_seen = 0;
1532 } else {
1533 bio->bi_iter.bi_sector = page->physical >> 9;
1534
1535 if (btrfsic_submit_bio_wait(READ, bio))
1536 sblock->no_io_error_seen = 0;
1537 }
1337 1538
1338 bio_put(bio); 1539 bio_put(bio);
1339 } 1540 }
@@ -1486,6 +1687,13 @@ static void scrub_write_block_to_dev_replace(struct scrub_block *sblock)
1486{ 1687{
1487 int page_num; 1688 int page_num;
1488 1689
1690	/*
1691	 * This block is used for checking the parity on the source device,
1692	 * so its data need not be written to the destination device.
1693	 */
1694 if (sblock->sparity)
1695 return;
1696
1489 for (page_num = 0; page_num < sblock->page_count; page_num++) { 1697 for (page_num = 0; page_num < sblock->page_count; page_num++) {
1490 int ret; 1698 int ret;
1491 1699
@@ -1867,6 +2075,9 @@ static void scrub_block_put(struct scrub_block *sblock)
1867 if (atomic_dec_and_test(&sblock->ref_count)) { 2075 if (atomic_dec_and_test(&sblock->ref_count)) {
1868 int i; 2076 int i;
1869 2077
2078 if (sblock->sparity)
2079 scrub_parity_put(sblock->sparity);
2080
1870 for (i = 0; i < sblock->page_count; i++) 2081 for (i = 0; i < sblock->page_count; i++)
1871 scrub_page_put(sblock->pagev[i]); 2082 scrub_page_put(sblock->pagev[i]);
1872 kfree(sblock); 2083 kfree(sblock);
@@ -2124,9 +2335,51 @@ static void scrub_bio_end_io_worker(struct btrfs_work *work)
2124 scrub_pending_bio_dec(sctx); 2335 scrub_pending_bio_dec(sctx);
2125} 2336}
2126 2337
2338static inline void __scrub_mark_bitmap(struct scrub_parity *sparity,
2339 unsigned long *bitmap,
2340 u64 start, u64 len)
2341{
2342 int offset;
2343 int nsectors;
2344 int sectorsize = sparity->sctx->dev_root->sectorsize;
2345
2346 if (len >= sparity->stripe_len) {
2347 bitmap_set(bitmap, 0, sparity->nsectors);
2348 return;
2349 }
2350
2351 start -= sparity->logic_start;
2352 offset = (int)do_div(start, sparity->stripe_len);
2353 offset /= sectorsize;
2354 nsectors = (int)len / sectorsize;
2355
2356 if (offset + nsectors <= sparity->nsectors) {
2357 bitmap_set(bitmap, offset, nsectors);
2358 return;
2359 }
2360
2361 bitmap_set(bitmap, offset, sparity->nsectors - offset);
2362 bitmap_set(bitmap, 0, nsectors - (sparity->nsectors - offset));
2363}
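
A worked example of the wrap-around above, assuming stripe_len = 64K and
sectorsize = 4K (so nsectors = 16): marking start = logic_start + 56K with
len = 16K covers sectors 14-15 and wraps to sectors 0-1:

	/* offset   = (56K mod 64K) / 4K = 14
	 * nsectors = 16K / 4K = 4, and 14 + 4 > 16, so we wrap */
	bitmap_set(bitmap, 14, 16 - 14);	/* sectors 14 and 15 */
	bitmap_set(bitmap, 0, 4 - (16 - 14));	/* sectors 0 and 1 */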
2364
2365static inline void scrub_parity_mark_sectors_error(struct scrub_parity *sparity,
2366 u64 start, u64 len)
2367{
2368 __scrub_mark_bitmap(sparity, sparity->ebitmap, start, len);
2369}
2370
2371static inline void scrub_parity_mark_sectors_data(struct scrub_parity *sparity,
2372 u64 start, u64 len)
2373{
2374 __scrub_mark_bitmap(sparity, sparity->dbitmap, start, len);
2375}
2376
2127static void scrub_block_complete(struct scrub_block *sblock) 2377static void scrub_block_complete(struct scrub_block *sblock)
2128{ 2378{
2379 int corrupted = 0;
2380
2129 if (!sblock->no_io_error_seen) { 2381 if (!sblock->no_io_error_seen) {
2382 corrupted = 1;
2130 scrub_handle_errored_block(sblock); 2383 scrub_handle_errored_block(sblock);
2131 } else { 2384 } else {
2132 /* 2385 /*
@@ -2134,9 +2387,19 @@ static void scrub_block_complete(struct scrub_block *sblock)
2134 * dev replace case, otherwise write here in dev replace 2387 * dev replace case, otherwise write here in dev replace
2135 * case. 2388 * case.
2136 */ 2389 */
2137 if (!scrub_checksum(sblock) && sblock->sctx->is_dev_replace) 2390 corrupted = scrub_checksum(sblock);
2391 if (!corrupted && sblock->sctx->is_dev_replace)
2138 scrub_write_block_to_dev_replace(sblock); 2392 scrub_write_block_to_dev_replace(sblock);
2139 } 2393 }
2394
2395 if (sblock->sparity && corrupted && !sblock->data_corrected) {
2396 u64 start = sblock->pagev[0]->logical;
2397 u64 end = sblock->pagev[sblock->page_count - 1]->logical +
2398 PAGE_SIZE;
2399
2400 scrub_parity_mark_sectors_error(sblock->sparity,
2401 start, end - start);
2402 }
2140} 2403}
2141 2404
2142static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len, 2405static int scrub_find_csum(struct scrub_ctx *sctx, u64 logical, u64 len,
@@ -2228,6 +2491,132 @@ behind_scrub_pages:
2228 return 0; 2491 return 0;
2229} 2492}
2230 2493
2494static int scrub_pages_for_parity(struct scrub_parity *sparity,
2495 u64 logical, u64 len,
2496 u64 physical, struct btrfs_device *dev,
2497 u64 flags, u64 gen, int mirror_num, u8 *csum)
2498{
2499 struct scrub_ctx *sctx = sparity->sctx;
2500 struct scrub_block *sblock;
2501 int index;
2502
2503 sblock = kzalloc(sizeof(*sblock), GFP_NOFS);
2504 if (!sblock) {
2505 spin_lock(&sctx->stat_lock);
2506 sctx->stat.malloc_errors++;
2507 spin_unlock(&sctx->stat_lock);
2508 return -ENOMEM;
2509 }
2510
2511 /* one ref inside this function, plus one for each page added to
2512 * a bio later on */
2513 atomic_set(&sblock->ref_count, 1);
2514 sblock->sctx = sctx;
2515 sblock->no_io_error_seen = 1;
2516 sblock->sparity = sparity;
2517 scrub_parity_get(sparity);
2518
2519 for (index = 0; len > 0; index++) {
2520 struct scrub_page *spage;
2521 u64 l = min_t(u64, len, PAGE_SIZE);
2522
2523 spage = kzalloc(sizeof(*spage), GFP_NOFS);
2524 if (!spage) {
2525leave_nomem:
2526 spin_lock(&sctx->stat_lock);
2527 sctx->stat.malloc_errors++;
2528 spin_unlock(&sctx->stat_lock);
2529 scrub_block_put(sblock);
2530 return -ENOMEM;
2531 }
2532 BUG_ON(index >= SCRUB_MAX_PAGES_PER_BLOCK);
2533 /* For scrub block */
2534 scrub_page_get(spage);
2535 sblock->pagev[index] = spage;
2536 /* For scrub parity */
2537 scrub_page_get(spage);
2538 list_add_tail(&spage->list, &sparity->spages);
2539 spage->sblock = sblock;
2540 spage->dev = dev;
2541 spage->flags = flags;
2542 spage->generation = gen;
2543 spage->logical = logical;
2544 spage->physical = physical;
2545 spage->mirror_num = mirror_num;
2546 if (csum) {
2547 spage->have_csum = 1;
2548 memcpy(spage->csum, csum, sctx->csum_size);
2549 } else {
2550 spage->have_csum = 0;
2551 }
2552 sblock->page_count++;
2553 spage->page = alloc_page(GFP_NOFS);
2554 if (!spage->page)
2555 goto leave_nomem;
2556 len -= l;
2557 logical += l;
2558 physical += l;
2559 }
2560
2561 WARN_ON(sblock->page_count == 0);
2562 for (index = 0; index < sblock->page_count; index++) {
2563 struct scrub_page *spage = sblock->pagev[index];
2564 int ret;
2565
2566 ret = scrub_add_page_to_rd_bio(sctx, spage);
2567 if (ret) {
2568 scrub_block_put(sblock);
2569 return ret;
2570 }
2571 }
2572
2573 /* last one frees, either here or in bio completion for last page */
2574 scrub_block_put(sblock);
2575 return 0;
2576}
2577
2578static int scrub_extent_for_parity(struct scrub_parity *sparity,
2579 u64 logical, u64 len,
2580 u64 physical, struct btrfs_device *dev,
2581 u64 flags, u64 gen, int mirror_num)
2582{
2583 struct scrub_ctx *sctx = sparity->sctx;
2584	int ret = 0;
2585 u8 csum[BTRFS_CSUM_SIZE];
2586 u32 blocksize;
2587
2588 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2589 blocksize = sctx->sectorsize;
2590 } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
2591 blocksize = sctx->nodesize;
2592 } else {
2593 blocksize = sctx->sectorsize;
2594 WARN_ON(1);
2595 }
2596
2597 while (len) {
2598 u64 l = min_t(u64, len, blocksize);
2599 int have_csum = 0;
2600
2601 if (flags & BTRFS_EXTENT_FLAG_DATA) {
2602 /* push csums to sbio */
2603 have_csum = scrub_find_csum(sctx, logical, l, csum);
2604 if (have_csum == 0)
2605 goto skip;
2606 }
2607 ret = scrub_pages_for_parity(sparity, logical, l, physical, dev,
2608 flags, gen, mirror_num,
2609 have_csum ? csum : NULL);
2610skip:
2611 if (ret)
2612 return ret;
2613 len -= l;
2614 logical += l;
2615 physical += l;
2616 }
2617 return 0;
2618}
2619
2231/* 2620/*
2232 * Given a physical address, this will calculate its 2621 * Given a physical address, this will calculate its
2233 * logical offset. If this is a parity stripe, it will return 2622 * logical offset. If this is a parity stripe, it will return
@@ -2236,7 +2625,8 @@ behind_scrub_pages:
2236 * return 0 if it is a data stripe, 1 means parity stripe. 2625 * return 0 if it is a data stripe, 1 means parity stripe.
2237 */ 2626 */
2238static int get_raid56_logic_offset(u64 physical, int num, 2627static int get_raid56_logic_offset(u64 physical, int num,
2239 struct map_lookup *map, u64 *offset) 2628 struct map_lookup *map, u64 *offset,
2629 u64 *stripe_start)
2240{ 2630{
2241 int i; 2631 int i;
2242 int j = 0; 2632 int j = 0;
@@ -2247,6 +2637,9 @@ static int get_raid56_logic_offset(u64 physical, int num,
2247 2637
2248 last_offset = (physical - map->stripes[num].physical) * 2638 last_offset = (physical - map->stripes[num].physical) *
2249 nr_data_stripes(map); 2639 nr_data_stripes(map);
2640 if (stripe_start)
2641 *stripe_start = last_offset;
2642
2250 *offset = last_offset; 2643 *offset = last_offset;
2251 for (i = 0; i < nr_data_stripes(map); i++) { 2644 for (i = 0; i < nr_data_stripes(map); i++) {
2252 *offset = last_offset + i * map->stripe_len; 2645 *offset = last_offset + i * map->stripe_len;
@@ -2269,13 +2662,330 @@ static int get_raid56_logic_offset(u64 physical, int num,
2269 return 1; 2662 return 1;
2270} 2663}
2271 2664
2665static void scrub_free_parity(struct scrub_parity *sparity)
2666{
2667 struct scrub_ctx *sctx = sparity->sctx;
2668 struct scrub_page *curr, *next;
2669 int nbits;
2670
2671 nbits = bitmap_weight(sparity->ebitmap, sparity->nsectors);
2672 if (nbits) {
2673 spin_lock(&sctx->stat_lock);
2674 sctx->stat.read_errors += nbits;
2675 sctx->stat.uncorrectable_errors += nbits;
2676 spin_unlock(&sctx->stat_lock);
2677 }
2678
2679 list_for_each_entry_safe(curr, next, &sparity->spages, list) {
2680 list_del_init(&curr->list);
2681 scrub_page_put(curr);
2682 }
2683
2684 kfree(sparity);
2685}
2686
2687static void scrub_parity_bio_endio(struct bio *bio, int error)
2688{
2689 struct scrub_parity *sparity = (struct scrub_parity *)bio->bi_private;
2690 struct scrub_ctx *sctx = sparity->sctx;
2691
2692 if (error)
2693 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2694 sparity->nsectors);
2695
2696 scrub_free_parity(sparity);
2697 scrub_pending_bio_dec(sctx);
2698 bio_put(bio);
2699}
2700
2701static void scrub_parity_check_and_repair(struct scrub_parity *sparity)
2702{
2703 struct scrub_ctx *sctx = sparity->sctx;
2704 struct bio *bio;
2705 struct btrfs_raid_bio *rbio;
2706 struct scrub_page *spage;
2707 struct btrfs_bio *bbio = NULL;
2708 u64 *raid_map = NULL;
2709 u64 length;
2710 int ret;
2711
2712 if (!bitmap_andnot(sparity->dbitmap, sparity->dbitmap, sparity->ebitmap,
2713 sparity->nsectors))
2714 goto out;
2715
2716 length = sparity->logic_end - sparity->logic_start + 1;
2717 ret = btrfs_map_sblock(sctx->dev_root->fs_info, WRITE,
2718 sparity->logic_start,
2719 &length, &bbio, 0, &raid_map);
2720 if (ret || !bbio || !raid_map)
2721 goto bbio_out;
2722
2723 bio = btrfs_io_bio_alloc(GFP_NOFS, 0);
2724 if (!bio)
2725 goto bbio_out;
2726
2727 bio->bi_iter.bi_sector = sparity->logic_start >> 9;
2728 bio->bi_private = sparity;
2729 bio->bi_end_io = scrub_parity_bio_endio;
2730
2731 rbio = raid56_parity_alloc_scrub_rbio(sctx->dev_root, bio, bbio,
2732 raid_map, length,
2733 sparity->scrub_dev,
2734 sparity->dbitmap,
2735 sparity->nsectors);
2736 if (!rbio)
2737 goto rbio_out;
2738
2739 list_for_each_entry(spage, &sparity->spages, list)
2740 raid56_parity_add_scrub_pages(rbio, spage->page,
2741 spage->logical);
2742
2743 scrub_pending_bio_inc(sctx);
2744 raid56_parity_submit_scrub_rbio(rbio);
2745 return;
2746
2747rbio_out:
2748 bio_put(bio);
2749bbio_out:
2750 kfree(bbio);
2751 kfree(raid_map);
2752 bitmap_or(sparity->ebitmap, sparity->ebitmap, sparity->dbitmap,
2753 sparity->nsectors);
2754 spin_lock(&sctx->stat_lock);
2755 sctx->stat.malloc_errors++;
2756 spin_unlock(&sctx->stat_lock);
2757out:
2758 scrub_free_parity(sparity);
2759}
2760
2761static inline int scrub_calc_parity_bitmap_len(int nsectors)
2762{
2763 return DIV_ROUND_UP(nsectors, BITS_PER_LONG) * (BITS_PER_LONG / 8);
2764}
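
The helper above rounds the per-stripe sector count up to whole longs and
returns a byte count; for example, with nsectors = 16 on a 64-bit kernel:

	/* DIV_ROUND_UP(16, 64) * (64 / 8) == 1 * 8 == 8 bytes per bitmap;
	 * scrub_raid56_parity() below allocates 2 * 8 == 16 trailing bytes
	 * to hold dbitmap and ebitmap back to back */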
2765
2766static void scrub_parity_get(struct scrub_parity *sparity)
2767{
2768 atomic_inc(&sparity->ref_count);
2769}
2770
2771static void scrub_parity_put(struct scrub_parity *sparity)
2772{
2773 if (!atomic_dec_and_test(&sparity->ref_count))
2774 return;
2775
2776 scrub_parity_check_and_repair(sparity);
2777}
2778
2779static noinline_for_stack int scrub_raid56_parity(struct scrub_ctx *sctx,
2780 struct map_lookup *map,
2781 struct btrfs_device *sdev,
2782 struct btrfs_path *path,
2783 u64 logic_start,
2784 u64 logic_end)
2785{
2786 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2787 struct btrfs_root *root = fs_info->extent_root;
2788 struct btrfs_root *csum_root = fs_info->csum_root;
2789 struct btrfs_extent_item *extent;
2790 u64 flags;
2791 int ret;
2792 int slot;
2793 struct extent_buffer *l;
2794 struct btrfs_key key;
2795 u64 generation;
2796 u64 extent_logical;
2797 u64 extent_physical;
2798 u64 extent_len;
2799 struct btrfs_device *extent_dev;
2800 struct scrub_parity *sparity;
2801 int nsectors;
2802 int bitmap_len;
2803 int extent_mirror_num;
2804 int stop_loop = 0;
2805
2806 nsectors = map->stripe_len / root->sectorsize;
2807 bitmap_len = scrub_calc_parity_bitmap_len(nsectors);
2808 sparity = kzalloc(sizeof(struct scrub_parity) + 2 * bitmap_len,
2809 GFP_NOFS);
2810 if (!sparity) {
2811 spin_lock(&sctx->stat_lock);
2812 sctx->stat.malloc_errors++;
2813 spin_unlock(&sctx->stat_lock);
2814 return -ENOMEM;
2815 }
2816
2817 sparity->stripe_len = map->stripe_len;
2818 sparity->nsectors = nsectors;
2819 sparity->sctx = sctx;
2820 sparity->scrub_dev = sdev;
2821 sparity->logic_start = logic_start;
2822 sparity->logic_end = logic_end;
2823 atomic_set(&sparity->ref_count, 1);
2824 INIT_LIST_HEAD(&sparity->spages);
2825 sparity->dbitmap = sparity->bitmap;
2826 sparity->ebitmap = (void *)sparity->bitmap + bitmap_len;
2827
2828 ret = 0;
2829 while (logic_start < logic_end) {
2830 if (btrfs_fs_incompat(fs_info, SKINNY_METADATA))
2831 key.type = BTRFS_METADATA_ITEM_KEY;
2832 else
2833 key.type = BTRFS_EXTENT_ITEM_KEY;
2834 key.objectid = logic_start;
2835 key.offset = (u64)-1;
2836
2837 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
2838 if (ret < 0)
2839 goto out;
2840
2841 if (ret > 0) {
2842 ret = btrfs_previous_extent_item(root, path, 0);
2843 if (ret < 0)
2844 goto out;
2845 if (ret > 0) {
2846 btrfs_release_path(path);
2847 ret = btrfs_search_slot(NULL, root, &key,
2848 path, 0, 0);
2849 if (ret < 0)
2850 goto out;
2851 }
2852 }
2853
2854 stop_loop = 0;
2855 while (1) {
2856 u64 bytes;
2857
2858 l = path->nodes[0];
2859 slot = path->slots[0];
2860 if (slot >= btrfs_header_nritems(l)) {
2861 ret = btrfs_next_leaf(root, path);
2862 if (ret == 0)
2863 continue;
2864 if (ret < 0)
2865 goto out;
2866
2867 stop_loop = 1;
2868 break;
2869 }
2870 btrfs_item_key_to_cpu(l, &key, slot);
2871
2872 if (key.type == BTRFS_METADATA_ITEM_KEY)
2873 bytes = root->nodesize;
2874 else
2875 bytes = key.offset;
2876
2877 if (key.objectid + bytes <= logic_start)
2878 goto next;
2879
2880 if (key.type != BTRFS_EXTENT_ITEM_KEY &&
2881 key.type != BTRFS_METADATA_ITEM_KEY)
2882 goto next;
2883
2884 if (key.objectid > logic_end) {
2885 stop_loop = 1;
2886 break;
2887 }
2888
2889 while (key.objectid >= logic_start + map->stripe_len)
2890 logic_start += map->stripe_len;
2891
2892 extent = btrfs_item_ptr(l, slot,
2893 struct btrfs_extent_item);
2894 flags = btrfs_extent_flags(l, extent);
2895 generation = btrfs_extent_generation(l, extent);
2896
2897 if (key.objectid < logic_start &&
2898 (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) {
2899 btrfs_err(fs_info,
2900 "scrub: tree block %llu spanning stripes, ignored. logical=%llu",
2901 key.objectid, logic_start);
2902 goto next;
2903 }
2904again:
2905 extent_logical = key.objectid;
2906 extent_len = bytes;
2907
2908 if (extent_logical < logic_start) {
2909 extent_len -= logic_start - extent_logical;
2910 extent_logical = logic_start;
2911 }
2912
2913 if (extent_logical + extent_len >
2914 logic_start + map->stripe_len)
2915 extent_len = logic_start + map->stripe_len -
2916 extent_logical;
2917
2918 scrub_parity_mark_sectors_data(sparity, extent_logical,
2919 extent_len);
2920
2921 scrub_remap_extent(fs_info, extent_logical,
2922 extent_len, &extent_physical,
2923 &extent_dev,
2924 &extent_mirror_num);
2925
2926 ret = btrfs_lookup_csums_range(csum_root,
2927 extent_logical,
2928 extent_logical + extent_len - 1,
2929 &sctx->csum_list, 1);
2930 if (ret)
2931 goto out;
2932
2933 ret = scrub_extent_for_parity(sparity, extent_logical,
2934 extent_len,
2935 extent_physical,
2936 extent_dev, flags,
2937 generation,
2938 extent_mirror_num);
2939 if (ret)
2940 goto out;
2941
2942 scrub_free_csums(sctx);
2943 if (extent_logical + extent_len <
2944 key.objectid + bytes) {
2945 logic_start += map->stripe_len;
2946
2947 if (logic_start >= logic_end) {
2948 stop_loop = 1;
2949 break;
2950 }
2951
2952 if (logic_start < key.objectid + bytes) {
2953 cond_resched();
2954 goto again;
2955 }
2956 }
2957next:
2958 path->slots[0]++;
2959 }
2960
2961 btrfs_release_path(path);
2962
2963 if (stop_loop)
2964 break;
2965
2966 logic_start += map->stripe_len;
2967 }
2968out:
2969 if (ret < 0)
2970 scrub_parity_mark_sectors_error(sparity, logic_start,
2971 logic_end - logic_start + 1);
2972 scrub_parity_put(sparity);
2973 scrub_submit(sctx);
2974 mutex_lock(&sctx->wr_ctx.wr_lock);
2975 scrub_wr_submit(sctx);
2976 mutex_unlock(&sctx->wr_ctx.wr_lock);
2977
2978 btrfs_release_path(path);
2979 return ret < 0 ? ret : 0;
2980}
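
For orientation, the overall shape of the function above as a simplified
sketch (not the exact control flow):

	/*
	 * alloc sparity; dbitmap and ebitmap trail the struct
	 * for each stripe_len window in [logic_start, logic_end):
	 *	walk the extent items overlapping the window
	 *	scrub_parity_mark_sectors_data() for their sectors
	 *	scrub_extent_for_parity() to queue reads and csum checks
	 * scrub_parity_put() -- the last ref kicks check-and-repair
	 */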
2981
2272static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx, 2982static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2273 struct map_lookup *map, 2983 struct map_lookup *map,
2274 struct btrfs_device *scrub_dev, 2984 struct btrfs_device *scrub_dev,
2275 int num, u64 base, u64 length, 2985 int num, u64 base, u64 length,
2276 int is_dev_replace) 2986 int is_dev_replace)
2277{ 2987{
2278 struct btrfs_path *path; 2988 struct btrfs_path *path, *ppath;
2279 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info; 2989 struct btrfs_fs_info *fs_info = sctx->dev_root->fs_info;
2280 struct btrfs_root *root = fs_info->extent_root; 2990 struct btrfs_root *root = fs_info->extent_root;
2281 struct btrfs_root *csum_root = fs_info->csum_root; 2991 struct btrfs_root *csum_root = fs_info->csum_root;
@@ -2302,6 +3012,8 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2302 u64 extent_logical; 3012 u64 extent_logical;
2303 u64 extent_physical; 3013 u64 extent_physical;
2304 u64 extent_len; 3014 u64 extent_len;
3015 u64 stripe_logical;
3016 u64 stripe_end;
2305 struct btrfs_device *extent_dev; 3017 struct btrfs_device *extent_dev;
2306 int extent_mirror_num; 3018 int extent_mirror_num;
2307 int stop_loop = 0; 3019 int stop_loop = 0;
@@ -2327,7 +3039,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2327 mirror_num = num % map->num_stripes + 1; 3039 mirror_num = num % map->num_stripes + 1;
2328 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | 3040 } else if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
2329 BTRFS_BLOCK_GROUP_RAID6)) { 3041 BTRFS_BLOCK_GROUP_RAID6)) {
2330 get_raid56_logic_offset(physical, num, map, &offset); 3042 get_raid56_logic_offset(physical, num, map, &offset, NULL);
2331 increment = map->stripe_len * nr_data_stripes(map); 3043 increment = map->stripe_len * nr_data_stripes(map);
2332 mirror_num = 1; 3044 mirror_num = 1;
2333 } else { 3045 } else {
@@ -2339,6 +3051,12 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2339 if (!path) 3051 if (!path)
2340 return -ENOMEM; 3052 return -ENOMEM;
2341 3053
3054 ppath = btrfs_alloc_path();
3055 if (!ppath) {
3056		btrfs_free_path(path);
3057 return -ENOMEM;
3058 }
3059
2342 /* 3060 /*
2343 * work on commit root. The related disk blocks are static as 3061 * work on commit root. The related disk blocks are static as
2344 * long as COW is applied. This means it is safe to rewrite 3062 * long as COW is applied. This means it is safe to rewrite
@@ -2357,7 +3075,7 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2357 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | 3075 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
2358 BTRFS_BLOCK_GROUP_RAID6)) { 3076 BTRFS_BLOCK_GROUP_RAID6)) {
2359 get_raid56_logic_offset(physical_end, num, 3077 get_raid56_logic_offset(physical_end, num,
2360 map, &logic_end); 3078 map, &logic_end, NULL);
2361 logic_end += base; 3079 logic_end += base;
2362 } else { 3080 } else {
2363 logic_end = logical + increment * nstripes; 3081 logic_end = logical + increment * nstripes;
@@ -2404,10 +3122,18 @@ static noinline_for_stack int scrub_stripe(struct scrub_ctx *sctx,
2404 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 | 3122 if (map->type & (BTRFS_BLOCK_GROUP_RAID5 |
2405 BTRFS_BLOCK_GROUP_RAID6)) { 3123 BTRFS_BLOCK_GROUP_RAID6)) {
2406 ret = get_raid56_logic_offset(physical, num, 3124 ret = get_raid56_logic_offset(physical, num,
2407 map, &logical); 3125 map, &logical, &stripe_logical);
2408 logical += base; 3126 logical += base;
2409 if (ret) 3127 if (ret) {
3128 stripe_logical += base;
3129 stripe_end = stripe_logical + increment - 1;
3130 ret = scrub_raid56_parity(sctx, map, scrub_dev,
3131 ppath, stripe_logical,
3132 stripe_end);
3133 if (ret)
3134 goto out;
2410 goto skip; 3135 goto skip;
3136 }
2411 } 3137 }
2412 /* 3138 /*
2413 * canceled? 3139 * canceled?
@@ -2558,13 +3284,25 @@ again:
2558 * loop until we find next data stripe 3284 * loop until we find next data stripe
2559 * or we have finished all stripes. 3285 * or we have finished all stripes.
2560 */ 3286 */
2561 do { 3287loop:
2562 physical += map->stripe_len; 3288 physical += map->stripe_len;
2563 ret = get_raid56_logic_offset( 3289 ret = get_raid56_logic_offset(physical,
2564 physical, num, 3290 num, map, &logical,
2565 map, &logical); 3291 &stripe_logical);
2566 logical += base; 3292 logical += base;
2567 } while (physical < physical_end && ret); 3293
3294 if (ret && physical < physical_end) {
3295 stripe_logical += base;
3296 stripe_end = stripe_logical +
3297 increment - 1;
3298 ret = scrub_raid56_parity(sctx,
3299 map, scrub_dev, ppath,
3300 stripe_logical,
3301 stripe_end);
3302 if (ret)
3303 goto out;
3304 goto loop;
3305 }
2568 } else { 3306 } else {
2569 physical += map->stripe_len; 3307 physical += map->stripe_len;
2570 logical += increment; 3308 logical += increment;
@@ -2605,6 +3343,7 @@ out:
2605 3343
2606 blk_finish_plug(&plug); 3344 blk_finish_plug(&plug);
2607 btrfs_free_path(path); 3345 btrfs_free_path(path);
3346 btrfs_free_path(ppath);
2608 return ret < 0 ? ret : 0; 3347 return ret < 0 ? ret : 0;
2609} 3348}
2610 3349
@@ -3310,6 +4049,50 @@ out:
3310 scrub_pending_trans_workers_dec(sctx); 4049 scrub_pending_trans_workers_dec(sctx);
3311} 4050}
3312 4051
4052static int check_extent_to_block(struct inode *inode, u64 start, u64 len,
4053 u64 logical)
4054{
4055 struct extent_state *cached_state = NULL;
4056 struct btrfs_ordered_extent *ordered;
4057 struct extent_io_tree *io_tree;
4058 struct extent_map *em;
4059 u64 lockstart = start, lockend = start + len - 1;
4060 int ret = 0;
4061
4062 io_tree = &BTRFS_I(inode)->io_tree;
4063
4064 lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state);
4065 ordered = btrfs_lookup_ordered_range(inode, lockstart, len);
4066 if (ordered) {
4067 btrfs_put_ordered_extent(ordered);
4068 ret = 1;
4069 goto out_unlock;
4070 }
4071
4072 em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
4073 if (IS_ERR(em)) {
4074 ret = PTR_ERR(em);
4075 goto out_unlock;
4076 }
4077
4078 /*
4079 * This extent does not actually cover the logical extent anymore,
4080 * move on to the next inode.
4081 */
4082 if (em->block_start > logical ||
4083 em->block_start + em->block_len < logical + len) {
4084 free_extent_map(em);
4085 ret = 1;
4086 goto out_unlock;
4087 }
4088 free_extent_map(em);
4089
4090out_unlock:
4091 unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
4092 GFP_NOFS);
4093 return ret;
4094}
4095
3313static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, 4096static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
3314 struct scrub_copy_nocow_ctx *nocow_ctx) 4097 struct scrub_copy_nocow_ctx *nocow_ctx)
3315{ 4098{
@@ -3318,13 +4101,10 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
3318 struct inode *inode; 4101 struct inode *inode;
3319 struct page *page; 4102 struct page *page;
3320 struct btrfs_root *local_root; 4103 struct btrfs_root *local_root;
3321 struct btrfs_ordered_extent *ordered;
3322 struct extent_map *em;
3323 struct extent_state *cached_state = NULL;
3324 struct extent_io_tree *io_tree; 4104 struct extent_io_tree *io_tree;
3325 u64 physical_for_dev_replace; 4105 u64 physical_for_dev_replace;
4106 u64 nocow_ctx_logical;
3326 u64 len = nocow_ctx->len; 4107 u64 len = nocow_ctx->len;
3327 u64 lockstart = offset, lockend = offset + len - 1;
3328 unsigned long index; 4108 unsigned long index;
3329 int srcu_index; 4109 int srcu_index;
3330 int ret = 0; 4110 int ret = 0;
@@ -3356,30 +4136,13 @@ static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
3356 4136
3357 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace; 4137 physical_for_dev_replace = nocow_ctx->physical_for_dev_replace;
3358 io_tree = &BTRFS_I(inode)->io_tree; 4138 io_tree = &BTRFS_I(inode)->io_tree;
4139 nocow_ctx_logical = nocow_ctx->logical;
3359 4140
3360 lock_extent_bits(io_tree, lockstart, lockend, 0, &cached_state); 4141 ret = check_extent_to_block(inode, offset, len, nocow_ctx_logical);
3361 ordered = btrfs_lookup_ordered_range(inode, lockstart, len); 4142 if (ret) {
3362 if (ordered) { 4143 ret = ret > 0 ? 0 : ret;
3363 btrfs_put_ordered_extent(ordered); 4144 goto out;
3364 goto out_unlock;
3365 }
3366
3367 em = btrfs_get_extent(inode, NULL, 0, lockstart, len, 0);
3368 if (IS_ERR(em)) {
3369 ret = PTR_ERR(em);
3370 goto out_unlock;
3371 }
3372
3373 /*
3374 * This extent does not actually cover the logical extent anymore,
3375 * move on to the next inode.
3376 */
3377 if (em->block_start > nocow_ctx->logical ||
3378 em->block_start + em->block_len < nocow_ctx->logical + len) {
3379 free_extent_map(em);
3380 goto out_unlock;
3381 } 4145 }
3382 free_extent_map(em);
3383 4146
3384 while (len >= PAGE_CACHE_SIZE) { 4147 while (len >= PAGE_CACHE_SIZE) {
3385 index = offset >> PAGE_CACHE_SHIFT; 4148 index = offset >> PAGE_CACHE_SHIFT;
@@ -3396,7 +4159,7 @@ again:
3396 goto next_page; 4159 goto next_page;
3397 } else { 4160 } else {
3398 ClearPageError(page); 4161 ClearPageError(page);
3399 err = extent_read_full_page_nolock(io_tree, page, 4162 err = extent_read_full_page(io_tree, page,
3400 btrfs_get_extent, 4163 btrfs_get_extent,
3401 nocow_ctx->mirror_num); 4164 nocow_ctx->mirror_num);
3402 if (err) { 4165 if (err) {
@@ -3421,6 +4184,14 @@ again:
3421 goto next_page; 4184 goto next_page;
3422 } 4185 }
3423 } 4186 }
4187
4188 ret = check_extent_to_block(inode, offset, len,
4189 nocow_ctx_logical);
4190 if (ret) {
4191 ret = ret > 0 ? 0 : ret;
4192 goto next_page;
4193 }
4194
3424 err = write_page_nocow(nocow_ctx->sctx, 4195 err = write_page_nocow(nocow_ctx->sctx,
3425 physical_for_dev_replace, page); 4196 physical_for_dev_replace, page);
3426 if (err) 4197 if (err)
@@ -3434,12 +4205,10 @@ next_page:
3434 4205
3435 offset += PAGE_CACHE_SIZE; 4206 offset += PAGE_CACHE_SIZE;
3436 physical_for_dev_replace += PAGE_CACHE_SIZE; 4207 physical_for_dev_replace += PAGE_CACHE_SIZE;
4208 nocow_ctx_logical += PAGE_CACHE_SIZE;
3437 len -= PAGE_CACHE_SIZE; 4209 len -= PAGE_CACHE_SIZE;
3438 } 4210 }
3439 ret = COPY_COMPLETE; 4211 ret = COPY_COMPLETE;
3440out_unlock:
3441 unlock_extent_cached(io_tree, lockstart, lockend, &cached_state,
3442 GFP_NOFS);
3443out: 4212out:
3444 mutex_unlock(&inode->i_mutex); 4213 mutex_unlock(&inode->i_mutex);
3445 iput(inode); 4214 iput(inode);
diff --git a/fs/btrfs/send.c b/fs/btrfs/send.c
index 874828dd0a86..804432dbc351 100644
--- a/fs/btrfs/send.c
+++ b/fs/btrfs/send.c
@@ -5507,6 +5507,51 @@ out:
5507 return ret; 5507 return ret;
5508} 5508}
5509 5509
5510/*
5511 * If orphan cleanup did remove any orphans from a root, it means the tree
5512 * was modified and therefore the commit root is not the same as the current
5513 * root anymore. This is a problem, because send uses the commit root and
5514 * therefore can see inode items that don't exist in the current root anymore,
5515 * and for example make calls to btrfs_iget, which will do tree lookups based
5516 * on the current root and not on the commit root. Those lookups will fail,
5517 * returning a -ESTALE error, and making send fail with that error. So make
5518 * sure a send does not see any orphans we have just removed, and that it will
5519 * see the same inodes regardless of whether a transaction commit happened
5520 * before it started (meaning that the commit root will be the same as the
5521 * current root) or not.
5522 */
5523static int ensure_commit_roots_uptodate(struct send_ctx *sctx)
5524{
5525 int i;
5526 struct btrfs_trans_handle *trans = NULL;
5527
5528again:
5529 if (sctx->parent_root &&
5530 sctx->parent_root->node != sctx->parent_root->commit_root)
5531 goto commit_trans;
5532
5533 for (i = 0; i < sctx->clone_roots_cnt; i++)
5534 if (sctx->clone_roots[i].root->node !=
5535 sctx->clone_roots[i].root->commit_root)
5536 goto commit_trans;
5537
5538 if (trans)
5539 return btrfs_end_transaction(trans, sctx->send_root);
5540
5541 return 0;
5542
5543commit_trans:
5544 /* Use any root, all fs roots will get their commit roots updated. */
5545 if (!trans) {
5546 trans = btrfs_join_transaction(sctx->send_root);
5547 if (IS_ERR(trans))
5548 return PTR_ERR(trans);
5549 goto again;
5550 }
5551
5552 return btrfs_commit_transaction(trans, sctx->send_root);
5553}
5554
5510static void btrfs_root_dec_send_in_progress(struct btrfs_root* root) 5555static void btrfs_root_dec_send_in_progress(struct btrfs_root* root)
5511{ 5556{
5512 spin_lock(&root->root_item_lock); 5557 spin_lock(&root->root_item_lock);
@@ -5728,6 +5773,10 @@ long btrfs_ioctl_send(struct file *mnt_file, void __user *arg_)
5728 NULL); 5773 NULL);
5729 sort_clone_roots = 1; 5774 sort_clone_roots = 1;
5730 5775
5776 ret = ensure_commit_roots_uptodate(sctx);
5777 if (ret)
5778 goto out;
5779
5731 current->journal_info = BTRFS_SEND_TRANS_STUB; 5780 current->journal_info = BTRFS_SEND_TRANS_STUB;
5732 ret = send_subvol(sctx); 5781 ret = send_subvol(sctx);
5733 current->journal_info = NULL; 5782 current->journal_info = NULL;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 54bd91ece35b..60f7cbe815e9 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -262,7 +262,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
262 trans->aborted = errno; 262 trans->aborted = errno;
263 /* Nothing used. The other threads that have joined this 263 /* Nothing used. The other threads that have joined this
264 * transaction may be able to continue. */ 264 * transaction may be able to continue. */
265 if (!trans->blocks_used) { 265 if (!trans->blocks_used && list_empty(&trans->new_bgs)) {
266 const char *errstr; 266 const char *errstr;
267 267
268 errstr = btrfs_decode_error(errno); 268 errstr = btrfs_decode_error(errno);
@@ -642,11 +642,11 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
642 "disabling disk space caching"); 642 "disabling disk space caching");
643 break; 643 break;
644 case Opt_inode_cache: 644 case Opt_inode_cache:
645 btrfs_set_and_info(root, CHANGE_INODE_CACHE, 645 btrfs_set_pending_and_info(info, INODE_MAP_CACHE,
646 "enabling inode map caching"); 646 "enabling inode map caching");
647 break; 647 break;
648 case Opt_noinode_cache: 648 case Opt_noinode_cache:
649 btrfs_clear_and_info(root, CHANGE_INODE_CACHE, 649 btrfs_clear_pending_and_info(info, INODE_MAP_CACHE,
650 "disabling inode map caching"); 650 "disabling inode map caching");
651 break; 651 break;
652 case Opt_clear_cache: 652 case Opt_clear_cache:
@@ -993,9 +993,17 @@ int btrfs_sync_fs(struct super_block *sb, int wait)
993 trans = btrfs_attach_transaction_barrier(root); 993 trans = btrfs_attach_transaction_barrier(root);
994 if (IS_ERR(trans)) { 994 if (IS_ERR(trans)) {
995 /* no transaction, don't bother */ 995 /* no transaction, don't bother */
996 if (PTR_ERR(trans) == -ENOENT) 996 if (PTR_ERR(trans) == -ENOENT) {
997 return 0; 997 /*
998 return PTR_ERR(trans); 998 * Exit unless we have some pending changes
999 * that need to go through commit
1000 */
1001 if (fs_info->pending_changes == 0)
1002 return 0;
1003 trans = btrfs_start_transaction(root, 0);
1004 } else {
1005 return PTR_ERR(trans);
1006 }
999 } 1007 }
1000 return btrfs_commit_transaction(trans, root); 1008 return btrfs_commit_transaction(trans, root);
1001} 1009}
@@ -1644,8 +1652,20 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
1644 int i = 0, nr_devices; 1652 int i = 0, nr_devices;
1645 int ret; 1653 int ret;
1646 1654
1655 /*
1656	 * We aren't under the device list lock, so this is racy-ish, but good
1657 * enough for our purposes.
1658 */
1647 nr_devices = fs_info->fs_devices->open_devices; 1659 nr_devices = fs_info->fs_devices->open_devices;
1648 BUG_ON(!nr_devices); 1660 if (!nr_devices) {
1661 smp_mb();
1662 nr_devices = fs_info->fs_devices->open_devices;
1663 ASSERT(nr_devices);
1664 if (!nr_devices) {
1665 *free_bytes = 0;
1666 return 0;
1667 }
1668 }
1649 1669
1650 devices_info = kmalloc_array(nr_devices, sizeof(*devices_info), 1670 devices_info = kmalloc_array(nr_devices, sizeof(*devices_info),
1651 GFP_NOFS); 1671 GFP_NOFS);
@@ -1670,11 +1690,17 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
1670 else 1690 else
1671 min_stripe_size = BTRFS_STRIPE_LEN; 1691 min_stripe_size = BTRFS_STRIPE_LEN;
1672 1692
1673 list_for_each_entry(device, &fs_devices->devices, dev_list) { 1693 if (fs_info->alloc_start)
1694 mutex_lock(&fs_devices->device_list_mutex);
1695 rcu_read_lock();
1696 list_for_each_entry_rcu(device, &fs_devices->devices, dev_list) {
1674 if (!device->in_fs_metadata || !device->bdev || 1697 if (!device->in_fs_metadata || !device->bdev ||
1675 device->is_tgtdev_for_dev_replace) 1698 device->is_tgtdev_for_dev_replace)
1676 continue; 1699 continue;
1677 1700
1701 if (i >= nr_devices)
1702 break;
1703
1678 avail_space = device->total_bytes - device->bytes_used; 1704 avail_space = device->total_bytes - device->bytes_used;
1679 1705
1680 /* align with stripe_len */ 1706 /* align with stripe_len */
@@ -1689,24 +1715,32 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
1689 skip_space = 1024 * 1024; 1715 skip_space = 1024 * 1024;
1690 1716
1691 /* user can set the offset in fs_info->alloc_start. */ 1717 /* user can set the offset in fs_info->alloc_start. */
1692 if (fs_info->alloc_start + BTRFS_STRIPE_LEN <= 1718 if (fs_info->alloc_start &&
1693 device->total_bytes) 1719 fs_info->alloc_start + BTRFS_STRIPE_LEN <=
1720 device->total_bytes) {
1721 rcu_read_unlock();
1694 skip_space = max(fs_info->alloc_start, skip_space); 1722 skip_space = max(fs_info->alloc_start, skip_space);
1695 1723
1696 /* 1724 /*
1697 * btrfs can not use the free space in [0, skip_space - 1], 1725 * btrfs can not use the free space in
1698 * we must subtract it from the total. In order to implement 1726 * [0, skip_space - 1], we must subtract it from the
1699 * it, we account the used space in this range first. 1727 * total. In order to implement it, we account the used
1700 */ 1728 * space in this range first.
1701 ret = btrfs_account_dev_extents_size(device, 0, skip_space - 1, 1729 */
1702 &used_space); 1730 ret = btrfs_account_dev_extents_size(device, 0,
1703 if (ret) { 1731 skip_space - 1,
1704 kfree(devices_info); 1732 &used_space);
1705 return ret; 1733 if (ret) {
1706 } 1734 kfree(devices_info);
1735 mutex_unlock(&fs_devices->device_list_mutex);
1736 return ret;
1737 }
1707 1738
1708 /* calc the free space in [0, skip_space - 1] */ 1739 rcu_read_lock();
1709 skip_space -= used_space; 1740
1741 /* calc the free space in [0, skip_space - 1] */
1742 skip_space -= used_space;
1743 }
1710 1744
1711 /* 1745 /*
1712 * we can use the free space in [0, skip_space - 1], subtract 1746 * we can use the free space in [0, skip_space - 1], subtract
@@ -1725,6 +1759,9 @@ static int btrfs_calc_avail_data_space(struct btrfs_root *root, u64 *free_bytes)
1725 1759
1726 i++; 1760 i++;
1727 } 1761 }
1762 rcu_read_unlock();
1763 if (fs_info->alloc_start)
1764 mutex_unlock(&fs_devices->device_list_mutex);
1728 1765
1729 nr_devices = i; 1766 nr_devices = i;
1730 1767
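
Two locking details in the loop above are worth spelling out: device_list_mutex is only taken when fs_info->alloc_start is set, because only that path drops rcu_read_lock() to call the blocking btrfs_account_dev_extents_size(), and holding the mutex is what keeps the current list entry alive across the unlock/relock. A condensed sketch of the pattern, with generic names rather than the btrfs code:

mutex_lock(&list_mutex);	/* pins list membership while unlocked */
rcu_read_lock();
list_for_each_entry_rcu(dev, &devices, list) {
	rcu_read_unlock();	/* the next call may sleep */
	account_extents(dev);	/* hypothetical blocking work */
	rcu_read_lock();	/* resume the RCU read side */
}
rcu_read_unlock();
mutex_unlock(&list_mutex);
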
@@ -1787,8 +1824,6 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
1787 * holding chunk_mutex to avoid allocating new chunks, holding 1824
1788 * device_list_mutex to avoid the device being removed 1825 * device_list_mutex to avoid the device being removed
1789 */ 1826 */
1790 mutex_lock(&fs_info->fs_devices->device_list_mutex);
1791 mutex_lock(&fs_info->chunk_mutex);
1792 rcu_read_lock(); 1827 rcu_read_lock();
1793 list_for_each_entry_rcu(found, head, list) { 1828 list_for_each_entry_rcu(found, head, list) {
1794 if (found->flags & BTRFS_BLOCK_GROUP_DATA) { 1829 if (found->flags & BTRFS_BLOCK_GROUP_DATA) {
@@ -1824,17 +1859,12 @@ static int btrfs_statfs(struct dentry *dentry, struct kstatfs *buf)
1824 buf->f_bfree -= block_rsv->size >> bits; 1859 buf->f_bfree -= block_rsv->size >> bits;
1825 spin_unlock(&block_rsv->lock); 1860 spin_unlock(&block_rsv->lock);
1826 1861
1827 buf->f_bavail = total_free_data; 1862 buf->f_bavail = div_u64(total_free_data, factor);
1828 ret = btrfs_calc_avail_data_space(fs_info->tree_root, &total_free_data); 1863 ret = btrfs_calc_avail_data_space(fs_info->tree_root, &total_free_data);
1829 if (ret) { 1864 if (ret)
1830 mutex_unlock(&fs_info->chunk_mutex);
1831 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
1832 return ret; 1865 return ret;
1833 }
1834 buf->f_bavail += div_u64(total_free_data, factor); 1866 buf->f_bavail += div_u64(total_free_data, factor);
1835 buf->f_bavail = buf->f_bavail >> bits; 1867 buf->f_bavail = buf->f_bavail >> bits;
1836 mutex_unlock(&fs_info->chunk_mutex);
1837 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
1838 1868
1839 buf->f_type = BTRFS_SUPER_MAGIC; 1869 buf->f_type = BTRFS_SUPER_MAGIC;
1840 buf->f_bsize = dentry->d_sb->s_blocksize; 1870 buf->f_bsize = dentry->d_sb->s_blocksize;
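
Also note the div_u64() calls above: total_free_data and f_bavail are u64, and a plain 64-bit '/' does not link on 32-bit architectures (the compiler emits a call to a libgcc helper the kernel does not provide), so the helper is a requirement here, not a style choice. In isolation:

u64 avail = total_free_data;

avail = div_u64(avail, factor);	/* 64-by-32 divide, portable to 32-bit */
buf->f_bavail += avail;
buf->f_bavail >>= bits;		/* convert bytes to f_bsize units */
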
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index b2e7bb4393f6..92db3f648df4 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -111,7 +111,6 @@ static ssize_t btrfs_feature_attr_store(struct kobject *kobj,
111{ 111{
112 struct btrfs_fs_info *fs_info; 112 struct btrfs_fs_info *fs_info;
113 struct btrfs_feature_attr *fa = to_btrfs_feature_attr(a); 113 struct btrfs_feature_attr *fa = to_btrfs_feature_attr(a);
114 struct btrfs_trans_handle *trans;
115 u64 features, set, clear; 114 u64 features, set, clear;
116 unsigned long val; 115 unsigned long val;
117 int ret; 116 int ret;
@@ -153,10 +152,6 @@ static ssize_t btrfs_feature_attr_store(struct kobject *kobj,
153 btrfs_info(fs_info, "%s %s feature flag", 152 btrfs_info(fs_info, "%s %s feature flag",
154 val ? "Setting" : "Clearing", fa->kobj_attr.attr.name); 153 val ? "Setting" : "Clearing", fa->kobj_attr.attr.name);
155 154
156 trans = btrfs_start_transaction(fs_info->fs_root, 0);
157 if (IS_ERR(trans))
158 return PTR_ERR(trans);
159
160 spin_lock(&fs_info->super_lock); 155 spin_lock(&fs_info->super_lock);
161 features = get_features(fs_info, fa->feature_set); 156 features = get_features(fs_info, fa->feature_set);
162 if (val) 157 if (val)
@@ -166,9 +161,11 @@ static ssize_t btrfs_feature_attr_store(struct kobject *kobj,
166 set_features(fs_info, fa->feature_set, features); 161 set_features(fs_info, fa->feature_set, features);
167 spin_unlock(&fs_info->super_lock); 162 spin_unlock(&fs_info->super_lock);
168 163
169 ret = btrfs_commit_transaction(trans, fs_info->fs_root); 164 /*
170 if (ret) 165 * We don't want to do full transaction commit from inside sysfs
171 return ret; 166 */
167 btrfs_set_pending(fs_info, COMMIT);
168 wake_up_process(fs_info->transaction_kthread);
172 169
173 return count; 170 return count;
174} 171}
@@ -372,9 +369,6 @@ static ssize_t btrfs_label_store(struct kobject *kobj,
372 const char *buf, size_t len) 369 const char *buf, size_t len)
373{ 370{
374 struct btrfs_fs_info *fs_info = to_fs_info(kobj); 371 struct btrfs_fs_info *fs_info = to_fs_info(kobj);
375 struct btrfs_trans_handle *trans;
376 struct btrfs_root *root = fs_info->fs_root;
377 int ret;
378 size_t p_len; 372 size_t p_len;
379 373
380 if (fs_info->sb->s_flags & MS_RDONLY) 374 if (fs_info->sb->s_flags & MS_RDONLY)
@@ -389,20 +383,18 @@ static ssize_t btrfs_label_store(struct kobject *kobj,
389 if (p_len >= BTRFS_LABEL_SIZE) 383 if (p_len >= BTRFS_LABEL_SIZE)
390 return -EINVAL; 384 return -EINVAL;
391 385
392 trans = btrfs_start_transaction(root, 0); 386 spin_lock(&fs_info->super_lock);
393 if (IS_ERR(trans))
394 return PTR_ERR(trans);
395
396 spin_lock(&root->fs_info->super_lock);
397 memset(fs_info->super_copy->label, 0, BTRFS_LABEL_SIZE); 387 memset(fs_info->super_copy->label, 0, BTRFS_LABEL_SIZE);
398 memcpy(fs_info->super_copy->label, buf, p_len); 388 memcpy(fs_info->super_copy->label, buf, p_len);
399 spin_unlock(&root->fs_info->super_lock); 389 spin_unlock(&fs_info->super_lock);
400 ret = btrfs_commit_transaction(trans, root);
401 390
402 if (!ret) 391 /*
403 return len; 392 * We don't want to do full transaction commit from inside sysfs
393 */
394 btrfs_set_pending(fs_info, COMMIT);
395 wake_up_process(fs_info->transaction_kthread);
404 396
405 return ret; 397 return len;
406} 398}
407BTRFS_ATTR_RW(label, btrfs_label_show, btrfs_label_store); 399BTRFS_ATTR_RW(label, btrfs_label_show, btrfs_label_store);
408 400
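
Both store handlers in sysfs.c now end the same way: mutate the in-memory super copy under super_lock, then request a commit instead of opening a transaction from sysfs context. The shape, assuming btrfs_set_pending() records a bit in fs_info->pending_changes (the helper itself is not shown in this diff, so treat that as an inference from btrfs_apply_pending_changes() below):

spin_lock(&fs_info->super_lock);
/* ... update fs_info->super_copy ... */
spin_unlock(&fs_info->super_lock);

/* Record the request and let the transaction kthread do the commit. */
btrfs_set_pending(fs_info, COMMIT);
wake_up_process(fs_info->transaction_kthread);
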
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index dcaae3616728..a605d4e2f2bc 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -76,6 +76,32 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
76 } 76 }
77} 77}
78 78
79static void clear_btree_io_tree(struct extent_io_tree *tree)
80{
81 spin_lock(&tree->lock);
82 while (!RB_EMPTY_ROOT(&tree->state)) {
83 struct rb_node *node;
84 struct extent_state *state;
85
86 node = rb_first(&tree->state);
87 state = rb_entry(node, struct extent_state, rb_node);
88 rb_erase(&state->rb_node, &tree->state);
89 RB_CLEAR_NODE(&state->rb_node);
90 /*
91 * btree io trees aren't supposed to have tasks waiting for
92 * changes in the flags of extent states ever.
93 */
94 ASSERT(!waitqueue_active(&state->wq));
95 free_extent_state(state);
96 if (need_resched()) {
97 spin_unlock(&tree->lock);
98 cond_resched();
99 spin_lock(&tree->lock);
100 }
101 }
102 spin_unlock(&tree->lock);
103}
104
79static noinline void switch_commit_roots(struct btrfs_transaction *trans, 105static noinline void switch_commit_roots(struct btrfs_transaction *trans,
80 struct btrfs_fs_info *fs_info) 106 struct btrfs_fs_info *fs_info)
81{ 107{
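
clear_btree_io_tree() drains an entire io tree under tree->lock but backs off with cond_resched() when needed, so a large tree cannot monopolize a CPU with the spinlock held. The skeleton, reduced to generic names (free_node() is a hypothetical destructor standing in for free_extent_state()):

spin_lock(&lock);
while (!RB_EMPTY_ROOT(&root)) {
	struct rb_node *node = rb_first(&root);

	rb_erase(node, &root);
	RB_CLEAR_NODE(node);		/* mark the node detached */
	free_node(node);		/* hypothetical destructor */

	if (need_resched()) {		/* yield without losing progress */
		spin_unlock(&lock);
		cond_resched();
		spin_lock(&lock);	/* rb_first() finds the next node */
	}
}
spin_unlock(&lock);
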
@@ -89,6 +115,7 @@ static noinline void switch_commit_roots(struct btrfs_transaction *trans,
89 root->commit_root = btrfs_root_node(root); 115 root->commit_root = btrfs_root_node(root);
90 if (is_fstree(root->objectid)) 116 if (is_fstree(root->objectid))
91 btrfs_unpin_free_ino(root); 117 btrfs_unpin_free_ino(root);
118 clear_btree_io_tree(&root->dirty_log_pages);
92 } 119 }
93 up_write(&fs_info->commit_root_sem); 120 up_write(&fs_info->commit_root_sem);
94} 121}
@@ -220,6 +247,7 @@ loop:
220 INIT_LIST_HEAD(&cur_trans->pending_snapshots); 247 INIT_LIST_HEAD(&cur_trans->pending_snapshots);
221 INIT_LIST_HEAD(&cur_trans->pending_chunks); 248 INIT_LIST_HEAD(&cur_trans->pending_chunks);
222 INIT_LIST_HEAD(&cur_trans->switch_commits); 249 INIT_LIST_HEAD(&cur_trans->switch_commits);
250 INIT_LIST_HEAD(&cur_trans->pending_ordered);
223 list_add_tail(&cur_trans->list, &fs_info->trans_list); 251 list_add_tail(&cur_trans->list, &fs_info->trans_list);
224 extent_io_tree_init(&cur_trans->dirty_pages, 252 extent_io_tree_init(&cur_trans->dirty_pages,
225 fs_info->btree_inode->i_mapping); 253 fs_info->btree_inode->i_mapping);
@@ -488,6 +516,7 @@ again:
488 h->sync = false; 516 h->sync = false;
489 INIT_LIST_HEAD(&h->qgroup_ref_list); 517 INIT_LIST_HEAD(&h->qgroup_ref_list);
490 INIT_LIST_HEAD(&h->new_bgs); 518 INIT_LIST_HEAD(&h->new_bgs);
519 INIT_LIST_HEAD(&h->ordered);
491 520
492 smp_mb(); 521 smp_mb();
493 if (cur_trans->state >= TRANS_STATE_BLOCKED && 522 if (cur_trans->state >= TRANS_STATE_BLOCKED &&
@@ -719,6 +748,12 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
719 if (!list_empty(&trans->new_bgs)) 748 if (!list_empty(&trans->new_bgs))
720 btrfs_create_pending_block_groups(trans, root); 749 btrfs_create_pending_block_groups(trans, root);
721 750
751 if (!list_empty(&trans->ordered)) {
752 spin_lock(&info->trans_lock);
753 list_splice(&trans->ordered, &cur_trans->pending_ordered);
754 spin_unlock(&info->trans_lock);
755 }
756
722 trans->delayed_ref_updates = 0; 757 trans->delayed_ref_updates = 0;
723 if (!trans->sync) { 758 if (!trans->sync) {
724 must_run_delayed_refs = 759 must_run_delayed_refs =
@@ -828,17 +863,39 @@ int btrfs_write_marked_extents(struct btrfs_root *root,
828 863
829 while (!find_first_extent_bit(dirty_pages, start, &start, &end, 864 while (!find_first_extent_bit(dirty_pages, start, &start, &end,
830 mark, &cached_state)) { 865 mark, &cached_state)) {
831 convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT, 866 bool wait_writeback = false;
832 mark, &cached_state, GFP_NOFS); 867
833 cached_state = NULL; 868 err = convert_extent_bit(dirty_pages, start, end,
834 err = filemap_fdatawrite_range(mapping, start, end); 869 EXTENT_NEED_WAIT,
870 mark, &cached_state, GFP_NOFS);
871 /*
872 * convert_extent_bit can return -ENOMEM, which is most of the
873 * time a temporary error. So when it happens, ignore the error
874 * and wait for writeback of this range to finish - because we
875 * failed to set the bit EXTENT_NEED_WAIT for the range, a call
876 * to btrfs_wait_marked_extents() would not know that writeback
877 * for this range started and therefore wouldn't wait for it to
878 * finish - we don't want to commit a superblock that points to
879 * btree nodes/leafs for which writeback hasn't finished yet
880 * (and without errors).
881 * We cleanup any entries left in the io tree when committing
882 * the transaction (through clear_btree_io_tree()).
883 */
884 if (err == -ENOMEM) {
885 err = 0;
886 wait_writeback = true;
887 }
888 if (!err)
889 err = filemap_fdatawrite_range(mapping, start, end);
835 if (err) 890 if (err)
836 werr = err; 891 werr = err;
892 else if (wait_writeback)
893 werr = filemap_fdatawait_range(mapping, start, end);
894 free_extent_state(cached_state);
895 cached_state = NULL;
837 cond_resched(); 896 cond_resched();
838 start = end + 1; 897 start = end + 1;
839 } 898 }
840 if (err)
841 werr = err;
842 return werr; 899 return werr;
843} 900}
844 901
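
The new error handling in btrfs_write_marked_extents() reads as a three-way decision: a transient -ENOMEM from convert_extent_bit() is downgraded to "wait here ourselves", because EXTENT_NEED_WAIT was never set and btrfs_wait_marked_extents() would otherwise skip the range. Boiled down to its control flow:

err = convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
			 mark, &cached_state, GFP_NOFS);
if (err == -ENOMEM) {
	err = 0;		/* transient: degrade, don't abort */
	wait_writeback = true;	/* nobody else will wait for this range */
}
if (!err)
	err = filemap_fdatawrite_range(mapping, start, end);
if (err)
	werr = err;
else if (wait_writeback)	/* do the waiter's job on the spot */
	werr = filemap_fdatawait_range(mapping, start, end);
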
@@ -862,11 +919,25 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
862 919
863 while (!find_first_extent_bit(dirty_pages, start, &start, &end, 920 while (!find_first_extent_bit(dirty_pages, start, &start, &end,
864 EXTENT_NEED_WAIT, &cached_state)) { 921 EXTENT_NEED_WAIT, &cached_state)) {
865 clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT, 922 /*
866 0, 0, &cached_state, GFP_NOFS); 923 * Ignore -ENOMEM errors returned by clear_extent_bit().
867 err = filemap_fdatawait_range(mapping, start, end); 924 * When committing the transaction, we'll remove any entries
925 * left in the io tree. For a log commit, we don't remove them
926 * after committing the log because the tree can be accessed
927 * concurrently - we do it only at transaction commit time when
928 * it's safe to do it (through clear_btree_io_tree()).
929 */
930 err = clear_extent_bit(dirty_pages, start, end,
931 EXTENT_NEED_WAIT,
932 0, 0, &cached_state, GFP_NOFS);
933 if (err == -ENOMEM)
934 err = 0;
935 if (!err)
936 err = filemap_fdatawait_range(mapping, start, end);
868 if (err) 937 if (err)
869 werr = err; 938 werr = err;
939 free_extent_state(cached_state);
940 cached_state = NULL;
870 cond_resched(); 941 cond_resched();
871 start = end + 1; 942 start = end + 1;
872 } 943 }
@@ -919,17 +990,17 @@ static int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
919 return 0; 990 return 0;
920} 991}
921 992
922int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, 993static int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
923 struct btrfs_root *root) 994 struct btrfs_root *root)
924{ 995{
925 if (!trans || !trans->transaction) { 996 int ret;
926 struct inode *btree_inode; 997
927 btree_inode = root->fs_info->btree_inode; 998 ret = btrfs_write_and_wait_marked_extents(root,
928 return filemap_write_and_wait(btree_inode->i_mapping);
929 }
930 return btrfs_write_and_wait_marked_extents(root,
931 &trans->transaction->dirty_pages, 999 &trans->transaction->dirty_pages,
932 EXTENT_DIRTY); 1000 EXTENT_DIRTY);
1001 clear_btree_io_tree(&trans->transaction->dirty_pages);
1002
1003 return ret;
933} 1004}
934 1005
935/* 1006/*
@@ -1652,6 +1723,28 @@ static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
1652 btrfs_wait_ordered_roots(fs_info, -1); 1723 btrfs_wait_ordered_roots(fs_info, -1);
1653} 1724}
1654 1725
1726static inline void
1727btrfs_wait_pending_ordered(struct btrfs_transaction *cur_trans,
1728 struct btrfs_fs_info *fs_info)
1729{
1730 struct btrfs_ordered_extent *ordered;
1731
1732 spin_lock(&fs_info->trans_lock);
1733 while (!list_empty(&cur_trans->pending_ordered)) {
1734 ordered = list_first_entry(&cur_trans->pending_ordered,
1735 struct btrfs_ordered_extent,
1736 trans_list);
1737 list_del_init(&ordered->trans_list);
1738 spin_unlock(&fs_info->trans_lock);
1739
1740 wait_event(ordered->wait, test_bit(BTRFS_ORDERED_COMPLETE,
1741 &ordered->flags));
1742 btrfs_put_ordered_extent(ordered);
1743 spin_lock(&fs_info->trans_lock);
1744 }
1745 spin_unlock(&fs_info->trans_lock);
1746}
1747
1655int btrfs_commit_transaction(struct btrfs_trans_handle *trans, 1748int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1656 struct btrfs_root *root) 1749 struct btrfs_root *root)
1657{ 1750{
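
btrfs_wait_pending_ordered() above is the canonical way to drain a list when each element requires a blocking wait: detach one entry while the spinlock is held, drop the lock for the wait, then retake it and re-test emptiness. Generic form (struct my_entry and the helpers are placeholders):

spin_lock(&lock);
while (!list_empty(&head)) {
	entry = list_first_entry(&head, struct my_entry, list);
	list_del_init(&entry->list);	/* claim it under the lock */
	spin_unlock(&lock);

	wait_event(entry->wait, entry_is_done(entry));	/* may sleep */
	put_entry(entry);	/* drop the reference the list held */

	spin_lock(&lock);
}
spin_unlock(&lock);
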
@@ -1702,6 +1795,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1702 } 1795 }
1703 1796
1704 spin_lock(&root->fs_info->trans_lock); 1797 spin_lock(&root->fs_info->trans_lock);
1798 list_splice(&trans->ordered, &cur_trans->pending_ordered);
1705 if (cur_trans->state >= TRANS_STATE_COMMIT_START) { 1799 if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
1706 spin_unlock(&root->fs_info->trans_lock); 1800 spin_unlock(&root->fs_info->trans_lock);
1707 atomic_inc(&cur_trans->use_count); 1801 atomic_inc(&cur_trans->use_count);
@@ -1754,6 +1848,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1754 1848
1755 btrfs_wait_delalloc_flush(root->fs_info); 1849 btrfs_wait_delalloc_flush(root->fs_info);
1756 1850
1851 btrfs_wait_pending_ordered(cur_trans, root->fs_info);
1852
1757 btrfs_scrub_pause(root); 1853 btrfs_scrub_pause(root);
1758 /* 1854 /*
1759 * Ok now we need to make sure to block out any other joins while we 1855 * Ok now we need to make sure to block out any other joins while we
@@ -1842,13 +1938,10 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
1842 } 1938 }
1843 1939
1844 /* 1940 /*
1845 * Since the transaction is done, we should set the inode map cache flag 1941 * Since the transaction is done, we can apply the pending changes
1846 * before any other comming transaction. 1942 * before the next transaction.
1847 */ 1943 */
1848 if (btrfs_test_opt(root, CHANGE_INODE_CACHE)) 1944 btrfs_apply_pending_changes(root->fs_info);
1849 btrfs_set_opt(root->fs_info->mount_opt, INODE_MAP_CACHE);
1850 else
1851 btrfs_clear_opt(root->fs_info->mount_opt, INODE_MAP_CACHE);
1852 1945
1853 /* commit_fs_roots gets rid of all the tree log roots, it is now 1946 /* commit_fs_roots gets rid of all the tree log roots, it is now
1854 * safe to free the root of tree log roots 1947 * safe to free the root of tree log roots
@@ -2019,3 +2112,32 @@ int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
2019 2112
2020 return (ret < 0) ? 0 : 1; 2113 return (ret < 0) ? 0 : 1;
2021} 2114}
2115
2116void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info)
2117{
2118 unsigned long prev;
2119 unsigned long bit;
2120
2121 prev = cmpxchg(&fs_info->pending_changes, 0, 0);
2122 if (!prev)
2123 return;
2124
2125 bit = 1 << BTRFS_PENDING_SET_INODE_MAP_CACHE;
2126 if (prev & bit)
2127 btrfs_set_opt(fs_info->mount_opt, INODE_MAP_CACHE);
2128 prev &= ~bit;
2129
2130 bit = 1 << BTRFS_PENDING_CLEAR_INODE_MAP_CACHE;
2131 if (prev & bit)
2132 btrfs_clear_opt(fs_info->mount_opt, INODE_MAP_CACHE);
2133 prev &= ~bit;
2134
2135 bit = 1 << BTRFS_PENDING_COMMIT;
2136 if (prev & bit)
2137 btrfs_debug(fs_info, "pending commit done");
2138 prev &= ~bit;
2139
2140 if (prev)
2141 btrfs_warn(fs_info,
2142 "unknown pending changes left 0x%lx, ignoring", prev);
2143}
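
The cmpxchg(&fs_info->pending_changes, 0, 0) on the first line of btrfs_apply_pending_changes() deserves a remark: it stores 0 only when the value already is 0, so it never modifies a non-zero mask; it is simply an atomic read of the whole word, which matters for a long on 32-bit where torn reads are otherwise conceivable. As an idiom:

static unsigned long read_mask_atomic(unsigned long *mask)
{
	/*
	 * Swap 0 for 0: a no-op store when *mask == 0 and no store at
	 * all otherwise. Either way the return value is an atomic
	 * snapshot of the whole word.
	 */
	return cmpxchg(mask, 0, 0);
}
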
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h
index d8f40e1a5d2d..00ed29c4b3f9 100644
--- a/fs/btrfs/transaction.h
+++ b/fs/btrfs/transaction.h
@@ -56,6 +56,7 @@ struct btrfs_transaction {
56 wait_queue_head_t commit_wait; 56 wait_queue_head_t commit_wait;
57 struct list_head pending_snapshots; 57 struct list_head pending_snapshots;
58 struct list_head pending_chunks; 58 struct list_head pending_chunks;
59 struct list_head pending_ordered;
59 struct list_head switch_commits; 60 struct list_head switch_commits;
60 struct btrfs_delayed_ref_root delayed_refs; 61 struct btrfs_delayed_ref_root delayed_refs;
61 int aborted; 62 int aborted;
@@ -105,6 +106,7 @@ struct btrfs_trans_handle {
105 */ 106 */
106 struct btrfs_root *root; 107 struct btrfs_root *root;
107 struct seq_list delayed_ref_elem; 108 struct seq_list delayed_ref_elem;
109 struct list_head ordered;
108 struct list_head qgroup_ref_list; 110 struct list_head qgroup_ref_list;
109 struct list_head new_bgs; 111 struct list_head new_bgs;
110}; 112};
@@ -145,8 +147,6 @@ struct btrfs_trans_handle *btrfs_attach_transaction_barrier(
145 struct btrfs_root *root); 147 struct btrfs_root *root);
146struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root); 148struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root);
147int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid); 149int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid);
148int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
149 struct btrfs_root *root);
150 150
151void btrfs_add_dead_root(struct btrfs_root *root); 151void btrfs_add_dead_root(struct btrfs_root *root);
152int btrfs_defrag_root(struct btrfs_root *root); 152int btrfs_defrag_root(struct btrfs_root *root);
@@ -170,4 +170,6 @@ int btrfs_wait_marked_extents(struct btrfs_root *root,
170int btrfs_transaction_blocked(struct btrfs_fs_info *info); 170int btrfs_transaction_blocked(struct btrfs_fs_info *info);
171int btrfs_transaction_in_commit(struct btrfs_fs_info *info); 171int btrfs_transaction_in_commit(struct btrfs_fs_info *info);
172void btrfs_put_transaction(struct btrfs_transaction *transaction); 172void btrfs_put_transaction(struct btrfs_transaction *transaction);
173void btrfs_apply_pending_changes(struct btrfs_fs_info *fs_info);
174
173#endif 175#endif
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 286213cec861..9a02da16f2be 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -2599,12 +2599,14 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2599 index2 = root_log_ctx.log_transid % 2; 2599 index2 = root_log_ctx.log_transid % 2;
2600 if (atomic_read(&log_root_tree->log_commit[index2])) { 2600 if (atomic_read(&log_root_tree->log_commit[index2])) {
2601 blk_finish_plug(&plug); 2601 blk_finish_plug(&plug);
2602 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark); 2602 ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages,
2603 mark);
2604 btrfs_wait_logged_extents(trans, log, log_transid);
2603 wait_log_commit(trans, log_root_tree, 2605 wait_log_commit(trans, log_root_tree,
2604 root_log_ctx.log_transid); 2606 root_log_ctx.log_transid);
2605 btrfs_free_logged_extents(log, log_transid);
2606 mutex_unlock(&log_root_tree->log_mutex); 2607 mutex_unlock(&log_root_tree->log_mutex);
2607 ret = root_log_ctx.log_ret; 2608 if (!ret)
2609 ret = root_log_ctx.log_ret;
2608 goto out; 2610 goto out;
2609 } 2611 }
2610 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid); 2612 ASSERT(root_log_ctx.log_transid == log_root_tree->log_transid);
@@ -2641,11 +2643,18 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans,
2641 mutex_unlock(&log_root_tree->log_mutex); 2643 mutex_unlock(&log_root_tree->log_mutex);
2642 goto out_wake_log_root; 2644 goto out_wake_log_root;
2643 } 2645 }
2644 btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark); 2646 ret = btrfs_wait_marked_extents(log, &log->dirty_log_pages, mark);
2645 btrfs_wait_marked_extents(log_root_tree, 2647 if (!ret)
2646 &log_root_tree->dirty_log_pages, 2648 ret = btrfs_wait_marked_extents(log_root_tree,
2647 EXTENT_NEW | EXTENT_DIRTY); 2649 &log_root_tree->dirty_log_pages,
2648 btrfs_wait_logged_extents(log, log_transid); 2650 EXTENT_NEW | EXTENT_DIRTY);
2651 if (ret) {
2652 btrfs_set_log_full_commit(root->fs_info, trans);
2653 btrfs_free_logged_extents(log, log_transid);
2654 mutex_unlock(&log_root_tree->log_mutex);
2655 goto out_wake_log_root;
2656 }
2657 btrfs_wait_logged_extents(trans, log, log_transid);
2649 2658
2650 btrfs_set_super_log_root(root->fs_info->super_for_commit, 2659 btrfs_set_super_log_root(root->fs_info->super_for_commit,
2651 log_root_tree->node->start); 2660 log_root_tree->node->start);
@@ -3626,6 +3635,12 @@ static int wait_ordered_extents(struct btrfs_trans_handle *trans,
3626 test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))); 3635 test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)));
3627 3636
3628 if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) { 3637 if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags)) {
3638 /*
3639 * Clear the AS_EIO/AS_ENOSPC flags from the inode's
3640 * i_mapping flags, so that the next fsync won't get
3641 * an outdated io error too.
3642 */
3643 btrfs_inode_check_errors(inode);
3629 *ordered_io_error = true; 3644 *ordered_io_error = true;
3630 break; 3645 break;
3631 } 3646 }
@@ -3766,7 +3781,7 @@ static int log_one_extent(struct btrfs_trans_handle *trans,
3766 fi = btrfs_item_ptr(leaf, path->slots[0], 3781 fi = btrfs_item_ptr(leaf, path->slots[0],
3767 struct btrfs_file_extent_item); 3782 struct btrfs_file_extent_item);
3768 3783
3769 btrfs_set_token_file_extent_generation(leaf, fi, em->generation, 3784 btrfs_set_token_file_extent_generation(leaf, fi, trans->transid,
3770 &token); 3785 &token);
3771 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) 3786 if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags))
3772 btrfs_set_token_file_extent_type(leaf, fi, 3787 btrfs_set_token_file_extent_type(leaf, fi,
@@ -3963,7 +3978,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans,
3963 3978
3964 mutex_lock(&BTRFS_I(inode)->log_mutex); 3979 mutex_lock(&BTRFS_I(inode)->log_mutex);
3965 3980
3966 btrfs_get_logged_extents(inode, &logged_list); 3981 btrfs_get_logged_extents(inode, &logged_list, start, end);
3967 3982
3968 /* 3983 /*
3969 * a brute force approach to making sure we get the most uptodate 3984 * a brute force approach to making sure we get the most uptodate
@@ -4089,6 +4104,21 @@ log_extents:
4089 btrfs_release_path(path); 4104 btrfs_release_path(path);
4090 btrfs_release_path(dst_path); 4105 btrfs_release_path(dst_path);
4091 if (fast_search) { 4106 if (fast_search) {
4107 /*
4108 * Some ordered extents started by fsync might have completed
4109 * before we collected the ordered extents in logged_list, which
4110 * means they're gone, not in our logged_list nor in the inode's
4111 * ordered tree. We want the application/user space to know an
4112 * error happened while attempting to persist file data so that
4113 * it can take proper action. If such error happened, we leave
4114 * without writing to the log tree and the fsync must report the
4115 * file data write error and not commit the current transaction.
4116 */
4117 err = btrfs_inode_check_errors(inode);
4118 if (err) {
4119 ctx->io_err = err;
4120 goto out_unlock;
4121 }
4092 ret = btrfs_log_changed_extents(trans, root, inode, dst_path, 4122 ret = btrfs_log_changed_extents(trans, root, inode, dst_path,
4093 &logged_list, ctx); 4123 &logged_list, ctx);
4094 if (ret) { 4124 if (ret) {
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index d47289c715c8..0144790e296e 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -53,16 +53,6 @@ static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
53DEFINE_MUTEX(uuid_mutex); 53DEFINE_MUTEX(uuid_mutex);
54static LIST_HEAD(fs_uuids); 54static LIST_HEAD(fs_uuids);
55 55
56static void lock_chunks(struct btrfs_root *root)
57{
58 mutex_lock(&root->fs_info->chunk_mutex);
59}
60
61static void unlock_chunks(struct btrfs_root *root)
62{
63 mutex_unlock(&root->fs_info->chunk_mutex);
64}
65
66static struct btrfs_fs_devices *__alloc_fs_devices(void) 56static struct btrfs_fs_devices *__alloc_fs_devices(void)
67{ 57{
68 struct btrfs_fs_devices *fs_devs; 58 struct btrfs_fs_devices *fs_devs;
@@ -1068,9 +1058,11 @@ static int contains_pending_extent(struct btrfs_trans_handle *trans,
1068 u64 *start, u64 len) 1058 u64 *start, u64 len)
1069{ 1059{
1070 struct extent_map *em; 1060 struct extent_map *em;
1061 struct list_head *search_list = &trans->transaction->pending_chunks;
1071 int ret = 0; 1062 int ret = 0;
1072 1063
1073 list_for_each_entry(em, &trans->transaction->pending_chunks, list) { 1064again:
1065 list_for_each_entry(em, search_list, list) {
1074 struct map_lookup *map; 1066 struct map_lookup *map;
1075 int i; 1067 int i;
1076 1068
@@ -1087,6 +1079,10 @@ static int contains_pending_extent(struct btrfs_trans_handle *trans,
1087 ret = 1; 1079 ret = 1;
1088 } 1080 }
1089 } 1081 }
1082 if (search_list == &trans->transaction->pending_chunks) {
1083 search_list = &trans->root->fs_info->pinned_chunks;
1084 goto again;
1085 }
1090 1086
1091 return ret; 1087 return ret;
1092} 1088}
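
contains_pending_extent() now runs one loop body over two lists, first the transaction's pending_chunks and then fs_info->pinned_chunks, by swapping the head pointer and jumping back. Generalized (ranges_overlap() is a placeholder for the map_lookup stripe test):

struct list_head *search = &first_list;

again:
	list_for_each_entry(em, search, list) {
		if (ranges_overlap(em, *start, len))	/* placeholder test */
			ret = 1;
	}
	if (search == &first_list) {
		search = &second_list;	/* second pass, same body */
		goto again;
	}
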
@@ -1800,8 +1796,8 @@ error_undo:
1800 goto error_brelse; 1796 goto error_brelse;
1801} 1797}
1802 1798
1803void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info, 1799void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
1804 struct btrfs_device *srcdev) 1800 struct btrfs_device *srcdev)
1805{ 1801{
1806 struct btrfs_fs_devices *fs_devices; 1802 struct btrfs_fs_devices *fs_devices;
1807 1803
@@ -1829,6 +1825,12 @@ void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info,
1829 1825
1830 if (srcdev->bdev) 1826 if (srcdev->bdev)
1831 fs_devices->open_devices--; 1827 fs_devices->open_devices--;
1828}
1829
1830void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
1831 struct btrfs_device *srcdev)
1832{
1833 struct btrfs_fs_devices *fs_devices = srcdev->fs_devices;
1832 1834
1833 call_rcu(&srcdev->rcu, free_device); 1835 call_rcu(&srcdev->rcu, free_device);
1834 1836
@@ -2647,18 +2649,12 @@ int btrfs_remove_chunk(struct btrfs_trans_handle *trans,
2647 } 2649 }
2648 } 2650 }
2649 2651
2650 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset); 2652 ret = btrfs_remove_block_group(trans, extent_root, chunk_offset, em);
2651 if (ret) { 2653 if (ret) {
2652 btrfs_abort_transaction(trans, extent_root, ret); 2654 btrfs_abort_transaction(trans, extent_root, ret);
2653 goto out; 2655 goto out;
2654 } 2656 }
2655 2657
2656 write_lock(&em_tree->lock);
2657 remove_extent_mapping(em_tree, em);
2658 write_unlock(&em_tree->lock);
2659
2660 /* once for the tree */
2661 free_extent_map(em);
2662out: 2658out:
2663 /* once for us */ 2659 /* once for us */
2664 free_extent_map(em); 2660 free_extent_map(em);
@@ -4505,6 +4501,8 @@ error_del_extent:
4505 free_extent_map(em); 4501 free_extent_map(em);
4506 /* One for the tree reference */ 4502 /* One for the tree reference */
4507 free_extent_map(em); 4503 free_extent_map(em);
4504 /* One for the pending_chunks list reference */
4505 free_extent_map(em);
4508error: 4506error:
4509 kfree(devices_info); 4507 kfree(devices_info);
4510 return ret; 4508 return ret;
@@ -4881,13 +4879,15 @@ static inline int parity_smaller(u64 a, u64 b)
4881static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map) 4879static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
4882{ 4880{
4883 struct btrfs_bio_stripe s; 4881 struct btrfs_bio_stripe s;
4882 int real_stripes = bbio->num_stripes - bbio->num_tgtdevs;
4884 int i; 4883 int i;
4885 u64 l; 4884 u64 l;
4886 int again = 1; 4885 int again = 1;
4886 int m;
4887 4887
4888 while (again) { 4888 while (again) {
4889 again = 0; 4889 again = 0;
4890 for (i = 0; i < bbio->num_stripes - 1; i++) { 4890 for (i = 0; i < real_stripes - 1; i++) {
4891 if (parity_smaller(raid_map[i], raid_map[i+1])) { 4891 if (parity_smaller(raid_map[i], raid_map[i+1])) {
4892 s = bbio->stripes[i]; 4892 s = bbio->stripes[i];
4893 l = raid_map[i]; 4893 l = raid_map[i];
@@ -4895,6 +4895,14 @@ static void sort_parity_stripes(struct btrfs_bio *bbio, u64 *raid_map)
4895 raid_map[i] = raid_map[i+1]; 4895 raid_map[i] = raid_map[i+1];
4896 bbio->stripes[i+1] = s; 4896 bbio->stripes[i+1] = s;
4897 raid_map[i+1] = l; 4897 raid_map[i+1] = l;
4898
4899 if (bbio->tgtdev_map) {
4900 m = bbio->tgtdev_map[i];
4901 bbio->tgtdev_map[i] =
4902 bbio->tgtdev_map[i + 1];
4903 bbio->tgtdev_map[i + 1] = m;
4904 }
4905
4898 again = 1; 4906 again = 1;
4899 } 4907 }
4900 } 4908 }
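
sort_parity_stripes() is a bubble sort over stripes[] keyed by raid_map[]; once dev-replace introduces tgtdev_map[] as a third parallel array, every swap has to touch all three or the target-device indices end up pointing at the wrong stripes. Note also the new bound of real_stripes - 1, which keeps appended target-device stripes out of the sort. With the kernel's swap() macro the body could be written more compactly as:

if (parity_smaller(raid_map[i], raid_map[i + 1])) {
	swap(bbio->stripes[i], bbio->stripes[i + 1]);
	swap(raid_map[i], raid_map[i + 1]);
	if (bbio->tgtdev_map)	/* keep the third array in lockstep */
		swap(bbio->tgtdev_map[i], bbio->tgtdev_map[i + 1]);
	again = 1;
}
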
@@ -4923,6 +4931,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
4923 int ret = 0; 4931 int ret = 0;
4924 int num_stripes; 4932 int num_stripes;
4925 int max_errors = 0; 4933 int max_errors = 0;
4934 int tgtdev_indexes = 0;
4926 struct btrfs_bio *bbio = NULL; 4935 struct btrfs_bio *bbio = NULL;
4927 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace; 4936 struct btrfs_dev_replace *dev_replace = &fs_info->dev_replace;
4928 int dev_replace_is_ongoing = 0; 4937 int dev_replace_is_ongoing = 0;
@@ -5161,15 +5170,14 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5161 BTRFS_BLOCK_GROUP_RAID6)) { 5170 BTRFS_BLOCK_GROUP_RAID6)) {
5162 u64 tmp; 5171 u64 tmp;
5163 5172
5164 if (bbio_ret && ((rw & REQ_WRITE) || mirror_num > 1) 5173 if (raid_map_ret &&
5165 && raid_map_ret) { 5174 ((rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) ||
5175 mirror_num > 1)) {
5166 int i, rot; 5176 int i, rot;
5167 5177
5168 /* push stripe_nr back to the start of the full stripe */ 5178 /* push stripe_nr back to the start of the full stripe */
5169 stripe_nr = raid56_full_stripe_start; 5179 stripe_nr = raid56_full_stripe_start;
5170 do_div(stripe_nr, stripe_len); 5180 do_div(stripe_nr, stripe_len * nr_data_stripes(map));
5171
5172 stripe_index = do_div(stripe_nr, nr_data_stripes(map));
5173 5181
5174 /* RAID[56] write or recovery. Return all stripes */ 5182 /* RAID[56] write or recovery. Return all stripes */
5175 num_stripes = map->num_stripes; 5183 num_stripes = map->num_stripes;
@@ -5235,14 +5243,19 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5235 num_alloc_stripes <<= 1; 5243 num_alloc_stripes <<= 1;
5236 if (rw & REQ_GET_READ_MIRRORS) 5244 if (rw & REQ_GET_READ_MIRRORS)
5237 num_alloc_stripes++; 5245 num_alloc_stripes++;
5246 tgtdev_indexes = num_stripes;
5238 } 5247 }
5239 bbio = kzalloc(btrfs_bio_size(num_alloc_stripes), GFP_NOFS); 5248
5249 bbio = kzalloc(btrfs_bio_size(num_alloc_stripes, tgtdev_indexes),
5250 GFP_NOFS);
5240 if (!bbio) { 5251 if (!bbio) {
5241 kfree(raid_map); 5252 kfree(raid_map);
5242 ret = -ENOMEM; 5253 ret = -ENOMEM;
5243 goto out; 5254 goto out;
5244 } 5255 }
5245 atomic_set(&bbio->error, 0); 5256 atomic_set(&bbio->error, 0);
5257 if (dev_replace_is_ongoing)
5258 bbio->tgtdev_map = (int *)(bbio->stripes + num_alloc_stripes);
5246 5259
5247 if (rw & REQ_DISCARD) { 5260 if (rw & REQ_DISCARD) {
5248 int factor = 0; 5261 int factor = 0;
@@ -5327,6 +5340,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5327 if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS)) 5340 if (rw & (REQ_WRITE | REQ_GET_READ_MIRRORS))
5328 max_errors = btrfs_chunk_max_errors(map); 5341 max_errors = btrfs_chunk_max_errors(map);
5329 5342
5343 tgtdev_indexes = 0;
5330 if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) && 5344 if (dev_replace_is_ongoing && (rw & (REQ_WRITE | REQ_DISCARD)) &&
5331 dev_replace->tgtdev != NULL) { 5345 dev_replace->tgtdev != NULL) {
5332 int index_where_to_add; 5346 int index_where_to_add;
@@ -5355,8 +5369,10 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5355 new->physical = old->physical; 5369 new->physical = old->physical;
5356 new->length = old->length; 5370 new->length = old->length;
5357 new->dev = dev_replace->tgtdev; 5371 new->dev = dev_replace->tgtdev;
5372 bbio->tgtdev_map[i] = index_where_to_add;
5358 index_where_to_add++; 5373 index_where_to_add++;
5359 max_errors++; 5374 max_errors++;
5375 tgtdev_indexes++;
5360 } 5376 }
5361 } 5377 }
5362 num_stripes = index_where_to_add; 5378 num_stripes = index_where_to_add;
@@ -5402,7 +5418,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5402 tgtdev_stripe->length = 5418 tgtdev_stripe->length =
5403 bbio->stripes[index_srcdev].length; 5419 bbio->stripes[index_srcdev].length;
5404 tgtdev_stripe->dev = dev_replace->tgtdev; 5420 tgtdev_stripe->dev = dev_replace->tgtdev;
5421 bbio->tgtdev_map[index_srcdev] = num_stripes;
5405 5422
5423 tgtdev_indexes++;
5406 num_stripes++; 5424 num_stripes++;
5407 } 5425 }
5408 } 5426 }
@@ -5412,6 +5430,7 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5412 bbio->num_stripes = num_stripes; 5430 bbio->num_stripes = num_stripes;
5413 bbio->max_errors = max_errors; 5431 bbio->max_errors = max_errors;
5414 bbio->mirror_num = mirror_num; 5432 bbio->mirror_num = mirror_num;
5433 bbio->num_tgtdevs = tgtdev_indexes;
5415 5434
5416 /* 5435 /*
5417 * this is the case that REQ_READ && dev_replace_is_ongoing && 5436 * this is the case that REQ_READ && dev_replace_is_ongoing &&
@@ -5443,6 +5462,16 @@ int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
5443 mirror_num, NULL); 5462 mirror_num, NULL);
5444} 5463}
5445 5464
5465/* For Scrub/replace */
5466int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
5467 u64 logical, u64 *length,
5468 struct btrfs_bio **bbio_ret, int mirror_num,
5469 u64 **raid_map_ret)
5470{
5471 return __btrfs_map_block(fs_info, rw, logical, length, bbio_ret,
5472 mirror_num, raid_map_ret);
5473}
5474
5446int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, 5475int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
5447 u64 chunk_start, u64 physical, u64 devid, 5476 u64 chunk_start, u64 physical, u64 devid,
5448 u64 **logical, int *naddrs, int *stripe_len) 5477 u64 **logical, int *naddrs, int *stripe_len)
@@ -5812,12 +5841,9 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
5812 } else { 5841 } else {
5813 ret = raid56_parity_recover(root, bio, bbio, 5842 ret = raid56_parity_recover(root, bio, bbio,
5814 raid_map, map_length, 5843 raid_map, map_length,
5815 mirror_num); 5844 mirror_num, 1);
5816 } 5845 }
5817 /* 5846
5818 * FIXME, replace dosen't support raid56 yet, please fix
5819 * it in the future.
5820 */
5821 btrfs_bio_counter_dec(root->fs_info); 5847 btrfs_bio_counter_dec(root->fs_info);
5822 return ret; 5848 return ret;
5823 } 5849 }
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 08980fa23039..d6fe73c0f4a2 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -292,7 +292,7 @@ struct btrfs_bio_stripe {
292struct btrfs_bio; 292struct btrfs_bio;
293typedef void (btrfs_bio_end_io_t) (struct btrfs_bio *bio, int err); 293typedef void (btrfs_bio_end_io_t) (struct btrfs_bio *bio, int err);
294 294
295#define BTRFS_BIO_ORIG_BIO_SUBMITTED 0x1 295#define BTRFS_BIO_ORIG_BIO_SUBMITTED (1 << 0)
296 296
297struct btrfs_bio { 297struct btrfs_bio {
298 atomic_t stripes_pending; 298 atomic_t stripes_pending;
@@ -305,6 +305,8 @@ struct btrfs_bio {
305 int max_errors; 305 int max_errors;
306 int num_stripes; 306 int num_stripes;
307 int mirror_num; 307 int mirror_num;
308 int num_tgtdevs;
309 int *tgtdev_map;
308 struct btrfs_bio_stripe stripes[]; 310 struct btrfs_bio_stripe stripes[];
309}; 311};
310 312
@@ -387,12 +389,18 @@ struct btrfs_balance_control {
387int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start, 389int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start,
388 u64 end, u64 *length); 390 u64 end, u64 *length);
389 391
390#define btrfs_bio_size(n) (sizeof(struct btrfs_bio) + \ 392#define btrfs_bio_size(total_stripes, real_stripes) \
391 (sizeof(struct btrfs_bio_stripe) * (n))) 393 (sizeof(struct btrfs_bio) + \
394 (sizeof(struct btrfs_bio_stripe) * (total_stripes)) + \
395 (sizeof(int) * (real_stripes)))
392 396
393int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw, 397int btrfs_map_block(struct btrfs_fs_info *fs_info, int rw,
394 u64 logical, u64 *length, 398 u64 logical, u64 *length,
395 struct btrfs_bio **bbio_ret, int mirror_num); 399 struct btrfs_bio **bbio_ret, int mirror_num);
400int btrfs_map_sblock(struct btrfs_fs_info *fs_info, int rw,
401 u64 logical, u64 *length,
402 struct btrfs_bio **bbio_ret, int mirror_num,
403 u64 **raid_map_ret);
396int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree, 404int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
397 u64 chunk_start, u64 physical, u64 devid, 405 u64 chunk_start, u64 physical, u64 devid,
398 u64 **logical, int *naddrs, int *stripe_len); 406 u64 **logical, int *naddrs, int *stripe_len);
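
The new btrfs_bio_size() sizes a single allocation that carries the struct, the flexible stripes[] array and a trailing array of ints for tgtdev_map; __btrfs_map_block() then aims tgtdev_map just past the last stripe, as seen in the volumes.c hunk above. Laid out explicitly:

size_t size = sizeof(struct btrfs_bio) +
	      total_stripes * sizeof(struct btrfs_bio_stripe) +
	      real_stripes * sizeof(int);
struct btrfs_bio *bbio = kzalloc(size, GFP_NOFS);

if (bbio && real_stripes)
	/* the int array starts right after the stripe array */
	bbio->tgtdev_map = (int *)(bbio->stripes + total_stripes);
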
@@ -448,8 +456,10 @@ void btrfs_init_devices_late(struct btrfs_fs_info *fs_info);
448int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info); 456int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
449int btrfs_run_dev_stats(struct btrfs_trans_handle *trans, 457int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
450 struct btrfs_fs_info *fs_info); 458 struct btrfs_fs_info *fs_info);
451void btrfs_rm_dev_replace_srcdev(struct btrfs_fs_info *fs_info, 459void btrfs_rm_dev_replace_remove_srcdev(struct btrfs_fs_info *fs_info,
452 struct btrfs_device *srcdev); 460 struct btrfs_device *srcdev);
461void btrfs_rm_dev_replace_free_srcdev(struct btrfs_fs_info *fs_info,
462 struct btrfs_device *srcdev);
453void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info, 463void btrfs_destroy_dev_replace_tgtdev(struct btrfs_fs_info *fs_info,
454 struct btrfs_device *tgtdev); 464 struct btrfs_device *tgtdev);
455void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info, 465void btrfs_init_dev_replace_tgtdev_for_resume(struct btrfs_fs_info *fs_info,
@@ -513,4 +523,16 @@ static inline void btrfs_dev_stat_reset(struct btrfs_device *dev,
513void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info); 523void btrfs_update_commit_device_size(struct btrfs_fs_info *fs_info);
514void btrfs_update_commit_device_bytes_used(struct btrfs_root *root, 524void btrfs_update_commit_device_bytes_used(struct btrfs_root *root,
515 struct btrfs_transaction *transaction); 525 struct btrfs_transaction *transaction);
526
527static inline void lock_chunks(struct btrfs_root *root)
528{
529 mutex_lock(&root->fs_info->chunk_mutex);
530}
531
532static inline void unlock_chunks(struct btrfs_root *root)
533{
534 mutex_unlock(&root->fs_info->chunk_mutex);
535}
536
537
516#endif 538#endif
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index dcf20131fbe4..47b19465f0dc 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -29,6 +29,7 @@
29#include "xattr.h" 29#include "xattr.h"
30#include "disk-io.h" 30#include "disk-io.h"
31#include "props.h" 31#include "props.h"
32#include "locking.h"
32 33
33 34
34ssize_t __btrfs_getxattr(struct inode *inode, const char *name, 35ssize_t __btrfs_getxattr(struct inode *inode, const char *name,
@@ -91,7 +92,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
91 struct inode *inode, const char *name, 92 struct inode *inode, const char *name,
92 const void *value, size_t size, int flags) 93 const void *value, size_t size, int flags)
93{ 94{
94 struct btrfs_dir_item *di; 95 struct btrfs_dir_item *di = NULL;
95 struct btrfs_root *root = BTRFS_I(inode)->root; 96 struct btrfs_root *root = BTRFS_I(inode)->root;
96 struct btrfs_path *path; 97 struct btrfs_path *path;
97 size_t name_len = strlen(name); 98 size_t name_len = strlen(name);
@@ -103,84 +104,119 @@ static int do_setxattr(struct btrfs_trans_handle *trans,
103 path = btrfs_alloc_path(); 104 path = btrfs_alloc_path();
104 if (!path) 105 if (!path)
105 return -ENOMEM; 106 return -ENOMEM;
107 path->skip_release_on_error = 1;
108
109 if (!value) {
110 di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode),
111 name, name_len, -1);
112 if (!di && (flags & XATTR_REPLACE))
113 ret = -ENODATA;
114 else if (di)
115 ret = btrfs_delete_one_dir_name(trans, root, path, di);
116 goto out;
117 }
106 118
119 /*
120 * For a replace we can't just do the insert blindly.
121 * Do a lookup first (read-only btrfs_search_slot), and return if xattr
122 * doesn't exist. If it exists, fall down below to the insert/replace
123 * path - we can't race with a concurrent xattr delete, because the VFS
124 * locks the inode's i_mutex before calling setxattr or removexattr.
125 */
107 if (flags & XATTR_REPLACE) { 126 if (flags & XATTR_REPLACE) {
108 di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name, 127 ASSERT(mutex_is_locked(&inode->i_mutex));
109 name_len, -1); 128 di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode),
110 if (IS_ERR(di)) { 129 name, name_len, 0);
111 ret = PTR_ERR(di); 130 if (!di) {
112 goto out;
113 } else if (!di) {
114 ret = -ENODATA; 131 ret = -ENODATA;
115 goto out; 132 goto out;
116 } 133 }
117 ret = btrfs_delete_one_dir_name(trans, root, path, di);
118 if (ret)
119 goto out;
120 btrfs_release_path(path); 134 btrfs_release_path(path);
135 di = NULL;
136 }
121 137
138 ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
139 name, name_len, value, size);
140 if (ret == -EOVERFLOW) {
122 /* 141 /*
123 * remove the attribute 142 * We have an existing item in a leaf, split_leaf couldn't
143 * expand it. That item might have or not a dir_item that
144 * matches our target xattr, so lets check.
124 */ 145 */
125 if (!value) 146 ret = 0;
126 goto out; 147 btrfs_assert_tree_locked(path->nodes[0]);
127 } else { 148 di = btrfs_match_dir_item_name(root, path, name, name_len);
128 di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), 149 if (!di && !(flags & XATTR_REPLACE)) {
129 name, name_len, 0); 150 ret = -ENOSPC;
130 if (IS_ERR(di)) {
131 ret = PTR_ERR(di);
132 goto out; 151 goto out;
133 } 152 }
134 if (!di && !value) 153 } else if (ret == -EEXIST) {
135 goto out; 154 ret = 0;
136 btrfs_release_path(path); 155 di = btrfs_match_dir_item_name(root, path, name, name_len);
156 ASSERT(di); /* logic error */
157 } else if (ret) {
158 goto out;
137 } 159 }
138 160
139again: 161 if (di && (flags & XATTR_CREATE)) {
140 ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode),
141 name, name_len, value, size);
142 /*
143 * If we're setting an xattr to a new value but the new value is say
144 * exactly BTRFS_MAX_XATTR_SIZE, we could end up with EOVERFLOW getting
145 * back from split_leaf. This is because it thinks we'll be extending
146 * the existing item size, but we're asking for enough space to add the
147 * item itself. So if we get EOVERFLOW just set ret to EEXIST and let
148 * the rest of the function figure it out.
149 */
150 if (ret == -EOVERFLOW)
151 ret = -EEXIST; 162 ret = -EEXIST;
163 goto out;
164 }
152 165
153 if (ret == -EEXIST) { 166 if (di) {
154 if (flags & XATTR_CREATE)
155 goto out;
156 /* 167 /*
157 * We can't use the path we already have since we won't have the 168 * We're doing a replace, and it must be atomic, that is, at
158 * proper locking for a delete, so release the path and 169 * any point in time we have either the old or the new xattr
159 * re-lookup to delete the thing. 170 * value in the tree. We don't want readers (getxattr and
171 * listxattrs) to miss a value; this is especially important
172 * for ACLs.
160 */ 173 */
161 btrfs_release_path(path); 174 const int slot = path->slots[0];
162 di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), 175 struct extent_buffer *leaf = path->nodes[0];
163 name, name_len, -1); 176 const u16 old_data_len = btrfs_dir_data_len(leaf, di);
164 if (IS_ERR(di)) { 177 const u32 item_size = btrfs_item_size_nr(leaf, slot);
165 ret = PTR_ERR(di); 178 const u32 data_size = sizeof(*di) + name_len + size;
166 goto out; 179 struct btrfs_item *item;
167 } else if (!di) { 180 unsigned long data_ptr;
168 /* Shouldn't happen but just in case... */ 181 char *ptr;
169 btrfs_release_path(path); 182
170 goto again; 183 if (size > old_data_len) {
184 if (btrfs_leaf_free_space(root, leaf) <
185 (size - old_data_len)) {
186 ret = -ENOSPC;
187 goto out;
188 }
171 } 189 }
172 190
173 ret = btrfs_delete_one_dir_name(trans, root, path, di); 191 if (old_data_len + name_len + sizeof(*di) == item_size) {
174 if (ret) 192 /* No other xattrs packed in the same leaf item. */
175 goto out; 193 if (size > old_data_len)
194 btrfs_extend_item(root, path,
195 size - old_data_len);
196 else if (size < old_data_len)
197 btrfs_truncate_item(root, path, data_size, 1);
198 } else {
199 /* There are other xattrs packed in the same item. */
200 ret = btrfs_delete_one_dir_name(trans, root, path, di);
201 if (ret)
202 goto out;
203 btrfs_extend_item(root, path, data_size);
204 }
176 205
206 item = btrfs_item_nr(slot);
207 ptr = btrfs_item_ptr(leaf, slot, char);
208 ptr += btrfs_item_size(leaf, item) - data_size;
209 di = (struct btrfs_dir_item *)ptr;
210 btrfs_set_dir_data_len(leaf, di, size);
211 data_ptr = ((unsigned long)(di + 1)) + name_len;
212 write_extent_buffer(leaf, value, data_ptr, size);
213 btrfs_mark_buffer_dirty(leaf);
214 } else {
177 /* 215 /*
178 * We have a value to set, so go back and try to insert it now. 216 * Insert, and we had space for the xattr, so path->slots[0] is
217 * where our xattr dir_item is and btrfs_insert_xattr_item()
218 * filled it.
179 */ 219 */
180 if (value) {
181 btrfs_release_path(path);
182 goto again;
183 }
184 } 220 }
185out: 221out:
186 btrfs_free_path(path); 222 btrfs_free_path(path);
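
The rewritten do_setxattr() resolves to three outcomes of btrfs_insert_xattr_item(): 0 means a fresh item was inserted and filled; -EEXIST means the leaf item exists and the matching dir_item, if any, is replaced in place; -EOVERFLOW means split_leaf could not grow the item, which is only fatal for a pure create. The in-place replace keeps the change atomic for readers by resizing the leaf item first and only then rewriting the dir_item tail. A sketch of that step, with names as in the hunk (di and data_ptr are recomputed after the resize in the real code):

if (size > old_data_len)
	btrfs_extend_item(root, path, size - old_data_len);
else if (size < old_data_len)
	btrfs_truncate_item(root, path, data_size, 1);

btrfs_set_dir_data_len(leaf, di, size);		/* new length first */
write_extent_buffer(leaf, value, data_ptr, size);
btrfs_mark_buffer_dirty(leaf);
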
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index db3f772e57ae..a75fba67bb1f 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -158,17 +158,8 @@ struct ext4_allocation_request {
158#define EXT4_MAP_MAPPED (1 << BH_Mapped) 158#define EXT4_MAP_MAPPED (1 << BH_Mapped)
159#define EXT4_MAP_UNWRITTEN (1 << BH_Unwritten) 159#define EXT4_MAP_UNWRITTEN (1 << BH_Unwritten)
160#define EXT4_MAP_BOUNDARY (1 << BH_Boundary) 160#define EXT4_MAP_BOUNDARY (1 << BH_Boundary)
161/* Sometimes (in the bigalloc case, from ext4_da_get_block_prep) the caller of
162 * ext4_map_blocks wants to know whether or not the underlying cluster has
163 * already been accounted for. EXT4_MAP_FROM_CLUSTER conveys to the caller that
164 * the requested mapping was from previously mapped (or delayed allocated)
165 * cluster. We use BH_AllocFromCluster only for this flag. BH_AllocFromCluster
166 * should never appear on buffer_head's state flags.
167 */
168#define EXT4_MAP_FROM_CLUSTER (1 << BH_AllocFromCluster)
169#define EXT4_MAP_FLAGS (EXT4_MAP_NEW | EXT4_MAP_MAPPED |\ 161#define EXT4_MAP_FLAGS (EXT4_MAP_NEW | EXT4_MAP_MAPPED |\
170 EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY |\ 162 EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY)
171 EXT4_MAP_FROM_CLUSTER)
172 163
173struct ext4_map_blocks { 164struct ext4_map_blocks {
174 ext4_fsblk_t m_pblk; 165 ext4_fsblk_t m_pblk;
@@ -565,10 +556,8 @@ enum {
565#define EXT4_GET_BLOCKS_KEEP_SIZE 0x0080 556#define EXT4_GET_BLOCKS_KEEP_SIZE 0x0080
566 /* Do not take i_data_sem locking in ext4_map_blocks */ 557 /* Do not take i_data_sem locking in ext4_map_blocks */
567#define EXT4_GET_BLOCKS_NO_LOCK 0x0100 558#define EXT4_GET_BLOCKS_NO_LOCK 0x0100
568 /* Do not put hole in extent cache */
569#define EXT4_GET_BLOCKS_NO_PUT_HOLE 0x0200
570 /* Convert written extents to unwritten */ 559 /* Convert written extents to unwritten */
571#define EXT4_GET_BLOCKS_CONVERT_UNWRITTEN 0x0400 560#define EXT4_GET_BLOCKS_CONVERT_UNWRITTEN 0x0200
572 561
573/* 562/*
574 * The bit position of these flags must not overlap with any of the 563 * The bit position of these flags must not overlap with any of the
@@ -889,10 +878,12 @@ struct ext4_inode_info {
889 /* extents status tree */ 878 /* extents status tree */
890 struct ext4_es_tree i_es_tree; 879 struct ext4_es_tree i_es_tree;
891 rwlock_t i_es_lock; 880 rwlock_t i_es_lock;
892 struct list_head i_es_lru; 881 struct list_head i_es_list;
893 unsigned int i_es_all_nr; /* protected by i_es_lock */ 882 unsigned int i_es_all_nr; /* protected by i_es_lock */
894 unsigned int i_es_lru_nr; /* protected by i_es_lock */ 883 unsigned int i_es_shk_nr; /* protected by i_es_lock */
895 unsigned long i_touch_when; /* jiffies of last accessing */ 884 ext4_lblk_t i_es_shrink_lblk; /* Offset where we start searching for
885 extents to shrink. Protected by
886 i_es_lock */
896 887
897 /* ialloc */ 888 /* ialloc */
898 ext4_group_t i_last_alloc_group; 889 ext4_group_t i_last_alloc_group;
@@ -1337,10 +1328,11 @@ struct ext4_sb_info {
1337 1328
1338 /* Reclaim extents from extent status tree */ 1329 /* Reclaim extents from extent status tree */
1339 struct shrinker s_es_shrinker; 1330 struct shrinker s_es_shrinker;
1340 struct list_head s_es_lru; 1331 struct list_head s_es_list; /* List of inodes with reclaimable extents */
1332 long s_es_nr_inode;
1341 struct ext4_es_stats s_es_stats; 1333 struct ext4_es_stats s_es_stats;
1342 struct mb_cache *s_mb_cache; 1334 struct mb_cache *s_mb_cache;
1343 spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp; 1335 spinlock_t s_es_lock ____cacheline_aligned_in_smp;
1344 1336
1345 /* Ratelimit ext4 messages. */ 1337 /* Ratelimit ext4 messages. */
1346 struct ratelimit_state s_err_ratelimit_state; 1338 struct ratelimit_state s_err_ratelimit_state;
@@ -2196,7 +2188,6 @@ extern int ext4_calculate_overhead(struct super_block *sb);
2196extern void ext4_superblock_csum_set(struct super_block *sb); 2188extern void ext4_superblock_csum_set(struct super_block *sb);
2197extern void *ext4_kvmalloc(size_t size, gfp_t flags); 2189extern void *ext4_kvmalloc(size_t size, gfp_t flags);
2198extern void *ext4_kvzalloc(size_t size, gfp_t flags); 2190extern void *ext4_kvzalloc(size_t size, gfp_t flags);
2199extern void ext4_kvfree(void *ptr);
2200extern int ext4_alloc_flex_bg_array(struct super_block *sb, 2191extern int ext4_alloc_flex_bg_array(struct super_block *sb,
2201 ext4_group_t ngroup); 2192 ext4_group_t ngroup);
2202extern const char *ext4_decode_error(struct super_block *sb, int errno, 2193extern const char *ext4_decode_error(struct super_block *sb, int errno,
@@ -2647,7 +2638,7 @@ extern struct buffer_head *ext4_get_first_inline_block(struct inode *inode,
2647 int *retval); 2638 int *retval);
2648extern int ext4_inline_data_fiemap(struct inode *inode, 2639extern int ext4_inline_data_fiemap(struct inode *inode,
2649 struct fiemap_extent_info *fieinfo, 2640 struct fiemap_extent_info *fieinfo,
2650 int *has_inline); 2641 int *has_inline, __u64 start, __u64 len);
2651extern int ext4_try_to_evict_inline_data(handle_t *handle, 2642extern int ext4_try_to_evict_inline_data(handle_t *handle,
2652 struct inode *inode, 2643 struct inode *inode,
2653 int needed); 2644 int needed);
@@ -2795,16 +2786,6 @@ extern int ext4_bio_write_page(struct ext4_io_submit *io,
2795extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t); 2786extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t);
2796 2787
2797/* 2788/*
2798 * Note that these flags will never ever appear in a buffer_head's state flag.
2799 * See EXT4_MAP_... to see where this is used.
2800 */
2801enum ext4_state_bits {
2802 BH_AllocFromCluster /* allocated blocks were part of already
2803 * allocated cluster. */
2804 = BH_JBDPrivateStart
2805};
2806
2807/*
2808 * Add new method to test whether block and inode bitmaps are properly 2789 * Add new method to test whether block and inode bitmaps are properly
2809 * initialized. With uninit_bg reading the block from disk is not enough 2790 * initialized. With uninit_bg reading the block from disk is not enough
2810 * to mark the bitmap uptodate. We need to also zero-out the bitmap 2791 * to mark the bitmap uptodate. We need to also zero-out the bitmap
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 0b16fb4c06d3..e5d3eadf47b1 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -2306,16 +2306,16 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
2306 ext4_lblk_t block) 2306 ext4_lblk_t block)
2307{ 2307{
2308 int depth = ext_depth(inode); 2308 int depth = ext_depth(inode);
2309 unsigned long len = 0; 2309 ext4_lblk_t len;
2310 ext4_lblk_t lblock = 0; 2310 ext4_lblk_t lblock;
2311 struct ext4_extent *ex; 2311 struct ext4_extent *ex;
2312 struct extent_status es;
2312 2313
2313 ex = path[depth].p_ext; 2314 ex = path[depth].p_ext;
2314 if (ex == NULL) { 2315 if (ex == NULL) {
2315 /* 2316 /* there is no extent yet, so gap is [0;-] */
2316 * there is no extent yet, so gap is [0;-] and we 2317 lblock = 0;
2317 * don't cache it 2318 len = EXT_MAX_BLOCKS;
2318 */
2319 ext_debug("cache gap(whole file):"); 2319 ext_debug("cache gap(whole file):");
2320 } else if (block < le32_to_cpu(ex->ee_block)) { 2320 } else if (block < le32_to_cpu(ex->ee_block)) {
2321 lblock = block; 2321 lblock = block;
@@ -2324,9 +2324,6 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
2324 block, 2324 block,
2325 le32_to_cpu(ex->ee_block), 2325 le32_to_cpu(ex->ee_block),
2326 ext4_ext_get_actual_len(ex)); 2326 ext4_ext_get_actual_len(ex));
2327 if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
2328 ext4_es_insert_extent(inode, lblock, len, ~0,
2329 EXTENT_STATUS_HOLE);
2330 } else if (block >= le32_to_cpu(ex->ee_block) 2327 } else if (block >= le32_to_cpu(ex->ee_block)
2331 + ext4_ext_get_actual_len(ex)) { 2328 + ext4_ext_get_actual_len(ex)) {
2332 ext4_lblk_t next; 2329 ext4_lblk_t next;
@@ -2340,14 +2337,19 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path,
2340 block); 2337 block);
2341 BUG_ON(next == lblock); 2338 BUG_ON(next == lblock);
2342 len = next - lblock; 2339 len = next - lblock;
2343 if (!ext4_find_delalloc_range(inode, lblock, lblock + len - 1))
2344 ext4_es_insert_extent(inode, lblock, len, ~0,
2345 EXTENT_STATUS_HOLE);
2346 } else { 2340 } else {
2347 BUG(); 2341 BUG();
2348 } 2342 }
2349 2343
2350 ext_debug(" -> %u:%lu\n", lblock, len); 2344 ext4_es_find_delayed_extent_range(inode, lblock, lblock + len - 1, &es);
2345 if (es.es_len) {
2346 /* Is there a delayed extent containing lblock? */
2347 if (es.es_lblk <= lblock)
2348 return;
2349 len = min(es.es_lblk - lblock, len);
2350 }
2351 ext_debug(" -> %u:%u\n", lblock, len);
2352 ext4_es_insert_extent(inode, lblock, len, ~0, EXTENT_STATUS_HOLE);
2351} 2353}
2352 2354
2353/* 2355/*
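
The net effect of the rewritten tail of ext4_ext_put_gap_in_cache(): the hole is always cached, but first clipped against any delayed extent in [lblock, lblock + len - 1]. For instance, with a gap of [100, 199] and a delayed extent starting at block 150, only [100, 149] is inserted as EXTENT_STATUS_HOLE; if the delayed extent started at or before block 100, nothing would be cached at all:

ext4_es_find_delayed_extent_range(inode, lblock, lblock + len - 1, &es);
if (es.es_len) {
	if (es.es_lblk <= lblock)
		return;		/* delayed data covers the gap's start */
	len = min(es.es_lblk - lblock, len);	/* e.g. 150 - 100 = 50 */
}
ext4_es_insert_extent(inode, lblock, len, ~0, EXTENT_STATUS_HOLE);
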
@@ -2481,7 +2483,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2481 ext4_lblk_t from, ext4_lblk_t to) 2483 ext4_lblk_t from, ext4_lblk_t to)
2482{ 2484{
2483 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 2485 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2484 unsigned short ee_len = ext4_ext_get_actual_len(ex); 2486 unsigned short ee_len = ext4_ext_get_actual_len(ex);
2485 ext4_fsblk_t pblk; 2487 ext4_fsblk_t pblk;
2486 int flags = get_default_free_blocks_flags(inode); 2488 int flags = get_default_free_blocks_flags(inode);
2487 2489
@@ -2490,7 +2492,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2490 * at the beginning of the extent. Instead, we make a note 2492 * at the beginning of the extent. Instead, we make a note
2491 * that we tried freeing the cluster, and check to see if we 2493 * that we tried freeing the cluster, and check to see if we
2492 * need to free it on a subsequent call to ext4_remove_blocks, 2494 * need to free it on a subsequent call to ext4_remove_blocks,
2493 * or at the end of the ext4_truncate() operation. 2495 * or at the end of ext4_ext_rm_leaf or ext4_ext_remove_space.
2494 */ 2496 */
2495 flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER; 2497 flags |= EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER;
2496 2498
@@ -2501,8 +2503,8 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2501 * partial cluster here. 2503 * partial cluster here.
2502 */ 2504 */
2503 pblk = ext4_ext_pblock(ex) + ee_len - 1; 2505 pblk = ext4_ext_pblock(ex) + ee_len - 1;
2504 if ((*partial_cluster > 0) && 2506 if (*partial_cluster > 0 &&
2505 (EXT4_B2C(sbi, pblk) != *partial_cluster)) { 2507 *partial_cluster != (long long) EXT4_B2C(sbi, pblk)) {
2506 ext4_free_blocks(handle, inode, NULL, 2508 ext4_free_blocks(handle, inode, NULL,
2507 EXT4_C2B(sbi, *partial_cluster), 2509 EXT4_C2B(sbi, *partial_cluster),
2508 sbi->s_cluster_ratio, flags); 2510 sbi->s_cluster_ratio, flags);
@@ -2528,7 +2530,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2528 && to == le32_to_cpu(ex->ee_block) + ee_len - 1) { 2530 && to == le32_to_cpu(ex->ee_block) + ee_len - 1) {
2529 /* tail removal */ 2531 /* tail removal */
2530 ext4_lblk_t num; 2532 ext4_lblk_t num;
2531 unsigned int unaligned; 2533 long long first_cluster;
2532 2534
2533 num = le32_to_cpu(ex->ee_block) + ee_len - from; 2535 num = le32_to_cpu(ex->ee_block) + ee_len - from;
2534 pblk = ext4_ext_pblock(ex) + ee_len - num; 2536 pblk = ext4_ext_pblock(ex) + ee_len - num;
@@ -2538,7 +2540,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2538 * used by any other extent (partial_cluster is negative). 2540 * used by any other extent (partial_cluster is negative).
2539 */ 2541 */
2540 if (*partial_cluster < 0 && 2542 if (*partial_cluster < 0 &&
2541 -(*partial_cluster) == EXT4_B2C(sbi, pblk + num - 1)) 2543 *partial_cluster == -(long long) EXT4_B2C(sbi, pblk+num-1))
2542 flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER; 2544 flags |= EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER;
2543 2545
2544 ext_debug("free last %u blocks starting %llu partial %lld\n", 2546 ext_debug("free last %u blocks starting %llu partial %lld\n",
@@ -2549,21 +2551,24 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2549 * beginning of a cluster, and we removed the entire 2551 * beginning of a cluster, and we removed the entire
2550 * extent and the cluster is not used by any other extent, 2552 * extent and the cluster is not used by any other extent,
2551 * save the partial cluster here, since we might need to 2553 * save the partial cluster here, since we might need to
2552 * delete if we determine that the truncate operation has 2554 * delete if we determine that the truncate or punch hole
2553 * removed all of the blocks in the cluster. 2555 * operation has removed all of the blocks in the cluster.
2556 * If that cluster is used by another extent, preserve its
2557 * negative value so it isn't freed later on.
2554 * 2558 *
2555 * On the other hand, if we did not manage to free the whole 2559 * If the whole extent wasn't freed, we've reached the
2556 * extent, we have to mark the cluster as used (store negative 2560 * start of the truncated/punched region and have finished
2557 * cluster number in partial_cluster). 2561 * removing blocks. If there's a partial cluster here it's
2562 * shared with the remainder of the extent and is no longer
2563 * a candidate for removal.
2558 */ 2564 */
2559 unaligned = EXT4_PBLK_COFF(sbi, pblk); 2565 if (EXT4_PBLK_COFF(sbi, pblk) && ee_len == num) {
2560 if (unaligned && (ee_len == num) && 2566 first_cluster = (long long) EXT4_B2C(sbi, pblk);
2561 (*partial_cluster != -((long long)EXT4_B2C(sbi, pblk)))) 2567 if (first_cluster != -*partial_cluster)
2562 *partial_cluster = EXT4_B2C(sbi, pblk); 2568 *partial_cluster = first_cluster;
2563 else if (unaligned) 2569 } else {
2564 *partial_cluster = -((long long)EXT4_B2C(sbi, pblk));
2565 else if (*partial_cluster > 0)
2566 *partial_cluster = 0; 2570 *partial_cluster = 0;
2571 }
2567 } else 2572 } else
2568 ext4_error(sbi->s_sb, "strange request: removal(2) " 2573 ext4_error(sbi->s_sb, "strange request: removal(2) "
2569 "%u-%u from %u:%u\n", 2574 "%u-%u from %u:%u\n",
@@ -2574,15 +2579,16 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode,
2574 2579
2575/* 2580/*
2576 * ext4_ext_rm_leaf() Removes the extents associated with the 2581 * ext4_ext_rm_leaf() Removes the extents associated with the
2577 * blocks appearing between "start" and "end", and splits the extents 2582 * blocks appearing between "start" and "end". Both "start"
2578 * if "start" and "end" appear in the same extent 2583 * and "end" must appear in the same extent or EIO is returned.
2579 * 2584 *
2580 * @handle: The journal handle 2585 * @handle: The journal handle
2581 * @inode: The file's inode 2586 * @inode: The file's inode
2582 * @path: The path to the leaf 2587 * @path: The path to the leaf
2583 * @partial_cluster: The cluster which we'll have to free if all extents 2588 * @partial_cluster: The cluster which we'll have to free if all extents
2584 * have been released from it. It gets negative in case 2589 * have been released from it. However, if this value is
2585 * that the cluster is still used. 2590 * negative, it's a cluster just to the right of the
2591 * punched region and it must not be freed.
2586 * @start: The first block to remove 2592 * @start: The first block to remove
2587 * @end: The last block to remove 2593 * @end: The last block to remove
2588 */ 2594 */
@@ -2621,27 +2627,6 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2621 ex_ee_block = le32_to_cpu(ex->ee_block); 2627 ex_ee_block = le32_to_cpu(ex->ee_block);
2622 ex_ee_len = ext4_ext_get_actual_len(ex); 2628 ex_ee_len = ext4_ext_get_actual_len(ex);
2623 2629
2624 /*
2625 * If we're starting with an extent other than the last one in the
2626 * node, we need to see if it shares a cluster with the extent to
2627 * the right (towards the end of the file). If its leftmost cluster
2628 * is this extent's rightmost cluster and it is not cluster aligned,
2629 * we'll mark it as a partial that is not to be deallocated.
2630 */
2631
2632 if (ex != EXT_LAST_EXTENT(eh)) {
2633 ext4_fsblk_t current_pblk, right_pblk;
2634 long long current_cluster, right_cluster;
2635
2636 current_pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
2637 current_cluster = (long long)EXT4_B2C(sbi, current_pblk);
2638 right_pblk = ext4_ext_pblock(ex + 1);
2639 right_cluster = (long long)EXT4_B2C(sbi, right_pblk);
2640 if (current_cluster == right_cluster &&
2641 EXT4_PBLK_COFF(sbi, right_pblk))
2642 *partial_cluster = -right_cluster;
2643 }
2644
2645 trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster); 2630 trace_ext4_ext_rm_leaf(inode, start, ex, *partial_cluster);
2646 2631
2647 while (ex >= EXT_FIRST_EXTENT(eh) && 2632 while (ex >= EXT_FIRST_EXTENT(eh) &&
@@ -2666,14 +2651,16 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2666 if (end < ex_ee_block) { 2651 if (end < ex_ee_block) {
2667 /* 2652 /*
2668 * We're going to skip this extent and move to another, 2653 * We're going to skip this extent and move to another,
2669 * so if this extent is not cluster aligned we have 2654 * so note that its first cluster is in use to avoid
2670 * to mark the current cluster as used to avoid 2655 * freeing it when removing blocks. Eventually, the
2671 * accidentally freeing it later on 2656 * right edge of the truncated/punched region will
2657 * be just to the left.
2672 */ 2658 */
2673 pblk = ext4_ext_pblock(ex); 2659 if (sbi->s_cluster_ratio > 1) {
2674 if (EXT4_PBLK_COFF(sbi, pblk)) 2660 pblk = ext4_ext_pblock(ex);
2675 *partial_cluster = 2661 *partial_cluster =
2676 -((long long)EXT4_B2C(sbi, pblk)); 2662 -(long long) EXT4_B2C(sbi, pblk);
2663 }
2677 ex--; 2664 ex--;
2678 ex_ee_block = le32_to_cpu(ex->ee_block); 2665 ex_ee_block = le32_to_cpu(ex->ee_block);
2679 ex_ee_len = ext4_ext_get_actual_len(ex); 2666 ex_ee_len = ext4_ext_get_actual_len(ex);
@@ -2749,8 +2736,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2749 sizeof(struct ext4_extent)); 2736 sizeof(struct ext4_extent));
2750 } 2737 }
2751 le16_add_cpu(&eh->eh_entries, -1); 2738 le16_add_cpu(&eh->eh_entries, -1);
2752 } else if (*partial_cluster > 0) 2739 }
2753 *partial_cluster = 0;
2754 2740
2755 err = ext4_ext_dirty(handle, inode, path + depth); 2741 err = ext4_ext_dirty(handle, inode, path + depth);
2756 if (err) 2742 if (err)
@@ -2769,20 +2755,18 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode,
2769 /* 2755 /*
2770 * If there's a partial cluster and at least one extent remains in 2756 * If there's a partial cluster and at least one extent remains in
2771 * the leaf, free the partial cluster if it isn't shared with the 2757 * the leaf, free the partial cluster if it isn't shared with the
2772 * current extent. If there's a partial cluster and no extents 2758 * current extent. If it is shared with the current extent
2773 * remain in the leaf, it can't be freed here. It can only be 2759 * we zero partial_cluster because we've reached the start of the
2774 * freed when it's possible to determine if it's not shared with 2760 * truncated/punched region and we're done removing blocks.
2775 * any other extent - when the next leaf is processed or when space
2776 * removal is complete.
2777 */ 2761 */
2778 if (*partial_cluster > 0 && eh->eh_entries && 2762 if (*partial_cluster > 0 && ex >= EXT_FIRST_EXTENT(eh)) {
2779 (EXT4_B2C(sbi, ext4_ext_pblock(ex) + ex_ee_len - 1) != 2763 pblk = ext4_ext_pblock(ex) + ex_ee_len - 1;
2780 *partial_cluster)) { 2764 if (*partial_cluster != (long long) EXT4_B2C(sbi, pblk)) {
2781 int flags = get_default_free_blocks_flags(inode); 2765 ext4_free_blocks(handle, inode, NULL,
2782 2766 EXT4_C2B(sbi, *partial_cluster),
2783 ext4_free_blocks(handle, inode, NULL, 2767 sbi->s_cluster_ratio,
2784 EXT4_C2B(sbi, *partial_cluster), 2768 get_default_free_blocks_flags(inode));
2785 sbi->s_cluster_ratio, flags); 2769 }
2786 *partial_cluster = 0; 2770 *partial_cluster = 0;
2787 } 2771 }
2788 2772
@@ -2819,7 +2803,7 @@ ext4_ext_more_to_rm(struct ext4_ext_path *path)
2819int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, 2803int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
2820 ext4_lblk_t end) 2804 ext4_lblk_t end)
2821{ 2805{
2822 struct super_block *sb = inode->i_sb; 2806 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
2823 int depth = ext_depth(inode); 2807 int depth = ext_depth(inode);
2824 struct ext4_ext_path *path = NULL; 2808 struct ext4_ext_path *path = NULL;
2825 long long partial_cluster = 0; 2809 long long partial_cluster = 0;
@@ -2845,9 +2829,10 @@ again:
2845 */ 2829 */
2846 if (end < EXT_MAX_BLOCKS - 1) { 2830 if (end < EXT_MAX_BLOCKS - 1) {
2847 struct ext4_extent *ex; 2831 struct ext4_extent *ex;
2848 ext4_lblk_t ee_block; 2832 ext4_lblk_t ee_block, ex_end, lblk;
2833 ext4_fsblk_t pblk;
2849 2834
2850 /* find extent for this block */ 2835 /* find extent for, or closest extent to, this block */
2851 path = ext4_find_extent(inode, end, NULL, EXT4_EX_NOCACHE); 2836 path = ext4_find_extent(inode, end, NULL, EXT4_EX_NOCACHE);
2852 if (IS_ERR(path)) { 2837 if (IS_ERR(path)) {
2853 ext4_journal_stop(handle); 2838 ext4_journal_stop(handle);
@@ -2867,6 +2852,7 @@ again:
2867 } 2852 }
2868 2853
2869 ee_block = le32_to_cpu(ex->ee_block); 2854 ee_block = le32_to_cpu(ex->ee_block);
2855 ex_end = ee_block + ext4_ext_get_actual_len(ex) - 1;
2870 2856
2871 /* 2857 /*
2872 * See if the last block is inside the extent, if so split 2858 * See if the last block is inside the extent, if so split
@@ -2874,8 +2860,19 @@ again:
2874 * tail of the first part of the split extent in 2860 * tail of the first part of the split extent in
2875 * ext4_ext_rm_leaf(). 2861 * ext4_ext_rm_leaf().
2876 */ 2862 */
2877 if (end >= ee_block && 2863 if (end >= ee_block && end < ex_end) {
2878 end < ee_block + ext4_ext_get_actual_len(ex) - 1) { 2864
2865 /*
2866 * If we're going to split the extent, note that
2867 * the cluster containing the block after 'end' is
2868 * in use to avoid freeing it when removing blocks.
2869 */
2870 if (sbi->s_cluster_ratio > 1) {
2871 pblk = ext4_ext_pblock(ex) + end - ee_block + 2;
2872 partial_cluster =
2873 -(long long) EXT4_B2C(sbi, pblk);
2874 }
2875
2879 /* 2876 /*
2880 * Split the extent in two so that 'end' is the last 2877 * Split the extent in two so that 'end' is the last
2881 * block in the first new extent. Also we should not 2878 * block in the first new extent. Also we should not
@@ -2886,6 +2883,24 @@ again:
2886 end + 1, 1); 2883 end + 1, 1);
2887 if (err < 0) 2884 if (err < 0)
2888 goto out; 2885 goto out;
2886
2887 } else if (sbi->s_cluster_ratio > 1 && end >= ex_end) {
2888 /*
2889 * If there's an extent to the right its first cluster
2890 * contains the immediate right boundary of the
2891 * truncated/punched region. Set partial_cluster to
2892 * its negative value so it won't be freed if shared
2893 * with the current extent. The end < ee_block case
2894 * is handled in ext4_ext_rm_leaf().
2895 */
2896 lblk = ex_end + 1;
2897 err = ext4_ext_search_right(inode, path, &lblk, &pblk,
2898 &ex);
2899 if (err)
2900 goto out;
2901 if (pblk)
2902 partial_cluster =
2903 -(long long) EXT4_B2C(sbi, pblk);
2889 } 2904 }
2890 } 2905 }
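
Both new branches pre-register the cluster immediately to the right of the punched region as in use before any blocks are removed: the cluster of the block following 'end' when the extent will be split, or the first cluster of the next extent located with ext4_ext_search_right(). A sketch of that bookkeeping; CLUSTER_RATIO and B2C() stand in for the superblock's cluster ratio and EXT4_B2C():

#include <stdio.h>

#define CLUSTER_RATIO 16        /* blocks per cluster, illustrative */
#define B2C(pblk)     ((long long)((pblk) / CLUSTER_RATIO))

/* Mark the cluster holding 'right_pblk' (the first physical block beyond
 * the punched region) as in use so the removal pass never frees it. */
static void note_right_boundary(long long *partial_cluster,
                                unsigned long long right_pblk)
{
        *partial_cluster = -B2C(right_pblk);
}

int main(void)
{
        long long partial = 0;

        note_right_boundary(&partial, 170);
        printf("%lld\n", partial);      /* -10 */
        return 0;
}
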
2891 /* 2906 /*
@@ -2996,16 +3011,18 @@ again:
2996 trace_ext4_ext_remove_space_done(inode, start, end, depth, 3011 trace_ext4_ext_remove_space_done(inode, start, end, depth,
2997 partial_cluster, path->p_hdr->eh_entries); 3012 partial_cluster, path->p_hdr->eh_entries);
2998 3013
2999 /* If we still have something in the partial cluster and we have removed 3014 /*
3015 * If we still have something in the partial cluster and we have removed
3000 * even the first extent, then we should free the blocks in the partial 3016 * even the first extent, then we should free the blocks in the partial
3001 * cluster as well. */ 3017 * cluster as well. (This code will only run when there are no leaves
3002 if (partial_cluster > 0 && path->p_hdr->eh_entries == 0) { 3018 * to the immediate left of the truncated/punched region.)
3003 int flags = get_default_free_blocks_flags(inode); 3019 */
3004 3020 if (partial_cluster > 0 && err == 0) {
3021 /* don't zero partial_cluster since it's not used afterwards */
3005 ext4_free_blocks(handle, inode, NULL, 3022 ext4_free_blocks(handle, inode, NULL,
3006 EXT4_C2B(EXT4_SB(sb), partial_cluster), 3023 EXT4_C2B(sbi, partial_cluster),
3007 EXT4_SB(sb)->s_cluster_ratio, flags); 3024 sbi->s_cluster_ratio,
3008 partial_cluster = 0; 3025 get_default_free_blocks_flags(inode));
3009 } 3026 }
3010 3027
3011 /* TODO: flexible tree reduction should be here */ 3028 /* TODO: flexible tree reduction should be here */
@@ -4267,6 +4284,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4267 ext4_io_end_t *io = ext4_inode_aio(inode); 4284 ext4_io_end_t *io = ext4_inode_aio(inode);
4268 ext4_lblk_t cluster_offset; 4285 ext4_lblk_t cluster_offset;
4269 int set_unwritten = 0; 4286 int set_unwritten = 0;
4287 bool map_from_cluster = false;
4270 4288
4271 ext_debug("blocks %u/%u requested for inode %lu\n", 4289 ext_debug("blocks %u/%u requested for inode %lu\n",
4272 map->m_lblk, map->m_len, inode->i_ino); 4290 map->m_lblk, map->m_len, inode->i_ino);
@@ -4343,10 +4361,6 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4343 } 4361 }
4344 } 4362 }
4345 4363
4346 if ((sbi->s_cluster_ratio > 1) &&
4347 ext4_find_delalloc_cluster(inode, map->m_lblk))
4348 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
4349
4350 /* 4364 /*
4351 * requested block isn't allocated yet; 4365 * requested block isn't allocated yet;
4352 * we couldn't try to create block if create flag is zero 4366 * we couldn't try to create block if create flag is zero
@@ -4356,15 +4370,13 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4356 * put just found gap into cache to speed up 4370 * put just found gap into cache to speed up
4357 * subsequent requests 4371 * subsequent requests
4358 */ 4372 */
4359 if ((flags & EXT4_GET_BLOCKS_NO_PUT_HOLE) == 0) 4373 ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
4360 ext4_ext_put_gap_in_cache(inode, path, map->m_lblk);
4361 goto out2; 4374 goto out2;
4362 } 4375 }
4363 4376
4364 /* 4377 /*
4365 * Okay, we need to do block allocation. 4378 * Okay, we need to do block allocation.
4366 */ 4379 */
4367 map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
4368 newex.ee_block = cpu_to_le32(map->m_lblk); 4380 newex.ee_block = cpu_to_le32(map->m_lblk);
4369 cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); 4381 cluster_offset = EXT4_LBLK_COFF(sbi, map->m_lblk);
4370 4382
@@ -4376,7 +4388,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4376 get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { 4388 get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
4377 ar.len = allocated = map->m_len; 4389 ar.len = allocated = map->m_len;
4378 newblock = map->m_pblk; 4390 newblock = map->m_pblk;
4379 map->m_flags |= EXT4_MAP_FROM_CLUSTER; 4391 map_from_cluster = true;
4380 goto got_allocated_blocks; 4392 goto got_allocated_blocks;
4381 } 4393 }
4382 4394
@@ -4397,7 +4409,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4397 get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) { 4409 get_implied_cluster_alloc(inode->i_sb, map, ex2, path)) {
4398 ar.len = allocated = map->m_len; 4410 ar.len = allocated = map->m_len;
4399 newblock = map->m_pblk; 4411 newblock = map->m_pblk;
4400 map->m_flags |= EXT4_MAP_FROM_CLUSTER; 4412 map_from_cluster = true;
4401 goto got_allocated_blocks; 4413 goto got_allocated_blocks;
4402 } 4414 }
4403 4415
@@ -4523,7 +4535,7 @@ got_allocated_blocks:
4523 */ 4535 */
4524 reserved_clusters = get_reserved_cluster_alloc(inode, 4536 reserved_clusters = get_reserved_cluster_alloc(inode,
4525 map->m_lblk, allocated); 4537 map->m_lblk, allocated);
4526 if (map->m_flags & EXT4_MAP_FROM_CLUSTER) { 4538 if (map_from_cluster) {
4527 if (reserved_clusters) { 4539 if (reserved_clusters) {
4528 /* 4540 /*
4529 * We have clusters reserved for this range. 4541 * We have clusters reserved for this range.
@@ -4620,7 +4632,6 @@ out2:
4620 4632
4621 trace_ext4_ext_map_blocks_exit(inode, flags, map, 4633 trace_ext4_ext_map_blocks_exit(inode, flags, map,
4622 err ? err : allocated); 4634 err ? err : allocated);
4623 ext4_es_lru_add(inode);
4624 return err ? err : allocated; 4635 return err ? err : allocated;
4625} 4636}
4626 4637
@@ -5140,7 +5151,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
5140 if (ext4_has_inline_data(inode)) { 5151 if (ext4_has_inline_data(inode)) {
5141 int has_inline = 1; 5152 int has_inline = 1;
5142 5153
5143 error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline); 5154 error = ext4_inline_data_fiemap(inode, fieinfo, &has_inline,
5155 start, len);
5144 5156
5145 if (has_inline) 5157 if (has_inline)
5146 return error; 5158 return error;
@@ -5154,8 +5166,8 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
5154 5166
5155 /* fallback to generic here if not in extents fmt */ 5167 /* fallback to generic here if not in extents fmt */
5156 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 5168 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
5157 return generic_block_fiemap(inode, fieinfo, start, len, 5169 return __generic_block_fiemap(inode, fieinfo, start, len,
5158 ext4_get_block); 5170 ext4_get_block);
5159 5171
5160 if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS)) 5172 if (fiemap_check_flags(fieinfo, EXT4_FIEMAP_FLAGS))
5161 return -EBADR; 5173 return -EBADR;
@@ -5179,7 +5191,6 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
5179 error = ext4_fill_fiemap_extents(inode, start_blk, 5191 error = ext4_fill_fiemap_extents(inode, start_blk,
5180 len_blks, fieinfo); 5192 len_blks, fieinfo);
5181 } 5193 }
5182 ext4_es_lru_add(inode);
5183 return error; 5194 return error;
5184} 5195}
5185 5196
@@ -5239,8 +5250,6 @@ ext4_ext_shift_path_extents(struct ext4_ext_path *path, ext4_lblk_t shift,
5239 return -EIO; 5250 return -EIO;
5240 5251
5241 ex_last = EXT_LAST_EXTENT(path[depth].p_hdr); 5252 ex_last = EXT_LAST_EXTENT(path[depth].p_hdr);
5242 if (!ex_last)
5243 return -EIO;
5244 5253
5245 err = ext4_access_path(handle, inode, path + depth); 5254 err = ext4_access_path(handle, inode, path + depth);
5246 if (err) 5255 if (err)
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 94e7855ae71b..e04d45733976 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -147,10 +147,9 @@ static struct kmem_cache *ext4_es_cachep;
147static int __es_insert_extent(struct inode *inode, struct extent_status *newes); 147static int __es_insert_extent(struct inode *inode, struct extent_status *newes);
148static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk, 148static int __es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
149 ext4_lblk_t end); 149 ext4_lblk_t end);
150static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei, 150static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan);
151 int nr_to_scan); 151static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
152static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan, 152 struct ext4_inode_info *locked_ei);
153 struct ext4_inode_info *locked_ei);
154 153
155int __init ext4_init_es(void) 154int __init ext4_init_es(void)
156{ 155{
@@ -298,6 +297,36 @@ out:
298 trace_ext4_es_find_delayed_extent_range_exit(inode, es); 297 trace_ext4_es_find_delayed_extent_range_exit(inode, es);
299} 298}
300 299
300static void ext4_es_list_add(struct inode *inode)
301{
302 struct ext4_inode_info *ei = EXT4_I(inode);
303 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
304
305 if (!list_empty(&ei->i_es_list))
306 return;
307
308 spin_lock(&sbi->s_es_lock);
309 if (list_empty(&ei->i_es_list)) {
310 list_add_tail(&ei->i_es_list, &sbi->s_es_list);
311 sbi->s_es_nr_inode++;
312 }
313 spin_unlock(&sbi->s_es_lock);
314}
315
316static void ext4_es_list_del(struct inode *inode)
317{
318 struct ext4_inode_info *ei = EXT4_I(inode);
319 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
320
321 spin_lock(&sbi->s_es_lock);
322 if (!list_empty(&ei->i_es_list)) {
323 list_del_init(&ei->i_es_list);
324 sbi->s_es_nr_inode--;
325 WARN_ON_ONCE(sbi->s_es_nr_inode < 0);
326 }
327 spin_unlock(&sbi->s_es_lock);
328}
329
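
ext4_es_list_add() is a textbook check-lock-recheck: the unlocked list_empty() test keeps the common already-listed case lock-free, and the test is repeated under s_es_lock because another CPU may have added the inode in the meantime. A userspace sketch of the same idiom with POSIX spinlocks; the bool and counter below stand in for the kernel list and s_es_nr_inode:

#include <pthread.h>
#include <stdbool.h>

static pthread_spinlock_t es_lock;
static bool on_list;    /* stands in for !list_empty(&ei->i_es_list) */
static int nr_inodes;   /* stands in for sbi->s_es_nr_inode */

static void es_list_add(void)
{
        if (on_list)                    /* unlocked fast path */
                return;

        pthread_spin_lock(&es_lock);
        if (!on_list) {                 /* recheck: we may have raced */
                on_list = true;
                nr_inodes++;
        }
        pthread_spin_unlock(&es_lock);
}

int main(void)
{
        pthread_spin_init(&es_lock, PTHREAD_PROCESS_PRIVATE);
        es_list_add();
        es_list_add();                  /* second call is a no-op */
        pthread_spin_destroy(&es_lock);
        return nr_inodes == 1 ? 0 : 1;
}
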
301static struct extent_status * 330static struct extent_status *
302ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len, 331ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
303 ext4_fsblk_t pblk) 332 ext4_fsblk_t pblk)
@@ -314,9 +343,10 @@ ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
314 * We don't count delayed extent because we never try to reclaim them 343 * We don't count delayed extent because we never try to reclaim them
315 */ 344 */
316 if (!ext4_es_is_delayed(es)) { 345 if (!ext4_es_is_delayed(es)) {
317 EXT4_I(inode)->i_es_lru_nr++; 346 if (!EXT4_I(inode)->i_es_shk_nr++)
347 ext4_es_list_add(inode);
318 percpu_counter_inc(&EXT4_SB(inode->i_sb)-> 348 percpu_counter_inc(&EXT4_SB(inode->i_sb)->
319 s_es_stats.es_stats_lru_cnt); 349 s_es_stats.es_stats_shk_cnt);
320 } 350 }
321 351
322 EXT4_I(inode)->i_es_all_nr++; 352 EXT4_I(inode)->i_es_all_nr++;
@@ -330,12 +360,13 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
330 EXT4_I(inode)->i_es_all_nr--; 360 EXT4_I(inode)->i_es_all_nr--;
331 percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt); 361 percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
332 362
333 /* Decrease the lru counter when this es is not delayed */ 363 /* Decrease the shrink counter when this es is not delayed */
334 if (!ext4_es_is_delayed(es)) { 364 if (!ext4_es_is_delayed(es)) {
335 BUG_ON(EXT4_I(inode)->i_es_lru_nr == 0); 365 BUG_ON(EXT4_I(inode)->i_es_shk_nr == 0);
336 EXT4_I(inode)->i_es_lru_nr--; 366 if (!--EXT4_I(inode)->i_es_shk_nr)
367 ext4_es_list_del(inode);
337 percpu_counter_dec(&EXT4_SB(inode->i_sb)-> 368 percpu_counter_dec(&EXT4_SB(inode->i_sb)->
338 s_es_stats.es_stats_lru_cnt); 369 s_es_stats.es_stats_shk_cnt);
339 } 370 }
340 371
341 kmem_cache_free(ext4_es_cachep, es); 372 kmem_cache_free(ext4_es_cachep, es);
@@ -351,7 +382,7 @@ static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
351static int ext4_es_can_be_merged(struct extent_status *es1, 382static int ext4_es_can_be_merged(struct extent_status *es1,
352 struct extent_status *es2) 383 struct extent_status *es2)
353{ 384{
354 if (ext4_es_status(es1) != ext4_es_status(es2)) 385 if (ext4_es_type(es1) != ext4_es_type(es2))
355 return 0; 386 return 0;
356 387
357 if (((__u64) es1->es_len) + es2->es_len > EXT_MAX_BLOCKS) { 388 if (((__u64) es1->es_len) + es2->es_len > EXT_MAX_BLOCKS) {
@@ -394,6 +425,8 @@ ext4_es_try_to_merge_left(struct inode *inode, struct extent_status *es)
394 es1 = rb_entry(node, struct extent_status, rb_node); 425 es1 = rb_entry(node, struct extent_status, rb_node);
395 if (ext4_es_can_be_merged(es1, es)) { 426 if (ext4_es_can_be_merged(es1, es)) {
396 es1->es_len += es->es_len; 427 es1->es_len += es->es_len;
428 if (ext4_es_is_referenced(es))
429 ext4_es_set_referenced(es1);
397 rb_erase(&es->rb_node, &tree->root); 430 rb_erase(&es->rb_node, &tree->root);
398 ext4_es_free_extent(inode, es); 431 ext4_es_free_extent(inode, es);
399 es = es1; 432 es = es1;
@@ -416,6 +449,8 @@ ext4_es_try_to_merge_right(struct inode *inode, struct extent_status *es)
416 es1 = rb_entry(node, struct extent_status, rb_node); 449 es1 = rb_entry(node, struct extent_status, rb_node);
417 if (ext4_es_can_be_merged(es, es1)) { 450 if (ext4_es_can_be_merged(es, es1)) {
418 es->es_len += es1->es_len; 451 es->es_len += es1->es_len;
452 if (ext4_es_is_referenced(es1))
453 ext4_es_set_referenced(es);
419 rb_erase(node, &tree->root); 454 rb_erase(node, &tree->root);
420 ext4_es_free_extent(inode, es1); 455 ext4_es_free_extent(inode, es1);
421 } 456 }
@@ -683,8 +718,8 @@ int ext4_es_insert_extent(struct inode *inode, ext4_lblk_t lblk,
683 goto error; 718 goto error;
684retry: 719retry:
685 err = __es_insert_extent(inode, &newes); 720 err = __es_insert_extent(inode, &newes);
686 if (err == -ENOMEM && __ext4_es_shrink(EXT4_SB(inode->i_sb), 1, 721 if (err == -ENOMEM && __es_shrink(EXT4_SB(inode->i_sb),
687 EXT4_I(inode))) 722 128, EXT4_I(inode)))
688 goto retry; 723 goto retry;
689 if (err == -ENOMEM && !ext4_es_is_delayed(&newes)) 724 if (err == -ENOMEM && !ext4_es_is_delayed(&newes))
690 err = 0; 725 err = 0;
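
The insert path now asks __es_shrink() to scan 128 extents (rather than 1) and retries for as long as the shrinker reports progress. The pattern, sketched with malloc() and a caller-supplied reclaim callback as stand-ins:

#include <stdlib.h>

/* 'reclaim' stands in for __es_shrink() and returns the number of objects
 * it freed; keep retrying the allocation while it makes forward progress. */
static void *alloc_with_reclaim(size_t sz, int (*reclaim)(int nr_to_scan))
{
        void *p;

        while ((p = malloc(sz)) == NULL) {
                if (reclaim(128) == 0)  /* no progress: give up */
                        break;
        }
        return p;
}

static int no_reclaim(int nr) { (void)nr; return 0; }

int main(void)
{
        void *p = alloc_with_reclaim(64, no_reclaim);

        free(p);
        return 0;
}
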
@@ -782,6 +817,8 @@ out:
782 es->es_lblk = es1->es_lblk; 817 es->es_lblk = es1->es_lblk;
783 es->es_len = es1->es_len; 818 es->es_len = es1->es_len;
784 es->es_pblk = es1->es_pblk; 819 es->es_pblk = es1->es_pblk;
820 if (!ext4_es_is_referenced(es))
821 ext4_es_set_referenced(es);
785 stats->es_stats_cache_hits++; 822 stats->es_stats_cache_hits++;
786 } else { 823 } else {
787 stats->es_stats_cache_misses++; 824 stats->es_stats_cache_misses++;
@@ -841,8 +878,8 @@ retry:
841 es->es_lblk = orig_es.es_lblk; 878 es->es_lblk = orig_es.es_lblk;
842 es->es_len = orig_es.es_len; 879 es->es_len = orig_es.es_len;
843 if ((err == -ENOMEM) && 880 if ((err == -ENOMEM) &&
844 __ext4_es_shrink(EXT4_SB(inode->i_sb), 1, 881 __es_shrink(EXT4_SB(inode->i_sb),
845 EXT4_I(inode))) 882 128, EXT4_I(inode)))
846 goto retry; 883 goto retry;
847 goto out; 884 goto out;
848 } 885 }
@@ -914,6 +951,11 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
914 end = lblk + len - 1; 951 end = lblk + len - 1;
915 BUG_ON(end < lblk); 952 BUG_ON(end < lblk);
916 953
954 /*
955 * ext4_clear_inode() depends on us taking i_es_lock unconditionally
956 * so that we are sure __es_shrink() is done with the inode before it
957 * is reclaimed.
958 */
917 write_lock(&EXT4_I(inode)->i_es_lock); 959 write_lock(&EXT4_I(inode)->i_es_lock);
918 err = __es_remove_extent(inode, lblk, end); 960 err = __es_remove_extent(inode, lblk, end);
919 write_unlock(&EXT4_I(inode)->i_es_lock); 961 write_unlock(&EXT4_I(inode)->i_es_lock);
@@ -921,114 +963,75 @@ int ext4_es_remove_extent(struct inode *inode, ext4_lblk_t lblk,
921 return err; 963 return err;
922} 964}
923 965
924static int ext4_inode_touch_time_cmp(void *priv, struct list_head *a, 966static int __es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
925 struct list_head *b) 967 struct ext4_inode_info *locked_ei)
926{
927 struct ext4_inode_info *eia, *eib;
928 eia = list_entry(a, struct ext4_inode_info, i_es_lru);
929 eib = list_entry(b, struct ext4_inode_info, i_es_lru);
930
931 if (ext4_test_inode_state(&eia->vfs_inode, EXT4_STATE_EXT_PRECACHED) &&
932 !ext4_test_inode_state(&eib->vfs_inode, EXT4_STATE_EXT_PRECACHED))
933 return 1;
934 if (!ext4_test_inode_state(&eia->vfs_inode, EXT4_STATE_EXT_PRECACHED) &&
935 ext4_test_inode_state(&eib->vfs_inode, EXT4_STATE_EXT_PRECACHED))
936 return -1;
937 if (eia->i_touch_when == eib->i_touch_when)
938 return 0;
939 if (time_after(eia->i_touch_when, eib->i_touch_when))
940 return 1;
941 else
942 return -1;
943}
944
945static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
946 struct ext4_inode_info *locked_ei)
947{ 968{
948 struct ext4_inode_info *ei; 969 struct ext4_inode_info *ei;
949 struct ext4_es_stats *es_stats; 970 struct ext4_es_stats *es_stats;
950 struct list_head *cur, *tmp;
951 LIST_HEAD(skipped);
952 ktime_t start_time; 971 ktime_t start_time;
953 u64 scan_time; 972 u64 scan_time;
973 int nr_to_walk;
954 int nr_shrunk = 0; 974 int nr_shrunk = 0;
955 int retried = 0, skip_precached = 1, nr_skipped = 0; 975 int retried = 0, nr_skipped = 0;
956 976
957 es_stats = &sbi->s_es_stats; 977 es_stats = &sbi->s_es_stats;
958 start_time = ktime_get(); 978 start_time = ktime_get();
959 spin_lock(&sbi->s_es_lru_lock);
960 979
961retry: 980retry:
962 list_for_each_safe(cur, tmp, &sbi->s_es_lru) { 981 spin_lock(&sbi->s_es_lock);
963 int shrunk; 982 nr_to_walk = sbi->s_es_nr_inode;
964 983 while (nr_to_walk-- > 0) {
965 /* 984 if (list_empty(&sbi->s_es_list)) {
966 * If we have already reclaimed all extents from extent 985 spin_unlock(&sbi->s_es_lock);
967 * status tree, just stop the loop immediately. 986 goto out;
968 */ 987 }
969 if (percpu_counter_read_positive( 988 ei = list_first_entry(&sbi->s_es_list, struct ext4_inode_info,
970 &es_stats->es_stats_lru_cnt) == 0) 989 i_es_list);
971 break; 990 /* Move the inode to the tail */
972 991 list_move_tail(&ei->i_es_list, &sbi->s_es_list);
973 ei = list_entry(cur, struct ext4_inode_info, i_es_lru);
974 992
975 /* 993 /*
976 * Skip the inode that is newer than the last_sorted 994 * Normally we try hard to avoid shrinking precached inodes,
977 * time. Normally we try hard to avoid shrinking 995 * but we will as a last resort.
978 * precached inodes, but we will as a last resort.
979 */ 996 */
980 if ((es_stats->es_stats_last_sorted < ei->i_touch_when) || 997 if (!retried && ext4_test_inode_state(&ei->vfs_inode,
981 (skip_precached && ext4_test_inode_state(&ei->vfs_inode, 998 EXT4_STATE_EXT_PRECACHED)) {
982 EXT4_STATE_EXT_PRECACHED))) {
983 nr_skipped++; 999 nr_skipped++;
984 list_move_tail(cur, &skipped);
985 continue; 1000 continue;
986 } 1001 }
987 1002
988 if (ei->i_es_lru_nr == 0 || ei == locked_ei || 1003 if (ei == locked_ei || !write_trylock(&ei->i_es_lock)) {
989 !write_trylock(&ei->i_es_lock)) 1004 nr_skipped++;
990 continue; 1005 continue;
1006 }
1007 /*
1008 * Now we hold i_es_lock which protects us from inode reclaim
1009 * freeing the inode under us
1010 */
1011 spin_unlock(&sbi->s_es_lock);
991 1012
992 shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan); 1013 nr_shrunk += es_reclaim_extents(ei, &nr_to_scan);
993 if (ei->i_es_lru_nr == 0)
994 list_del_init(&ei->i_es_lru);
995 write_unlock(&ei->i_es_lock); 1014 write_unlock(&ei->i_es_lock);
996 1015
997 nr_shrunk += shrunk; 1016 if (nr_to_scan <= 0)
998 nr_to_scan -= shrunk; 1017 goto out;
999 if (nr_to_scan == 0) 1018 spin_lock(&sbi->s_es_lock);
1000 break;
1001 } 1019 }
1002 1020 spin_unlock(&sbi->s_es_lock);
1003 /* Move the newer inodes into the tail of the LRU list. */
1004 list_splice_tail(&skipped, &sbi->s_es_lru);
1005 INIT_LIST_HEAD(&skipped);
1006 1021
1007 /* 1022 /*
1008 * If we skipped any inodes, and we weren't able to make any 1023 * If we skipped any inodes, and we weren't able to make any
1009 * forward progress, sort the list and try again. 1024 * forward progress, try again to scan precached inodes.
1010 */ 1025 */
1011 if ((nr_shrunk == 0) && nr_skipped && !retried) { 1026 if ((nr_shrunk == 0) && nr_skipped && !retried) {
1012 retried++; 1027 retried++;
1013 list_sort(NULL, &sbi->s_es_lru, ext4_inode_touch_time_cmp);
1014 es_stats->es_stats_last_sorted = jiffies;
1015 ei = list_first_entry(&sbi->s_es_lru, struct ext4_inode_info,
1016 i_es_lru);
1017 /*
1018 * If there are no non-precached inodes left on the
1019 * list, start releasing precached extents.
1020 */
1021 if (ext4_test_inode_state(&ei->vfs_inode,
1022 EXT4_STATE_EXT_PRECACHED))
1023 skip_precached = 0;
1024 goto retry; 1028 goto retry;
1025 } 1029 }
1026 1030
1027 spin_unlock(&sbi->s_es_lru_lock);
1028
1029 if (locked_ei && nr_shrunk == 0) 1031 if (locked_ei && nr_shrunk == 0)
1030 nr_shrunk = __es_try_to_reclaim_extents(locked_ei, nr_to_scan); 1032 nr_shrunk = es_reclaim_extents(locked_ei, &nr_to_scan);
1031 1033
1034out:
1032 scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time)); 1035 scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
1033 if (likely(es_stats->es_stats_scan_time)) 1036 if (likely(es_stats->es_stats_scan_time))
1034 es_stats->es_stats_scan_time = (scan_time + 1037 es_stats->es_stats_scan_time = (scan_time +
@@ -1043,7 +1046,7 @@ retry:
1043 else 1046 else
1044 es_stats->es_stats_shrunk = nr_shrunk; 1047 es_stats->es_stats_shrunk = nr_shrunk;
1045 1048
1046 trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time, skip_precached, 1049 trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time,
1047 nr_skipped, retried); 1050 nr_skipped, retried);
1048 return nr_shrunk; 1051 return nr_shrunk;
1049} 1052}
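
__es_shrink() now walks a plain FIFO instead of a time-sorted LRU: it snapshots the inode count, rotates each visited inode to the tail, skips precached inodes on the first pass (and trylock failures always), and drops s_es_lock while actually reclaiming. A compact model of one pass over an array-backed queue; toy_inode and shrink_pass() are illustrative only:

#include <stdio.h>

#define NR_INODES 4

struct toy_inode {
        int precached;
        int extents;            /* reclaimable extents */
};

/* One shrink pass: visit at most the number of inodes present when the
 * pass started, skip precached ones unless 'retried', and stop once the
 * scan budget is spent. */
static int shrink_pass(struct toy_inode *q, int nr, int *nr_to_scan, int retried)
{
        int shrunk = 0;

        for (int walked = 0; walked < nr && *nr_to_scan > 0; walked++) {
                struct toy_inode *ei = &q[walked];      /* FIFO order */

                if (!retried && ei->precached)
                        continue;
                while (ei->extents > 0 && *nr_to_scan > 0) {
                        ei->extents--;
                        (*nr_to_scan)--;
                        shrunk++;
                }
        }
        return shrunk;
}

int main(void)
{
        struct toy_inode q[NR_INODES] = { {1, 5}, {0, 3}, {0, 2}, {1, 4} };
        int budget = 6;
        int n = shrink_pass(q, NR_INODES, &budget, 0);

        if (n == 0)             /* only precached inodes left? retry once */
                n = shrink_pass(q, NR_INODES, &budget, 1);
        printf("reclaimed %d\n", n);    /* 5: inodes 1 and 2 emptied */
        return 0;
}
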
@@ -1055,7 +1058,7 @@ static unsigned long ext4_es_count(struct shrinker *shrink,
1055 struct ext4_sb_info *sbi; 1058 struct ext4_sb_info *sbi;
1056 1059
1057 sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker); 1060 sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
1058 nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_lru_cnt); 1061 nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
1059 trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr); 1062 trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
1060 return nr; 1063 return nr;
1061} 1064}
@@ -1068,13 +1071,13 @@ static unsigned long ext4_es_scan(struct shrinker *shrink,
1068 int nr_to_scan = sc->nr_to_scan; 1071 int nr_to_scan = sc->nr_to_scan;
1069 int ret, nr_shrunk; 1072 int ret, nr_shrunk;
1070 1073
1071 ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_lru_cnt); 1074 ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_shk_cnt);
1072 trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret); 1075 trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);
1073 1076
1074 if (!nr_to_scan) 1077 if (!nr_to_scan)
1075 return ret; 1078 return ret;
1076 1079
1077 nr_shrunk = __ext4_es_shrink(sbi, nr_to_scan, NULL); 1080 nr_shrunk = __es_shrink(sbi, nr_to_scan, NULL);
1078 1081
1079 trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret); 1082 trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
1080 return nr_shrunk; 1083 return nr_shrunk;
@@ -1102,28 +1105,24 @@ static int ext4_es_seq_shrinker_info_show(struct seq_file *seq, void *v)
1102 return 0; 1105 return 0;
1103 1106
1104 /* here we just find an inode that has the max nr. of objects */ 1107 /* here we just find an inode that has the max nr. of objects */
1105 spin_lock(&sbi->s_es_lru_lock); 1108 spin_lock(&sbi->s_es_lock);
1106 list_for_each_entry(ei, &sbi->s_es_lru, i_es_lru) { 1109 list_for_each_entry(ei, &sbi->s_es_list, i_es_list) {
1107 inode_cnt++; 1110 inode_cnt++;
1108 if (max && max->i_es_all_nr < ei->i_es_all_nr) 1111 if (max && max->i_es_all_nr < ei->i_es_all_nr)
1109 max = ei; 1112 max = ei;
1110 else if (!max) 1113 else if (!max)
1111 max = ei; 1114 max = ei;
1112 } 1115 }
1113 spin_unlock(&sbi->s_es_lru_lock); 1116 spin_unlock(&sbi->s_es_lock);
1114 1117
1115 seq_printf(seq, "stats:\n %lld objects\n %lld reclaimable objects\n", 1118 seq_printf(seq, "stats:\n %lld objects\n %lld reclaimable objects\n",
1116 percpu_counter_sum_positive(&es_stats->es_stats_all_cnt), 1119 percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
1117 percpu_counter_sum_positive(&es_stats->es_stats_lru_cnt)); 1120 percpu_counter_sum_positive(&es_stats->es_stats_shk_cnt));
1118 seq_printf(seq, " %lu/%lu cache hits/misses\n", 1121 seq_printf(seq, " %lu/%lu cache hits/misses\n",
1119 es_stats->es_stats_cache_hits, 1122 es_stats->es_stats_cache_hits,
1120 es_stats->es_stats_cache_misses); 1123 es_stats->es_stats_cache_misses);
1121 if (es_stats->es_stats_last_sorted != 0)
1122 seq_printf(seq, " %u ms last sorted interval\n",
1123 jiffies_to_msecs(jiffies -
1124 es_stats->es_stats_last_sorted));
1125 if (inode_cnt) 1124 if (inode_cnt)
1126 seq_printf(seq, " %d inodes on lru list\n", inode_cnt); 1125 seq_printf(seq, " %d inodes on list\n", inode_cnt);
1127 1126
1128 seq_printf(seq, "average:\n %llu us scan time\n", 1127 seq_printf(seq, "average:\n %llu us scan time\n",
1129 div_u64(es_stats->es_stats_scan_time, 1000)); 1128 div_u64(es_stats->es_stats_scan_time, 1000));
@@ -1132,7 +1131,7 @@ static int ext4_es_seq_shrinker_info_show(struct seq_file *seq, void *v)
1132 seq_printf(seq, 1131 seq_printf(seq,
1133 "maximum:\n %lu inode (%u objects, %u reclaimable)\n" 1132 "maximum:\n %lu inode (%u objects, %u reclaimable)\n"
1134 " %llu us max scan time\n", 1133 " %llu us max scan time\n",
1135 max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_lru_nr, 1134 max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_shk_nr,
1136 div_u64(es_stats->es_stats_max_scan_time, 1000)); 1135 div_u64(es_stats->es_stats_max_scan_time, 1000));
1137 1136
1138 return 0; 1137 return 0;
@@ -1181,9 +1180,11 @@ int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
1181{ 1180{
1182 int err; 1181 int err;
1183 1182
1184 INIT_LIST_HEAD(&sbi->s_es_lru); 1183 /* Make sure we have enough bits for physical block number */
1185 spin_lock_init(&sbi->s_es_lru_lock); 1184 BUILD_BUG_ON(ES_SHIFT < 48);
1186 sbi->s_es_stats.es_stats_last_sorted = 0; 1185 INIT_LIST_HEAD(&sbi->s_es_list);
1186 sbi->s_es_nr_inode = 0;
1187 spin_lock_init(&sbi->s_es_lock);
1187 sbi->s_es_stats.es_stats_shrunk = 0; 1188 sbi->s_es_stats.es_stats_shrunk = 0;
1188 sbi->s_es_stats.es_stats_cache_hits = 0; 1189 sbi->s_es_stats.es_stats_cache_hits = 0;
1189 sbi->s_es_stats.es_stats_cache_misses = 0; 1190 sbi->s_es_stats.es_stats_cache_misses = 0;
@@ -1192,7 +1193,7 @@ int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
1192 err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL); 1193 err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL);
1193 if (err) 1194 if (err)
1194 return err; 1195 return err;
1195 err = percpu_counter_init(&sbi->s_es_stats.es_stats_lru_cnt, 0, GFP_KERNEL); 1196 err = percpu_counter_init(&sbi->s_es_stats.es_stats_shk_cnt, 0, GFP_KERNEL);
1196 if (err) 1197 if (err)
1197 goto err1; 1198 goto err1;
1198 1199
@@ -1210,7 +1211,7 @@ int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
1210 return 0; 1211 return 0;
1211 1212
1212err2: 1213err2:
1213 percpu_counter_destroy(&sbi->s_es_stats.es_stats_lru_cnt); 1214 percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
1214err1: 1215err1:
1215 percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt); 1216 percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
1216 return err; 1217 return err;
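
The err1/err2 labels above are the usual unwind ladder: each failure jumps to the label that undoes exactly the initializations that already succeeded, in reverse order. Sketched stand-alone below; the init_*/undo_* functions are placeholders:

#include <stdio.h>

static int init_a(void) { return 0; }
static int init_b(void) { return 0; }
static int init_c(void) { return -1; }          /* pretend this fails */
static void undo_a(void) { puts("undo a"); }
static void undo_b(void) { puts("undo b"); }

static int register_all(void)
{
        int err;

        err = init_a();
        if (err)
                return err;
        err = init_b();
        if (err)
                goto err1;
        err = init_c();
        if (err)
                goto err2;
        return 0;
err2:
        undo_b();       /* undo only what succeeded, newest first */
err1:
        undo_a();
        return err;
}

int main(void) { return register_all() ? 1 : 0; }
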
@@ -1221,71 +1222,83 @@ void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
1221 if (sbi->s_proc) 1222 if (sbi->s_proc)
1222 remove_proc_entry("es_shrinker_info", sbi->s_proc); 1223 remove_proc_entry("es_shrinker_info", sbi->s_proc);
1223 percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt); 1224 percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
1224 percpu_counter_destroy(&sbi->s_es_stats.es_stats_lru_cnt); 1225 percpu_counter_destroy(&sbi->s_es_stats.es_stats_shk_cnt);
1225 unregister_shrinker(&sbi->s_es_shrinker); 1226 unregister_shrinker(&sbi->s_es_shrinker);
1226} 1227}
1227 1228
1228void ext4_es_lru_add(struct inode *inode) 1229/*
1230 * Shrink extents in the given inode from ei->i_es_shrink_lblk till end. Scan at
1231 * most *nr_to_scan extents, update *nr_to_scan accordingly.
1232 *
1233 * Return 0 if we hit end of tree / interval, 1 if we exhausted nr_to_scan.
1234 * Increment *nr_shrunk by the number of reclaimed extents. Also update
1235 * ei->i_es_shrink_lblk to where we should continue scanning.
1236 */
1237static int es_do_reclaim_extents(struct ext4_inode_info *ei, ext4_lblk_t end,
1238 int *nr_to_scan, int *nr_shrunk)
1229{ 1239{
1230 struct ext4_inode_info *ei = EXT4_I(inode); 1240 struct inode *inode = &ei->vfs_inode;
1231 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1241 struct ext4_es_tree *tree = &ei->i_es_tree;
1232 1242 struct extent_status *es;
1233 ei->i_touch_when = jiffies; 1243 struct rb_node *node;
1234
1235 if (!list_empty(&ei->i_es_lru))
1236 return;
1237 1244
1238 spin_lock(&sbi->s_es_lru_lock); 1245 es = __es_tree_search(&tree->root, ei->i_es_shrink_lblk);
1239 if (list_empty(&ei->i_es_lru)) 1246 if (!es)
1240 list_add_tail(&ei->i_es_lru, &sbi->s_es_lru); 1247 goto out_wrap;
1241 spin_unlock(&sbi->s_es_lru_lock); 1248 node = &es->rb_node;
1242} 1249 while (*nr_to_scan > 0) {
1250 if (es->es_lblk > end) {
1251 ei->i_es_shrink_lblk = end + 1;
1252 return 0;
1253 }
1243 1254
1244void ext4_es_lru_del(struct inode *inode) 1255 (*nr_to_scan)--;
1245{ 1256 node = rb_next(&es->rb_node);
1246 struct ext4_inode_info *ei = EXT4_I(inode); 1257 /*
1247 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 1258 * We can't reclaim delayed extent from status tree because
1259 * fiemap, bigalloc, and seek_data/hole need to use it.
1260 */
1261 if (ext4_es_is_delayed(es))
1262 goto next;
1263 if (ext4_es_is_referenced(es)) {
1264 ext4_es_clear_referenced(es);
1265 goto next;
1266 }
1248 1267
1249 spin_lock(&sbi->s_es_lru_lock); 1268 rb_erase(&es->rb_node, &tree->root);
1250 if (!list_empty(&ei->i_es_lru)) 1269 ext4_es_free_extent(inode, es);
1251 list_del_init(&ei->i_es_lru); 1270 (*nr_shrunk)++;
1252 spin_unlock(&sbi->s_es_lru_lock); 1271next:
1272 if (!node)
1273 goto out_wrap;
1274 es = rb_entry(node, struct extent_status, rb_node);
1275 }
1276 ei->i_es_shrink_lblk = es->es_lblk;
1277 return 1;
1278out_wrap:
1279 ei->i_es_shrink_lblk = 0;
1280 return 0;
1253} 1281}
1254 1282
1255static int __es_try_to_reclaim_extents(struct ext4_inode_info *ei, 1283static int es_reclaim_extents(struct ext4_inode_info *ei, int *nr_to_scan)
1256 int nr_to_scan)
1257{ 1284{
1258 struct inode *inode = &ei->vfs_inode; 1285 struct inode *inode = &ei->vfs_inode;
1259 struct ext4_es_tree *tree = &ei->i_es_tree; 1286 int nr_shrunk = 0;
1260 struct rb_node *node; 1287 ext4_lblk_t start = ei->i_es_shrink_lblk;
1261 struct extent_status *es;
1262 unsigned long nr_shrunk = 0;
1263 static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL, 1288 static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
1264 DEFAULT_RATELIMIT_BURST); 1289 DEFAULT_RATELIMIT_BURST);
1265 1290
1266 if (ei->i_es_lru_nr == 0) 1291 if (ei->i_es_shk_nr == 0)
1267 return 0; 1292 return 0;
1268 1293
1269 if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) && 1294 if (ext4_test_inode_state(inode, EXT4_STATE_EXT_PRECACHED) &&
1270 __ratelimit(&_rs)) 1295 __ratelimit(&_rs))
1271 ext4_warning(inode->i_sb, "forced shrink of precached extents"); 1296 ext4_warning(inode->i_sb, "forced shrink of precached extents");
1272 1297
1273 node = rb_first(&tree->root); 1298 if (!es_do_reclaim_extents(ei, EXT_MAX_BLOCKS, nr_to_scan, &nr_shrunk) &&
1274 while (node != NULL) { 1299 start != 0)
1275 es = rb_entry(node, struct extent_status, rb_node); 1300 es_do_reclaim_extents(ei, start - 1, nr_to_scan, &nr_shrunk);
1276 node = rb_next(&es->rb_node); 1301
1277 /* 1302 ei->i_es_tree.cache_es = NULL;
1278 * We can't reclaim delayed extent from status tree because
1279 * fiemap, bigallic, and seek_data/hole need to use it.
1280 */
1281 if (!ext4_es_is_delayed(es)) {
1282 rb_erase(&es->rb_node, &tree->root);
1283 ext4_es_free_extent(inode, es);
1284 nr_shrunk++;
1285 if (--nr_to_scan == 0)
1286 break;
1287 }
1288 }
1289 tree->cache_es = NULL;
1290 return nr_shrunk; 1303 return nr_shrunk;
1291} 1304}
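
es_reclaim_extents() resumes at ei->i_es_shrink_lblk and wraps around, so repeated shrinker invocations sweep the whole tree instead of always evicting the lowest-numbered extents. A sketch of the rotor over a sorted array of extent start blocks (rotor_scan() is illustrative):

#include <stdio.h>

/* Visit entries beginning at the remembered rotor position, wrapping to
 * the start, stopping when the budget runs out; remember where to resume,
 * like ei->i_es_shrink_lblk. */
static int rotor_scan(const unsigned *lblk, int n, unsigned *rotor, int budget)
{
        int first = 0, visited = 0;

        while (first < n && lblk[first] < *rotor)       /* find resume point */
                first++;

        for (int k = 0; k < n && visited < budget; k++) {
                int i = (first + k) % n;                /* wrap past the end */

                /* ...reclaim lblk[i] here... */
                visited++;
                *rotor = lblk[(i + 1) % n];             /* resume after it */
        }
        if (visited == n)
                *rotor = 0;     /* covered everything: start over next time */
        return visited;
}

int main(void)
{
        unsigned lblk[] = { 10, 20, 30, 40 }, rotor = 25;

        rotor_scan(lblk, 4, &rotor, 2);     /* visits 30, then 40 */
        printf("resume at %u\n", rotor);    /* resume at 10 */
        return 0;
}
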
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
index efd5f970b501..691b52613ce4 100644
--- a/fs/ext4/extents_status.h
+++ b/fs/ext4/extents_status.h
@@ -29,25 +29,28 @@
29/* 29/*
30 * These flags live in the high bits of extent_status.es_pblk 30 * These flags live in the high bits of extent_status.es_pblk
31 */ 31 */
32#define ES_SHIFT 60 32enum {
33 33 ES_WRITTEN_B,
34#define EXTENT_STATUS_WRITTEN (1 << 3) 34 ES_UNWRITTEN_B,
35#define EXTENT_STATUS_UNWRITTEN (1 << 2) 35 ES_DELAYED_B,
36#define EXTENT_STATUS_DELAYED (1 << 1) 36 ES_HOLE_B,
37#define EXTENT_STATUS_HOLE (1 << 0) 37 ES_REFERENCED_B,
38 ES_FLAGS
39};
38 40
39#define EXTENT_STATUS_FLAGS (EXTENT_STATUS_WRITTEN | \ 41#define ES_SHIFT (sizeof(ext4_fsblk_t)*8 - ES_FLAGS)
40 EXTENT_STATUS_UNWRITTEN | \ 42#define ES_MASK (~((ext4_fsblk_t)0) << ES_SHIFT)
41 EXTENT_STATUS_DELAYED | \
42 EXTENT_STATUS_HOLE)
43 43
44#define ES_WRITTEN (1ULL << 63) 44#define EXTENT_STATUS_WRITTEN (1 << ES_WRITTEN_B)
45#define ES_UNWRITTEN (1ULL << 62) 45#define EXTENT_STATUS_UNWRITTEN (1 << ES_UNWRITTEN_B)
46#define ES_DELAYED (1ULL << 61) 46#define EXTENT_STATUS_DELAYED (1 << ES_DELAYED_B)
47#define ES_HOLE (1ULL << 60) 47#define EXTENT_STATUS_HOLE (1 << ES_HOLE_B)
48#define EXTENT_STATUS_REFERENCED (1 << ES_REFERENCED_B)
48 49
49#define ES_MASK (ES_WRITTEN | ES_UNWRITTEN | \ 50#define ES_TYPE_MASK ((ext4_fsblk_t)(EXTENT_STATUS_WRITTEN | \
50 ES_DELAYED | ES_HOLE) 51 EXTENT_STATUS_UNWRITTEN | \
52 EXTENT_STATUS_DELAYED | \
53 EXTENT_STATUS_HOLE) << ES_SHIFT)
51 54
52struct ext4_sb_info; 55struct ext4_sb_info;
53struct ext4_extent; 56struct ext4_extent;
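
With the enum in place, ES_SHIFT and ES_MASK are derived from the number of flag bits, so adding ES_REFERENCED_B shrank the shift from 60 to 59 while still leaving at least 48 bits for the physical block (the BUILD_BUG_ON added in ext4_es_register_shrinker() enforces this). The arithmetic, reconstructed as a stand-alone program with shortened names:

#include <stdint.h>
#include <stdio.h>
#include <inttypes.h>

typedef uint64_t fsblk_t;

enum { WRITTEN_B, UNWRITTEN_B, DELAYED_B, HOLE_B, REFERENCED_B, FLAGS };

#define SHIFT   ((unsigned)(sizeof(fsblk_t) * 8 - FLAGS))       /* 59 */
#define MASK    (~(fsblk_t)0 << SHIFT)

/* Pack the status flags into the top FLAGS bits, keep the pblk below. */
static fsblk_t store(fsblk_t pblk, unsigned status)
{
        return ((fsblk_t)status << SHIFT & MASK) | (pblk & ~MASK);
}

static unsigned status_of(fsblk_t es_pblk) { return es_pblk >> SHIFT; }
static fsblk_t  pblock_of(fsblk_t es_pblk) { return es_pblk & ~MASK; }

int main(void)
{
        fsblk_t es = store(123456, 1u << WRITTEN_B | 1u << REFERENCED_B);

        printf("shift=%u pblk=%" PRIu64 " status=%#x\n",
               SHIFT, pblock_of(es), status_of(es));
        return 0;
}
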
@@ -65,14 +68,13 @@ struct ext4_es_tree {
65}; 68};
66 69
67struct ext4_es_stats { 70struct ext4_es_stats {
68 unsigned long es_stats_last_sorted;
69 unsigned long es_stats_shrunk; 71 unsigned long es_stats_shrunk;
70 unsigned long es_stats_cache_hits; 72 unsigned long es_stats_cache_hits;
71 unsigned long es_stats_cache_misses; 73 unsigned long es_stats_cache_misses;
72 u64 es_stats_scan_time; 74 u64 es_stats_scan_time;
73 u64 es_stats_max_scan_time; 75 u64 es_stats_max_scan_time;
74 struct percpu_counter es_stats_all_cnt; 76 struct percpu_counter es_stats_all_cnt;
75 struct percpu_counter es_stats_lru_cnt; 77 struct percpu_counter es_stats_shk_cnt;
76}; 78};
77 79
78extern int __init ext4_init_es(void); 80extern int __init ext4_init_es(void);
@@ -93,29 +95,49 @@ extern void ext4_es_find_delayed_extent_range(struct inode *inode,
93extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk, 95extern int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
94 struct extent_status *es); 96 struct extent_status *es);
95 97
98static inline unsigned int ext4_es_status(struct extent_status *es)
99{
100 return es->es_pblk >> ES_SHIFT;
101}
102
103static inline unsigned int ext4_es_type(struct extent_status *es)
104{
105 return (es->es_pblk & ES_TYPE_MASK) >> ES_SHIFT;
106}
107
96static inline int ext4_es_is_written(struct extent_status *es) 108static inline int ext4_es_is_written(struct extent_status *es)
97{ 109{
98 return (es->es_pblk & ES_WRITTEN) != 0; 110 return (ext4_es_type(es) & EXTENT_STATUS_WRITTEN) != 0;
99} 111}
100 112
101static inline int ext4_es_is_unwritten(struct extent_status *es) 113static inline int ext4_es_is_unwritten(struct extent_status *es)
102{ 114{
103 return (es->es_pblk & ES_UNWRITTEN) != 0; 115 return (ext4_es_type(es) & EXTENT_STATUS_UNWRITTEN) != 0;
104} 116}
105 117
106static inline int ext4_es_is_delayed(struct extent_status *es) 118static inline int ext4_es_is_delayed(struct extent_status *es)
107{ 119{
108 return (es->es_pblk & ES_DELAYED) != 0; 120 return (ext4_es_type(es) & EXTENT_STATUS_DELAYED) != 0;
109} 121}
110 122
111static inline int ext4_es_is_hole(struct extent_status *es) 123static inline int ext4_es_is_hole(struct extent_status *es)
112{ 124{
113 return (es->es_pblk & ES_HOLE) != 0; 125 return (ext4_es_type(es) & EXTENT_STATUS_HOLE) != 0;
114} 126}
115 127
116static inline unsigned int ext4_es_status(struct extent_status *es) 128static inline void ext4_es_set_referenced(struct extent_status *es)
117{ 129{
118 return es->es_pblk >> ES_SHIFT; 130 es->es_pblk |= ((ext4_fsblk_t)EXTENT_STATUS_REFERENCED) << ES_SHIFT;
131}
132
133static inline void ext4_es_clear_referenced(struct extent_status *es)
134{
135 es->es_pblk &= ~(((ext4_fsblk_t)EXTENT_STATUS_REFERENCED) << ES_SHIFT);
136}
137
138static inline int ext4_es_is_referenced(struct extent_status *es)
139{
140 return (ext4_es_status(es) & EXTENT_STATUS_REFERENCED) != 0;
119} 141}
120 142
121static inline ext4_fsblk_t ext4_es_pblock(struct extent_status *es) 143static inline ext4_fsblk_t ext4_es_pblock(struct extent_status *es)
@@ -135,23 +157,19 @@ static inline void ext4_es_store_pblock(struct extent_status *es,
135static inline void ext4_es_store_status(struct extent_status *es, 157static inline void ext4_es_store_status(struct extent_status *es,
136 unsigned int status) 158 unsigned int status)
137{ 159{
138 es->es_pblk = (((ext4_fsblk_t) 160 es->es_pblk = (((ext4_fsblk_t)status << ES_SHIFT) & ES_MASK) |
139 (status & EXTENT_STATUS_FLAGS) << ES_SHIFT) | 161 (es->es_pblk & ~ES_MASK);
140 (es->es_pblk & ~ES_MASK));
141} 162}
142 163
143static inline void ext4_es_store_pblock_status(struct extent_status *es, 164static inline void ext4_es_store_pblock_status(struct extent_status *es,
144 ext4_fsblk_t pb, 165 ext4_fsblk_t pb,
145 unsigned int status) 166 unsigned int status)
146{ 167{
147 es->es_pblk = (((ext4_fsblk_t) 168 es->es_pblk = (((ext4_fsblk_t)status << ES_SHIFT) & ES_MASK) |
148 (status & EXTENT_STATUS_FLAGS) << ES_SHIFT) | 169 (pb & ~ES_MASK);
149 (pb & ~ES_MASK));
150} 170}
151 171
152extern int ext4_es_register_shrinker(struct ext4_sb_info *sbi); 172extern int ext4_es_register_shrinker(struct ext4_sb_info *sbi);
153extern void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi); 173extern void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi);
154extern void ext4_es_lru_add(struct inode *inode);
155extern void ext4_es_lru_del(struct inode *inode);
156 174
157#endif /* _EXT4_EXTENTS_STATUS_H */ 175#endif /* _EXT4_EXTENTS_STATUS_H */
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 8131be8c0af3..513c12cf444c 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -273,24 +273,19 @@ static int ext4_file_open(struct inode * inode, struct file * filp)
273 * we determine this extent as data or a hole according to whether the 273 * we determine this extent as data or a hole according to whether the
274 * page cache has data or not. 274 * page cache has data or not.
275 */ 275 */
276static int ext4_find_unwritten_pgoff(struct inode *inode, 276static int ext4_find_unwritten_pgoff(struct inode *inode, int whence,
277 int whence, 277 loff_t endoff, loff_t *offset)
278 struct ext4_map_blocks *map,
279 loff_t *offset)
280{ 278{
281 struct pagevec pvec; 279 struct pagevec pvec;
282 unsigned int blkbits;
283 pgoff_t index; 280 pgoff_t index;
284 pgoff_t end; 281 pgoff_t end;
285 loff_t endoff;
286 loff_t startoff; 282 loff_t startoff;
287 loff_t lastoff; 283 loff_t lastoff;
288 int found = 0; 284 int found = 0;
289 285
290 blkbits = inode->i_sb->s_blocksize_bits;
291 startoff = *offset; 286 startoff = *offset;
292 lastoff = startoff; 287 lastoff = startoff;
293 endoff = (loff_t)(map->m_lblk + map->m_len) << blkbits; 288
294 289
295 index = startoff >> PAGE_CACHE_SHIFT; 290 index = startoff >> PAGE_CACHE_SHIFT;
296 end = endoff >> PAGE_CACHE_SHIFT; 291 end = endoff >> PAGE_CACHE_SHIFT;
@@ -408,147 +403,144 @@ out:
408static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize) 403static loff_t ext4_seek_data(struct file *file, loff_t offset, loff_t maxsize)
409{ 404{
410 struct inode *inode = file->f_mapping->host; 405 struct inode *inode = file->f_mapping->host;
411 struct ext4_map_blocks map; 406 struct fiemap_extent_info fie;
412 struct extent_status es; 407 struct fiemap_extent ext[2];
413 ext4_lblk_t start, last, end; 408 loff_t next;
414 loff_t dataoff, isize; 409 int i, ret = 0;
415 int blkbits;
416 int ret = 0;
417 410
418 mutex_lock(&inode->i_mutex); 411 mutex_lock(&inode->i_mutex);
419 412 if (offset >= inode->i_size) {
420 isize = i_size_read(inode);
421 if (offset >= isize) {
422 mutex_unlock(&inode->i_mutex); 413 mutex_unlock(&inode->i_mutex);
423 return -ENXIO; 414 return -ENXIO;
424 } 415 }
425 416 fie.fi_flags = 0;
426 blkbits = inode->i_sb->s_blocksize_bits; 417 fie.fi_extents_max = 2;
427 start = offset >> blkbits; 418 fie.fi_extents_start = (struct fiemap_extent __user *) &ext;
428 last = start; 419 while (1) {
429 end = isize >> blkbits; 420 mm_segment_t old_fs = get_fs();
430 dataoff = offset; 421
431 422 fie.fi_extents_mapped = 0;
432 do { 423 memset(ext, 0, sizeof(*ext) * fie.fi_extents_max);
433 map.m_lblk = last; 424
434 map.m_len = end - last + 1; 425 set_fs(get_ds());
435 ret = ext4_map_blocks(NULL, inode, &map, 0); 426 ret = ext4_fiemap(inode, &fie, offset, maxsize - offset);
436 if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) { 427 set_fs(old_fs);
437 if (last != start) 428 if (ret)
438 dataoff = (loff_t)last << blkbits;
439 break; 429 break;
440 }
441 430
442 /* 431 /* No extents found, EOF */
443 * If there is a delay extent at this offset, 432 if (!fie.fi_extents_mapped) {
444 * it will be as a data. 433 ret = -ENXIO;
445 */
446 ext4_es_find_delayed_extent_range(inode, last, last, &es);
447 if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
448 if (last != start)
449 dataoff = (loff_t)last << blkbits;
450 break; 434 break;
451 } 435 }
436 for (i = 0; i < fie.fi_extents_mapped; i++) {
437 next = (loff_t)(ext[i].fe_length + ext[i].fe_logical);
452 438
453 /* 439 if (offset < (loff_t)ext[i].fe_logical)
454 * If there is a unwritten extent at this offset, 440 offset = (loff_t)ext[i].fe_logical;
455 * it will be as a data or a hole according to page 441 /*
456 * cache that has data or not. 442 * If extent is not unwritten, then it contains valid
457 */ 443 * data, mapped or delayed.
458 if (map.m_flags & EXT4_MAP_UNWRITTEN) { 444 */
459 int unwritten; 445 if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN))
460 unwritten = ext4_find_unwritten_pgoff(inode, SEEK_DATA, 446 goto out;
461 &map, &dataoff);
462 if (unwritten)
463 break;
464 }
465 447
466 last++; 448 /*
467 dataoff = (loff_t)last << blkbits; 449 * If there is a unwritten extent at this offset,
468 } while (last <= end); 450 * it will be as a data or a hole according to page
451 * cache that has data or not.
452 */
453 if (ext4_find_unwritten_pgoff(inode, SEEK_DATA,
454 next, &offset))
455 goto out;
469 456
457 if (ext[i].fe_flags & FIEMAP_EXTENT_LAST) {
458 ret = -ENXIO;
459 goto out;
460 }
461 offset = next;
462 }
463 }
464 if (offset > inode->i_size)
465 offset = inode->i_size;
466out:
470 mutex_unlock(&inode->i_mutex); 467 mutex_unlock(&inode->i_mutex);
468 if (ret)
469 return ret;
471 470
472 if (dataoff > isize) 471 return vfs_setpos(file, offset, maxsize);
473 return -ENXIO;
474
475 return vfs_setpos(file, dataoff, maxsize);
476} 472}
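
ext4_seek_data() and ext4_seek_hole() now drive ext4_fiemap() under set_fs(get_ds()) so the kernel can fill the small on-stack extent array through the user-pointer interface. The same extent walk is available from userspace via the FS_IOC_FIEMAP ioctl; a sketch that finds the first data offset at or after a given position (unwritten extents are treated as data here for brevity, where the kernel additionally probes the page cache; error handling trimmed):

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>

/* Returns the first offset >= 'offset' that maps to an extent, -1 if the
 * rest of the file is a hole (or on error). */
static off_t first_data(int fd, off_t offset, off_t size)
{
        struct fiemap *fie = calloc(1, sizeof(*fie) +
                                       sizeof(struct fiemap_extent));
        off_t ret = -1;

        if (!fie || offset >= size)
                goto out;
        fie->fm_start = offset;
        fie->fm_length = size - offset;
        fie->fm_extent_count = 1;       /* one extent at a time */
        if (ioctl(fd, FS_IOC_FIEMAP, fie) < 0 || !fie->fm_mapped_extents)
                goto out;               /* error, or a hole out to EOF */
        ret = offset > (off_t)fie->fm_extents[0].fe_logical ?
                offset : (off_t)fie->fm_extents[0].fe_logical;
out:
        free(fie);
        return ret;
}

int main(int argc, char **argv)
{
        int fd = argc > 1 ? open(argv[1], O_RDONLY) : -1;
        off_t size = lseek(fd, 0, SEEK_END);

        printf("first data at %lld\n", (long long)first_data(fd, 0, size));
        return 0;
}
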
477 473
478/* 474/*
479 * ext4_seek_hole() retrieves the offset for SEEK_HOLE. 475 * ext4_seek_hole() retrieves the offset for SEEK_HOLE
480 */ 476 */
481static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize) 477static loff_t ext4_seek_hole(struct file *file, loff_t offset, loff_t maxsize)
482{ 478{
483 struct inode *inode = file->f_mapping->host; 479 struct inode *inode = file->f_mapping->host;
484	struct ext4_map_blocks map;
485	struct extent_status es;
486	ext4_lblk_t start, last, end;
487	loff_t holeoff, isize;
488	int blkbits;
489	int ret = 0;
480	struct fiemap_extent_info fie;
481	struct fiemap_extent ext[2];
482	loff_t next;
483	int i, ret = 0;
490	484
491	mutex_lock(&inode->i_mutex);	485	mutex_lock(&inode->i_mutex);
492
493	isize = i_size_read(inode);
494	if (offset >= isize) {
486	if (offset >= inode->i_size) {
495		mutex_unlock(&inode->i_mutex);	487		mutex_unlock(&inode->i_mutex);
496		return -ENXIO;	488		return -ENXIO;
497	}	489	}
498	490
499	blkbits = inode->i_sb->s_blocksize_bits;
500	start = offset >> blkbits;
501	last = start;
502	end = isize >> blkbits;
503	holeoff = offset;
491	fie.fi_flags = 0;
492	fie.fi_extents_max = 2;
493	fie.fi_extents_start = (struct fiemap_extent __user *)&ext;
494	while (1) {
495		mm_segment_t old_fs = get_fs();
504	496
505	do {
506		map.m_lblk = last;
507		map.m_len = end - last + 1;
508		ret = ext4_map_blocks(NULL, inode, &map, 0);
509		if (ret > 0 && !(map.m_flags & EXT4_MAP_UNWRITTEN)) {
510			last += ret;
511			holeoff = (loff_t)last << blkbits;
512			continue;
513		}
497		fie.fi_extents_mapped = 0;
498		memset(ext, 0, sizeof(*ext));
514	499
515		/*
516		 * If there is a delay extent at this offset,
517		 * we will skip this extent.
518		 */
519		ext4_es_find_delayed_extent_range(inode, last, last, &es);
520		if (es.es_len != 0 && in_range(last, es.es_lblk, es.es_len)) {
521			last = es.es_lblk + es.es_len;
522			holeoff = (loff_t)last << blkbits;
523			continue;
524		}
500		set_fs(get_ds());
501		ret = ext4_fiemap(inode, &fie, offset, maxsize - offset);
502		set_fs(old_fs);
503		if (ret)
504			break;
525	505
526		/*
527		 * If there is a unwritten extent at this offset,
528		 * it will be as a data or a hole according to page
529		 * cache that has data or not.
530		 */
531		if (map.m_flags & EXT4_MAP_UNWRITTEN) {
532			int unwritten;
533			unwritten = ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
534						&map, &holeoff);
535			if (!unwritten) {
536				last += ret;
537				holeoff = (loff_t)last << blkbits;
506		/* No extents found */
507		if (!fie.fi_extents_mapped)
508			break;
509
510		for (i = 0; i < fie.fi_extents_mapped; i++) {
511			next = (loff_t)(ext[i].fe_logical + ext[i].fe_length);
512			/*
513			 * If extent is not unwritten, then it contains valid
514			 * data, mapped or delayed.
515			 */
516			if (!(ext[i].fe_flags & FIEMAP_EXTENT_UNWRITTEN)) {
517				if (offset < (loff_t)ext[i].fe_logical)
518					goto out;
519				offset = next;
538				continue;	520				continue;
539			}	521			}
540		}
541
542		/* find a hole */
543		break;
544	} while (last <= end);
522			/*
523			 * If there is a unwritten extent at this offset,
524			 * it will be as a data or a hole according to page
525			 * cache that has data or not.
526			 */
527			if (ext4_find_unwritten_pgoff(inode, SEEK_HOLE,
528						      next, &offset))
529				goto out;
545	530
531			offset = next;
532			if (ext[i].fe_flags & FIEMAP_EXTENT_LAST)
533				goto out;
534		}
535	}
536	if (offset > inode->i_size)
537		offset = inode->i_size;
538	out:
546	mutex_unlock(&inode->i_mutex);	539	mutex_unlock(&inode->i_mutex);
540	if (ret)
541		return ret;
547	542
548	if (holeoff > isize)
549		holeoff = isize;
550
551	return vfs_setpos(file, holeoff, maxsize);
543	return vfs_setpos(file, offset, maxsize);
552	}	544	}
553 545
554/* 546/*
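For reference, the behaviour both helpers must implement is the standard lseek(2) contract: SEEK_DATA lands on the next byte of data at or after the given offset, SEEK_HOLE on the next hole, and ENXIO is returned past the last data. A minimal userspace sketch of that contract (illustrative only, not part of the patch):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	off_t data = 0, hole, end;
	int fd;

	if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
		return 1;
	end = lseek(fd, 0, SEEK_END);

	/* Each data segment starts at SEEK_DATA and ends at SEEK_HOLE;
	 * lseek() fails with ENXIO once the offset is past the last data. */
	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
		hole = lseek(fd, data, SEEK_HOLE);
		printf("data %lld..%lld\n", (long long)data, (long long)hole);
		if (hole >= end)
			break;
		data = hole;
	}
	close(fd);
	return 0;
}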
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 3ea62695abce..4b143febf21f 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -811,8 +811,11 @@ static int ext4_da_convert_inline_data_to_extent(struct address_space *mapping,
811 ret = __block_write_begin(page, 0, inline_size, 811 ret = __block_write_begin(page, 0, inline_size,
812 ext4_da_get_block_prep); 812 ext4_da_get_block_prep);
813 if (ret) { 813 if (ret) {
814 up_read(&EXT4_I(inode)->xattr_sem);
815 unlock_page(page);
816 page_cache_release(page);
814 ext4_truncate_failed_write(inode); 817 ext4_truncate_failed_write(inode);
815 goto out; 818 return ret;
816 } 819 }
817 820
818 SetPageDirty(page); 821 SetPageDirty(page);
@@ -870,6 +873,12 @@ retry_journal:
870 goto out_journal; 873 goto out_journal;
871 } 874 }
872 875
876 /*
877 * We cannot recurse into the filesystem as the transaction
878 * is already started.
879 */
880 flags |= AOP_FLAG_NOFS;
881
873 if (ret == -ENOSPC) { 882 if (ret == -ENOSPC) {
874 ret = ext4_da_convert_inline_data_to_extent(mapping, 883 ret = ext4_da_convert_inline_data_to_extent(mapping,
875 inode, 884 inode,
@@ -882,11 +891,6 @@ retry_journal:
882 goto out; 891 goto out;
883 } 892 }
884 893
885 /*
886 * We cannot recurse into the filesystem as the transaction
887 * is already started.
888 */
889 flags |= AOP_FLAG_NOFS;
890 894
891 page = grab_cache_page_write_begin(mapping, 0, flags); 895 page = grab_cache_page_write_begin(mapping, 0, flags);
892 if (!page) { 896 if (!page) {
@@ -1807,11 +1811,12 @@ int ext4_destroy_inline_data(handle_t *handle, struct inode *inode)
1807 1811
1808int ext4_inline_data_fiemap(struct inode *inode, 1812int ext4_inline_data_fiemap(struct inode *inode,
1809 struct fiemap_extent_info *fieinfo, 1813 struct fiemap_extent_info *fieinfo,
1810 int *has_inline) 1814 int *has_inline, __u64 start, __u64 len)
1811{ 1815{
1812 __u64 physical = 0; 1816 __u64 physical = 0;
1813 __u64 length; 1817 __u64 inline_len;
1814 __u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_LAST; 1818 __u32 flags = FIEMAP_EXTENT_DATA_INLINE | FIEMAP_EXTENT_NOT_ALIGNED |
1819 FIEMAP_EXTENT_LAST;
1815 int error = 0; 1820 int error = 0;
1816 struct ext4_iloc iloc; 1821 struct ext4_iloc iloc;
1817 1822
@@ -1820,6 +1825,13 @@ int ext4_inline_data_fiemap(struct inode *inode,
1820 *has_inline = 0; 1825 *has_inline = 0;
1821 goto out; 1826 goto out;
1822 } 1827 }
1828 inline_len = min_t(size_t, ext4_get_inline_size(inode),
1829 i_size_read(inode));
1830 if (start >= inline_len)
1831 goto out;
1832 if (start + len < inline_len)
1833 inline_len = start + len;
1834 inline_len -= start;
1823 1835
1824 error = ext4_get_inode_loc(inode, &iloc); 1836 error = ext4_get_inode_loc(inode, &iloc);
1825 if (error) 1837 if (error)
@@ -1828,11 +1840,10 @@ int ext4_inline_data_fiemap(struct inode *inode,
1828 physical = (__u64)iloc.bh->b_blocknr << inode->i_sb->s_blocksize_bits; 1840 physical = (__u64)iloc.bh->b_blocknr << inode->i_sb->s_blocksize_bits;
1829 physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data; 1841 physical += (char *)ext4_raw_inode(&iloc) - iloc.bh->b_data;
1830 physical += offsetof(struct ext4_inode, i_block); 1842 physical += offsetof(struct ext4_inode, i_block);
1831 length = i_size_read(inode);
1832 1843
1833 if (physical) 1844 if (physical)
1834 error = fiemap_fill_next_extent(fieinfo, 0, physical, 1845 error = fiemap_fill_next_extent(fieinfo, start, physical,
1835 length, flags); 1846 inline_len, flags);
1836 brelse(iloc.bh); 1847 brelse(iloc.bh);
1837out: 1848out:
1838 up_read(&EXT4_I(inode)->xattr_sem); 1849 up_read(&EXT4_I(inode)->xattr_sem);
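The start/len clamping added above is plain interval arithmetic over the inline area; a self-contained sketch of the same computation, with names local to this example:

#include <stdint.h>

/* Clamp the reported inline extent to the [start, start + len) query
 * window, mirroring the hunk above; returns the length to report, or 0
 * when the window lies entirely beyond the inline area. */
static uint64_t clamp_inline(uint64_t inline_size, uint64_t i_size,
			     uint64_t start, uint64_t len)
{
	uint64_t inline_len = inline_size < i_size ? inline_size : i_size;

	if (start >= inline_len)
		return 0;
	if (start + len < inline_len)
		inline_len = start + len;
	return inline_len - start;
}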
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 3356ab5395f4..5653fa42930b 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -416,11 +416,6 @@ static void ext4_map_blocks_es_recheck(handle_t *handle,
416 } 416 }
417 if (!(flags & EXT4_GET_BLOCKS_NO_LOCK)) 417 if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
418 up_read((&EXT4_I(inode)->i_data_sem)); 418 up_read((&EXT4_I(inode)->i_data_sem));
419 /*
420 * Clear EXT4_MAP_FROM_CLUSTER and EXT4_MAP_BOUNDARY flag
421 * because it shouldn't be marked in es_map->m_flags.
422 */
423 map->m_flags &= ~(EXT4_MAP_FROM_CLUSTER | EXT4_MAP_BOUNDARY);
424 419
425 /* 420 /*
426 * We don't check m_len because extent will be collapsed in status 421 * We don't check m_len because extent will be collapsed in status
@@ -491,7 +486,6 @@ int ext4_map_blocks(handle_t *handle, struct inode *inode,
491 486
492 /* Lookup extent status tree firstly */ 487 /* Lookup extent status tree firstly */
493 if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) { 488 if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
494 ext4_es_lru_add(inode);
495 if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) { 489 if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
496 map->m_pblk = ext4_es_pblock(&es) + 490 map->m_pblk = ext4_es_pblock(&es) +
497 map->m_lblk - es.es_lblk; 491 map->m_lblk - es.es_lblk;
@@ -1393,7 +1387,6 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
1393 1387
1394 /* Lookup extent status tree firstly */ 1388 /* Lookup extent status tree firstly */
1395 if (ext4_es_lookup_extent(inode, iblock, &es)) { 1389 if (ext4_es_lookup_extent(inode, iblock, &es)) {
1396 ext4_es_lru_add(inode);
1397 if (ext4_es_is_hole(&es)) { 1390 if (ext4_es_is_hole(&es)) {
1398 retval = 0; 1391 retval = 0;
1399 down_read(&EXT4_I(inode)->i_data_sem); 1392 down_read(&EXT4_I(inode)->i_data_sem);
@@ -1434,24 +1427,12 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
1434 * file system block. 1427 * file system block.
1435 */ 1428 */
1436 down_read(&EXT4_I(inode)->i_data_sem); 1429 down_read(&EXT4_I(inode)->i_data_sem);
1437 if (ext4_has_inline_data(inode)) { 1430 if (ext4_has_inline_data(inode))
1438 /*
1439 * We will soon create blocks for this page, and let
1440 * us pretend as if the blocks aren't allocated yet.
1441 * In case of clusters, we have to handle the work
1442 * of mapping from cluster so that the reserved space
1443 * is calculated properly.
1444 */
1445 if ((EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) &&
1446 ext4_find_delalloc_cluster(inode, map->m_lblk))
1447 map->m_flags |= EXT4_MAP_FROM_CLUSTER;
1448 retval = 0; 1431 retval = 0;
1449 } else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) 1432 else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1450 retval = ext4_ext_map_blocks(NULL, inode, map, 1433 retval = ext4_ext_map_blocks(NULL, inode, map, 0);
1451 EXT4_GET_BLOCKS_NO_PUT_HOLE);
1452 else 1434 else
1453 retval = ext4_ind_map_blocks(NULL, inode, map, 1435 retval = ext4_ind_map_blocks(NULL, inode, map, 0);
1454 EXT4_GET_BLOCKS_NO_PUT_HOLE);
1455 1436
1456add_delayed: 1437add_delayed:
1457 if (retval == 0) { 1438 if (retval == 0) {
@@ -1465,7 +1446,8 @@ add_delayed:
1465 * then we don't need to reserve it again. However we still need 1446 * then we don't need to reserve it again. However we still need
1466 * to reserve metadata for every block we're going to write. 1447 * to reserve metadata for every block we're going to write.
1467 */ 1448 */
1468 if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) { 1449 if (EXT4_SB(inode->i_sb)->s_cluster_ratio <= 1 ||
1450 !ext4_find_delalloc_cluster(inode, map->m_lblk)) {
1469 ret = ext4_da_reserve_space(inode, iblock); 1451 ret = ext4_da_reserve_space(inode, iblock);
1470 if (ret) { 1452 if (ret) {
1471 /* not enough space to reserve */ 1453 /* not enough space to reserve */
@@ -1481,11 +1463,6 @@ add_delayed:
1481 goto out_unlock; 1463 goto out_unlock;
1482 } 1464 }
1483 1465
1484 /* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served
1485 * and it should not appear on the bh->b_state.
1486 */
1487 map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
1488
1489 map_bh(bh, inode->i_sb, invalid_block); 1466 map_bh(bh, inode->i_sb, invalid_block);
1490 set_buffer_new(bh); 1467 set_buffer_new(bh);
1491 set_buffer_delay(bh); 1468 set_buffer_delay(bh);
@@ -3643,7 +3620,7 @@ out_stop:
3643 * If this was a simple ftruncate() and the file will remain alive, 3620 * If this was a simple ftruncate() and the file will remain alive,
3644 * then we need to clear up the orphan record which we created above. 3621 * then we need to clear up the orphan record which we created above.
3645 * However, if this was a real unlink then we were called by 3622 * However, if this was a real unlink then we were called by
3646 * ext4_delete_inode(), and we allow that function to clean up the 3623 * ext4_evict_inode(), and we allow that function to clean up the
3647 * orphan info for us. 3624 * orphan info for us.
3648 */ 3625 */
3649 if (inode->i_nlink) 3626 if (inode->i_nlink)
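The new reservation test that replaces the EXT4_MAP_FROM_CLUSTER round trip reduces to one predicate; a hedged sketch with the ext4 internals stubbed out (the helper and its arguments are hypothetical, for illustration):

#include <stdbool.h>

/* Reserve quota for a delayed block only when its cluster does not
 * already carry a delalloc reservation; with a cluster ratio of 1
 * every block is its own cluster, so always reserve. */
static bool need_da_reservation(unsigned int cluster_ratio,
				bool cluster_has_delalloc)
{
	return cluster_ratio <= 1 || !cluster_has_delalloc;
}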
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index bfda18a15592..f58a0d106726 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -78,8 +78,6 @@ static void swap_inode_data(struct inode *inode1, struct inode *inode2)
78 memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize)); 78 memswap(&ei1->i_disksize, &ei2->i_disksize, sizeof(ei1->i_disksize));
79 ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS); 79 ext4_es_remove_extent(inode1, 0, EXT_MAX_BLOCKS);
80 ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS); 80 ext4_es_remove_extent(inode2, 0, EXT_MAX_BLOCKS);
81 ext4_es_lru_del(inode1);
82 ext4_es_lru_del(inode2);
83 81
84 isize = i_size_read(inode1); 82 isize = i_size_read(inode1);
85 i_size_write(inode1, i_size_read(inode2)); 83 i_size_write(inode1, i_size_read(inode2));
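swap_inode_data() above is built on memswap(); a hedged, self-contained equivalent of such a helper (byte-wise swap of two equally sized, non-overlapping buffers):

#include <stddef.h>

/* Swap the contents of two equally sized, non-overlapping buffers. */
static void memswap(void *a, void *b, size_t n)
{
	unsigned char *pa = a, *pb = b, tmp;

	while (n--) {
		tmp = *pa;
		*pa++ = *pb;
		*pb++ = tmp;
	}
}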
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index dbfe15c2533c..8d1e60214ef0 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2358,7 +2358,7 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
2358 if (sbi->s_group_info) { 2358 if (sbi->s_group_info) {
2359 memcpy(new_groupinfo, sbi->s_group_info, 2359 memcpy(new_groupinfo, sbi->s_group_info,
2360 sbi->s_group_info_size * sizeof(*sbi->s_group_info)); 2360 sbi->s_group_info_size * sizeof(*sbi->s_group_info));
2361 ext4_kvfree(sbi->s_group_info); 2361 kvfree(sbi->s_group_info);
2362 } 2362 }
2363 sbi->s_group_info = new_groupinfo; 2363 sbi->s_group_info = new_groupinfo;
2364 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info); 2364 sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
@@ -2385,7 +2385,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2385 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) { 2385 if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
2386 metalen = sizeof(*meta_group_info) << 2386 metalen = sizeof(*meta_group_info) <<
2387 EXT4_DESC_PER_BLOCK_BITS(sb); 2387 EXT4_DESC_PER_BLOCK_BITS(sb);
2388 meta_group_info = kmalloc(metalen, GFP_KERNEL); 2388 meta_group_info = kmalloc(metalen, GFP_NOFS);
2389 if (meta_group_info == NULL) { 2389 if (meta_group_info == NULL) {
2390 ext4_msg(sb, KERN_ERR, "can't allocate mem " 2390 ext4_msg(sb, KERN_ERR, "can't allocate mem "
2391 "for a buddy group"); 2391 "for a buddy group");
@@ -2399,7 +2399,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2399 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]; 2399 sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
2400 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1); 2400 i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
2401 2401
2402 meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_KERNEL); 2402 meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
2403 if (meta_group_info[i] == NULL) { 2403 if (meta_group_info[i] == NULL) {
2404 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem"); 2404 ext4_msg(sb, KERN_ERR, "can't allocate buddy mem");
2405 goto exit_group_info; 2405 goto exit_group_info;
@@ -2428,7 +2428,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
2428 { 2428 {
2429 struct buffer_head *bh; 2429 struct buffer_head *bh;
2430 meta_group_info[i]->bb_bitmap = 2430 meta_group_info[i]->bb_bitmap =
2431 kmalloc(sb->s_blocksize, GFP_KERNEL); 2431 kmalloc(sb->s_blocksize, GFP_NOFS);
2432 BUG_ON(meta_group_info[i]->bb_bitmap == NULL); 2432 BUG_ON(meta_group_info[i]->bb_bitmap == NULL);
2433 bh = ext4_read_block_bitmap(sb, group); 2433 bh = ext4_read_block_bitmap(sb, group);
2434 BUG_ON(bh == NULL); 2434 BUG_ON(bh == NULL);
@@ -2495,7 +2495,7 @@ err_freebuddy:
2495 kfree(sbi->s_group_info[i]); 2495 kfree(sbi->s_group_info[i]);
2496 iput(sbi->s_buddy_cache); 2496 iput(sbi->s_buddy_cache);
2497err_freesgi: 2497err_freesgi:
2498 ext4_kvfree(sbi->s_group_info); 2498 kvfree(sbi->s_group_info);
2499 return -ENOMEM; 2499 return -ENOMEM;
2500} 2500}
2501 2501
@@ -2708,12 +2708,11 @@ int ext4_mb_release(struct super_block *sb)
2708 EXT4_DESC_PER_BLOCK_BITS(sb); 2708 EXT4_DESC_PER_BLOCK_BITS(sb);
2709 for (i = 0; i < num_meta_group_infos; i++) 2709 for (i = 0; i < num_meta_group_infos; i++)
2710 kfree(sbi->s_group_info[i]); 2710 kfree(sbi->s_group_info[i]);
2711 ext4_kvfree(sbi->s_group_info); 2711 kvfree(sbi->s_group_info);
2712 } 2712 }
2713 kfree(sbi->s_mb_offsets); 2713 kfree(sbi->s_mb_offsets);
2714 kfree(sbi->s_mb_maxs); 2714 kfree(sbi->s_mb_maxs);
2715 if (sbi->s_buddy_cache) 2715 iput(sbi->s_buddy_cache);
2716 iput(sbi->s_buddy_cache);
2717 if (sbi->s_mb_stats) { 2716 if (sbi->s_mb_stats) {
2718 ext4_msg(sb, KERN_INFO, 2717 ext4_msg(sb, KERN_INFO,
2719 "mballoc: %u blocks %u reqs (%u success)", 2718 "mballoc: %u blocks %u reqs (%u success)",
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index a432634f2e6a..3cb267aee802 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -592,7 +592,7 @@ err_out:
592 592
593 /* 593 /*
594 * set the i_blocks count to zero 594 * set the i_blocks count to zero
595 * so that the ext4_delete_inode does the 595 * so that the ext4_evict_inode() does the
596 * right job 596 * right job
597 * 597 *
598 * We don't need to take the i_lock because 598 * We don't need to take the i_lock because
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 9f2311bc9c4f..503ea15dc5db 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -273,6 +273,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
273 int replaced_count = 0; 273 int replaced_count = 0;
274 int from = data_offset_in_page << orig_inode->i_blkbits; 274 int from = data_offset_in_page << orig_inode->i_blkbits;
275 int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits; 275 int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
276 struct super_block *sb = orig_inode->i_sb;
276 277
277 /* 278 /*
278 * It needs twice the amount of ordinary journal buffers because 279 * It needs twice the amount of ordinary journal buffers because
@@ -405,10 +406,13 @@ unlock_pages:
405 page_cache_release(pagep[1]); 406 page_cache_release(pagep[1]);
406stop_journal: 407stop_journal:
407 ext4_journal_stop(handle); 408 ext4_journal_stop(handle);
409 if (*err == -ENOSPC &&
410 ext4_should_retry_alloc(sb, &retries))
411 goto again;
408 /* Buffer was busy because probably is pinned to journal transaction, 412 /* Buffer was busy because probably is pinned to journal transaction,
409 * force transaction commit may help to free it. */ 413 * force transaction commit may help to free it. */
410 if (*err == -EBUSY && ext4_should_retry_alloc(orig_inode->i_sb, 414 if (*err == -EBUSY && retries++ < 4 && EXT4_SB(sb)->s_journal &&
411 &retries)) 415 jbd2_journal_force_commit_nested(EXT4_SB(sb)->s_journal))
412 goto again; 416 goto again;
413 return replaced_count; 417 return replaced_count;
414 418
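The reworked cleanup path retries on ENOSPC via the allocator's heuristic and, separately, bounds EBUSY retries by forcing a journal commit. A schematic, self-contained version of that policy (try_move, should_retry_alloc and force_commit are stand-ins for the ext4/jbd2 calls, not real APIs):

#include <errno.h>
#include <stdbool.h>

/* Schematic retry policy after stopping the handle: ENOSPC defers to
 * the allocator's retry heuristic; EBUSY gets up to four forced
 * journal commits to unpin busy buffers. */
static int move_with_retries(int (*try_move)(void),
			     bool (*should_retry_alloc)(void),
			     bool (*force_commit)(void))
{
	int err, busy_retries = 0;

	for (;;) {
		err = try_move();
		if (err == -ENOSPC && should_retry_alloc())
			continue;	/* allocator freed space, try again */
		if (err == -EBUSY && busy_retries++ < 4 && force_commit())
			continue;	/* commit may have unpinned buffers */
		break;
	}
	return err;
}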
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 426211882f72..2291923dae4e 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -2814,7 +2814,6 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry)
2814 ext4_orphan_add(handle, inode); 2814 ext4_orphan_add(handle, inode);
2815 inode->i_ctime = ext4_current_time(inode); 2815 inode->i_ctime = ext4_current_time(inode);
2816 ext4_mark_inode_dirty(handle, inode); 2816 ext4_mark_inode_dirty(handle, inode);
2817 retval = 0;
2818 2817
2819end_unlink: 2818end_unlink:
2820 brelse(bh); 2819 brelse(bh);
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index ca4588388fc3..bf76f405a5f9 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -856,7 +856,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
856 n_group_desc[gdb_num] = gdb_bh; 856 n_group_desc[gdb_num] = gdb_bh;
857 EXT4_SB(sb)->s_group_desc = n_group_desc; 857 EXT4_SB(sb)->s_group_desc = n_group_desc;
858 EXT4_SB(sb)->s_gdb_count++; 858 EXT4_SB(sb)->s_gdb_count++;
859 ext4_kvfree(o_group_desc); 859 kvfree(o_group_desc);
860 860
861 le16_add_cpu(&es->s_reserved_gdt_blocks, -1); 861 le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
862 err = ext4_handle_dirty_super(handle, sb); 862 err = ext4_handle_dirty_super(handle, sb);
@@ -866,7 +866,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
866 return err; 866 return err;
867 867
868exit_inode: 868exit_inode:
869 ext4_kvfree(n_group_desc); 869 kvfree(n_group_desc);
870 brelse(iloc.bh); 870 brelse(iloc.bh);
871exit_dind: 871exit_dind:
872 brelse(dind); 872 brelse(dind);
@@ -909,7 +909,7 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
909 n_group_desc[gdb_num] = gdb_bh; 909 n_group_desc[gdb_num] = gdb_bh;
910 EXT4_SB(sb)->s_group_desc = n_group_desc; 910 EXT4_SB(sb)->s_group_desc = n_group_desc;
911 EXT4_SB(sb)->s_gdb_count++; 911 EXT4_SB(sb)->s_gdb_count++;
912 ext4_kvfree(o_group_desc); 912 kvfree(o_group_desc);
913 BUFFER_TRACE(gdb_bh, "get_write_access"); 913 BUFFER_TRACE(gdb_bh, "get_write_access");
914 err = ext4_journal_get_write_access(handle, gdb_bh); 914 err = ext4_journal_get_write_access(handle, gdb_bh);
915 if (unlikely(err)) 915 if (unlikely(err))
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 63e802b8ec68..43c92b1685cb 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -176,15 +176,6 @@ void *ext4_kvzalloc(size_t size, gfp_t flags)
176 return ret; 176 return ret;
177} 177}
178 178
179void ext4_kvfree(void *ptr)
180{
181 if (is_vmalloc_addr(ptr))
182 vfree(ptr);
183 else
184 kfree(ptr);
185
186}
187
188ext4_fsblk_t ext4_block_bitmap(struct super_block *sb, 179ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
189 struct ext4_group_desc *bg) 180 struct ext4_group_desc *bg)
190{ 181{
@@ -811,8 +802,8 @@ static void ext4_put_super(struct super_block *sb)
811 802
812 for (i = 0; i < sbi->s_gdb_count; i++) 803 for (i = 0; i < sbi->s_gdb_count; i++)
813 brelse(sbi->s_group_desc[i]); 804 brelse(sbi->s_group_desc[i]);
814 ext4_kvfree(sbi->s_group_desc); 805 kvfree(sbi->s_group_desc);
815 ext4_kvfree(sbi->s_flex_groups); 806 kvfree(sbi->s_flex_groups);
816 percpu_counter_destroy(&sbi->s_freeclusters_counter); 807 percpu_counter_destroy(&sbi->s_freeclusters_counter);
817 percpu_counter_destroy(&sbi->s_freeinodes_counter); 808 percpu_counter_destroy(&sbi->s_freeinodes_counter);
818 percpu_counter_destroy(&sbi->s_dirs_counter); 809 percpu_counter_destroy(&sbi->s_dirs_counter);
@@ -880,10 +871,10 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
880 spin_lock_init(&ei->i_prealloc_lock); 871 spin_lock_init(&ei->i_prealloc_lock);
881 ext4_es_init_tree(&ei->i_es_tree); 872 ext4_es_init_tree(&ei->i_es_tree);
882 rwlock_init(&ei->i_es_lock); 873 rwlock_init(&ei->i_es_lock);
883 INIT_LIST_HEAD(&ei->i_es_lru); 874 INIT_LIST_HEAD(&ei->i_es_list);
884 ei->i_es_all_nr = 0; 875 ei->i_es_all_nr = 0;
885 ei->i_es_lru_nr = 0; 876 ei->i_es_shk_nr = 0;
886 ei->i_touch_when = 0; 877 ei->i_es_shrink_lblk = 0;
887 ei->i_reserved_data_blocks = 0; 878 ei->i_reserved_data_blocks = 0;
888 ei->i_reserved_meta_blocks = 0; 879 ei->i_reserved_meta_blocks = 0;
889 ei->i_allocated_meta_blocks = 0; 880 ei->i_allocated_meta_blocks = 0;
@@ -973,7 +964,6 @@ void ext4_clear_inode(struct inode *inode)
973 dquot_drop(inode); 964 dquot_drop(inode);
974 ext4_discard_preallocations(inode); 965 ext4_discard_preallocations(inode);
975 ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS); 966 ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
976 ext4_es_lru_del(inode);
977 if (EXT4_I(inode)->jinode) { 967 if (EXT4_I(inode)->jinode) {
978 jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode), 968 jbd2_journal_release_jbd_inode(EXT4_JOURNAL(inode),
979 EXT4_I(inode)->jinode); 969 EXT4_I(inode)->jinode);
@@ -1153,7 +1143,7 @@ enum {
1153 Opt_inode_readahead_blks, Opt_journal_ioprio, 1143 Opt_inode_readahead_blks, Opt_journal_ioprio,
1154 Opt_dioread_nolock, Opt_dioread_lock, 1144 Opt_dioread_nolock, Opt_dioread_lock,
1155 Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable, 1145 Opt_discard, Opt_nodiscard, Opt_init_itable, Opt_noinit_itable,
1156 Opt_max_dir_size_kb, 1146 Opt_max_dir_size_kb, Opt_nojournal_checksum,
1157}; 1147};
1158 1148
1159static const match_table_t tokens = { 1149static const match_table_t tokens = {
@@ -1187,6 +1177,7 @@ static const match_table_t tokens = {
1187 {Opt_journal_dev, "journal_dev=%u"}, 1177 {Opt_journal_dev, "journal_dev=%u"},
1188 {Opt_journal_path, "journal_path=%s"}, 1178 {Opt_journal_path, "journal_path=%s"},
1189 {Opt_journal_checksum, "journal_checksum"}, 1179 {Opt_journal_checksum, "journal_checksum"},
1180 {Opt_nojournal_checksum, "nojournal_checksum"},
1190 {Opt_journal_async_commit, "journal_async_commit"}, 1181 {Opt_journal_async_commit, "journal_async_commit"},
1191 {Opt_abort, "abort"}, 1182 {Opt_abort, "abort"},
1192 {Opt_data_journal, "data=journal"}, 1183 {Opt_data_journal, "data=journal"},
@@ -1368,6 +1359,8 @@ static const struct mount_opts {
1368 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT}, 1359 MOPT_EXT4_ONLY | MOPT_SET | MOPT_EXPLICIT},
1369 {Opt_nodelalloc, EXT4_MOUNT_DELALLOC, 1360 {Opt_nodelalloc, EXT4_MOUNT_DELALLOC,
1370 MOPT_EXT4_ONLY | MOPT_CLEAR}, 1361 MOPT_EXT4_ONLY | MOPT_CLEAR},
1362 {Opt_nojournal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
1363 MOPT_EXT4_ONLY | MOPT_CLEAR},
1371 {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM, 1364 {Opt_journal_checksum, EXT4_MOUNT_JOURNAL_CHECKSUM,
1372 MOPT_EXT4_ONLY | MOPT_SET}, 1365 MOPT_EXT4_ONLY | MOPT_SET},
1373 {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT | 1366 {Opt_journal_async_commit, (EXT4_MOUNT_JOURNAL_ASYNC_COMMIT |
@@ -1709,6 +1702,12 @@ static int parse_options(char *options, struct super_block *sb,
1709 return 0; 1702 return 0;
1710 } 1703 }
1711 } 1704 }
1705 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA &&
1706 test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
1707 ext4_msg(sb, KERN_ERR, "can't mount with journal_async_commit "
1708 "in data=ordered mode");
1709 return 0;
1710 }
1712 return 1; 1711 return 1;
1713} 1712}
1714 1713
@@ -1946,7 +1945,7 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
1946 memcpy(new_groups, sbi->s_flex_groups, 1945 memcpy(new_groups, sbi->s_flex_groups,
1947 (sbi->s_flex_groups_allocated * 1946 (sbi->s_flex_groups_allocated *
1948 sizeof(struct flex_groups))); 1947 sizeof(struct flex_groups)));
1949 ext4_kvfree(sbi->s_flex_groups); 1948 kvfree(sbi->s_flex_groups);
1950 } 1949 }
1951 sbi->s_flex_groups = new_groups; 1950 sbi->s_flex_groups = new_groups;
1952 sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups); 1951 sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
@@ -3317,7 +3316,7 @@ int ext4_calculate_overhead(struct super_block *sb)
3317 struct ext4_super_block *es = sbi->s_es; 3316 struct ext4_super_block *es = sbi->s_es;
3318 ext4_group_t i, ngroups = ext4_get_groups_count(sb); 3317 ext4_group_t i, ngroups = ext4_get_groups_count(sb);
3319 ext4_fsblk_t overhead = 0; 3318 ext4_fsblk_t overhead = 0;
3320 char *buf = (char *) get_zeroed_page(GFP_KERNEL); 3319 char *buf = (char *) get_zeroed_page(GFP_NOFS);
3321 3320
3322 if (!buf) 3321 if (!buf)
3323 return -ENOMEM; 3322 return -ENOMEM;
@@ -3345,8 +3344,8 @@ int ext4_calculate_overhead(struct super_block *sb)
3345 memset(buf, 0, PAGE_SIZE); 3344 memset(buf, 0, PAGE_SIZE);
3346 cond_resched(); 3345 cond_resched();
3347 } 3346 }
3348 /* Add the journal blocks as well */ 3347 /* Add the internal journal blocks as well */
3349 if (sbi->s_journal) 3348 if (sbi->s_journal && !sbi->journal_bdev)
3350 overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen); 3349 overhead += EXT4_NUM_B2C(sbi, sbi->s_journal->j_maxlen);
3351 3350
3352 sbi->s_overhead = overhead; 3351 sbi->s_overhead = overhead;
@@ -4232,7 +4231,7 @@ failed_mount7:
4232failed_mount6: 4231failed_mount6:
4233 ext4_mb_release(sb); 4232 ext4_mb_release(sb);
4234 if (sbi->s_flex_groups) 4233 if (sbi->s_flex_groups)
4235 ext4_kvfree(sbi->s_flex_groups); 4234 kvfree(sbi->s_flex_groups);
4236 percpu_counter_destroy(&sbi->s_freeclusters_counter); 4235 percpu_counter_destroy(&sbi->s_freeclusters_counter);
4237 percpu_counter_destroy(&sbi->s_freeinodes_counter); 4236 percpu_counter_destroy(&sbi->s_freeinodes_counter);
4238 percpu_counter_destroy(&sbi->s_dirs_counter); 4237 percpu_counter_destroy(&sbi->s_dirs_counter);
@@ -4261,7 +4260,7 @@ failed_mount3:
4261failed_mount2: 4260failed_mount2:
4262 for (i = 0; i < db_count; i++) 4261 for (i = 0; i < db_count; i++)
4263 brelse(sbi->s_group_desc[i]); 4262 brelse(sbi->s_group_desc[i]);
4264 ext4_kvfree(sbi->s_group_desc); 4263 kvfree(sbi->s_group_desc);
4265failed_mount: 4264failed_mount:
4266 if (sbi->s_chksum_driver) 4265 if (sbi->s_chksum_driver)
4267 crypto_free_shash(sbi->s_chksum_driver); 4266 crypto_free_shash(sbi->s_chksum_driver);
@@ -4862,6 +4861,14 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
4862 goto restore_opts; 4861 goto restore_opts;
4863 } 4862 }
4864 4863
4864 if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
4865 test_opt(sb, JOURNAL_CHECKSUM)) {
4866 ext4_msg(sb, KERN_ERR, "changing journal_checksum "
4867 "during remount not supported");
4868 err = -EINVAL;
4869 goto restore_opts;
4870 }
4871
4865 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { 4872 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
4866 if (test_opt2(sb, EXPLICIT_DELALLOC)) { 4873 if (test_opt2(sb, EXPLICIT_DELALLOC)) {
4867 ext4_msg(sb, KERN_ERR, "can't mount with " 4874 ext4_msg(sb, KERN_ERR, "can't mount with "
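The remount guard rejects any toggle of journal_checksum between the saved and the freshly parsed option words; the check amounts to a bitmask comparison. A sketch in isolation (hypothetical helper name):

#include <stdbool.h>

/* Reject a remount when a must-not-change option bit differs between
 * the saved options and the freshly parsed ones. */
static bool opt_toggled_across_remount(unsigned long old_opts,
				       unsigned long new_opts,
				       unsigned long flag)
{
	return ((old_opts ^ new_opts) & flag) != 0;
}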
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 1df94fabe4eb..b96bd8076b70 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1714,8 +1714,7 @@ int jbd2_journal_destroy(journal_t *journal)
1714 1714
1715 if (journal->j_proc_entry) 1715 if (journal->j_proc_entry)
1716 jbd2_stats_proc_exit(journal); 1716 jbd2_stats_proc_exit(journal);
1717 if (journal->j_inode) 1717 iput(journal->j_inode);
1718 iput(journal->j_inode);
1719 if (journal->j_revoke) 1718 if (journal->j_revoke)
1720 jbd2_journal_destroy_revoke(journal); 1719 jbd2_journal_destroy_revoke(journal);
1721 if (journal->j_chksum_driver) 1720 if (journal->j_chksum_driver)
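The jbd2 hunk relies on iput() treating a NULL inode as a no-op, the same convention kfree() and brelse() follow, so the guard was redundant. The userspace analogue:

#include <stdlib.h>

/* free(NULL) is defined to do nothing, so cleanup code needs no guard;
 * iput(), brelse() and kfree() follow the same convention in-kernel. */
static void release(char *buf)
{
	free(buf);	/* safe even when buf == NULL */
}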
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index ec881b312700..2f389ce5023c 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -61,6 +61,11 @@ module_param(mem_size, ulong, 0400);
61MODULE_PARM_DESC(mem_size, 61MODULE_PARM_DESC(mem_size,
62 "size of reserved RAM used to store oops/panic logs"); 62 "size of reserved RAM used to store oops/panic logs");
63 63
64static unsigned int mem_type;
65module_param(mem_type, uint, 0600);
66MODULE_PARM_DESC(mem_type,
67 "set to 1 to try to use unbuffered memory (default 0)");
68
64static int dump_oops = 1; 69static int dump_oops = 1;
65module_param(dump_oops, int, 0600); 70module_param(dump_oops, int, 0600);
66MODULE_PARM_DESC(dump_oops, 71MODULE_PARM_DESC(dump_oops,
@@ -79,6 +84,7 @@ struct ramoops_context {
79 struct persistent_ram_zone *fprz; 84 struct persistent_ram_zone *fprz;
80 phys_addr_t phys_addr; 85 phys_addr_t phys_addr;
81 unsigned long size; 86 unsigned long size;
87 unsigned int memtype;
82 size_t record_size; 88 size_t record_size;
83 size_t console_size; 89 size_t console_size;
84 size_t ftrace_size; 90 size_t ftrace_size;
@@ -366,7 +372,8 @@ static int ramoops_init_przs(struct device *dev, struct ramoops_context *cxt,
366 size_t sz = cxt->record_size; 372 size_t sz = cxt->record_size;
367 373
368 cxt->przs[i] = persistent_ram_new(*paddr, sz, 0, 374 cxt->przs[i] = persistent_ram_new(*paddr, sz, 0,
369 &cxt->ecc_info); 375 &cxt->ecc_info,
376 cxt->memtype);
370 if (IS_ERR(cxt->przs[i])) { 377 if (IS_ERR(cxt->przs[i])) {
371 err = PTR_ERR(cxt->przs[i]); 378 err = PTR_ERR(cxt->przs[i]);
372 dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n", 379 dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
@@ -396,7 +403,7 @@ static int ramoops_init_prz(struct device *dev, struct ramoops_context *cxt,
396 return -ENOMEM; 403 return -ENOMEM;
397 } 404 }
398 405
399 *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info); 406 *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info, cxt->memtype);
400 if (IS_ERR(*prz)) { 407 if (IS_ERR(*prz)) {
401 int err = PTR_ERR(*prz); 408 int err = PTR_ERR(*prz);
402 409
@@ -443,6 +450,7 @@ static int ramoops_probe(struct platform_device *pdev)
443 450
444 cxt->size = pdata->mem_size; 451 cxt->size = pdata->mem_size;
445 cxt->phys_addr = pdata->mem_address; 452 cxt->phys_addr = pdata->mem_address;
453 cxt->memtype = pdata->mem_type;
446 cxt->record_size = pdata->record_size; 454 cxt->record_size = pdata->record_size;
447 cxt->console_size = pdata->console_size; 455 cxt->console_size = pdata->console_size;
448 cxt->ftrace_size = pdata->ftrace_size; 456 cxt->ftrace_size = pdata->ftrace_size;
@@ -572,6 +580,7 @@ static void ramoops_register_dummy(void)
572 580
573 dummy_data->mem_size = mem_size; 581 dummy_data->mem_size = mem_size;
574 dummy_data->mem_address = mem_address; 582 dummy_data->mem_address = mem_address;
583 dummy_data->mem_type = 0;
575 dummy_data->record_size = record_size; 584 dummy_data->record_size = record_size;
576 dummy_data->console_size = ramoops_console_size; 585 dummy_data->console_size = ramoops_console_size;
577 dummy_data->ftrace_size = ramoops_ftrace_size; 586 dummy_data->ftrace_size = ramoops_ftrace_size;
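With the new mem_type field, a board file can choose between the uncached and the write-combined mapping when registering ramoops; a hedged sketch, with made-up size and address values:

#include <linux/platform_device.h>
#include <linux/pstore_ram.h>
#include <linux/sizes.h>

static struct ramoops_platform_data ramoops_data = {
	.mem_size	= SZ_1M,	/* hypothetical carve-out */
	.mem_address	= 0x8f000000,	/* hypothetical base address */
	.mem_type	= 1,		/* 1 = uncached, 0 = write-combined */
	.record_size	= SZ_64K,
	.dump_oops	= 1,
};

static struct platform_device ramoops_dev = {
	.name	= "ramoops",
	.dev	= {
		.platform_data = &ramoops_data,
	},
};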
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 9d7b9a83699e..76c3f80efdfa 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -380,7 +380,8 @@ void persistent_ram_zap(struct persistent_ram_zone *prz)
380 persistent_ram_update_header_ecc(prz); 380 persistent_ram_update_header_ecc(prz);
381} 381}
382 382
383static void *persistent_ram_vmap(phys_addr_t start, size_t size) 383static void *persistent_ram_vmap(phys_addr_t start, size_t size,
384 unsigned int memtype)
384{ 385{
385 struct page **pages; 386 struct page **pages;
386 phys_addr_t page_start; 387 phys_addr_t page_start;
@@ -392,7 +393,10 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size)
392 page_start = start - offset_in_page(start); 393 page_start = start - offset_in_page(start);
393 page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE); 394 page_count = DIV_ROUND_UP(size + offset_in_page(start), PAGE_SIZE);
394 395
395 prot = pgprot_noncached(PAGE_KERNEL); 396 if (memtype)
397 prot = pgprot_noncached(PAGE_KERNEL);
398 else
399 prot = pgprot_writecombine(PAGE_KERNEL);
396 400
397 pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL); 401 pages = kmalloc_array(page_count, sizeof(struct page *), GFP_KERNEL);
398 if (!pages) { 402 if (!pages) {
@@ -411,8 +415,11 @@ static void *persistent_ram_vmap(phys_addr_t start, size_t size)
411 return vaddr; 415 return vaddr;
412} 416}
413 417
414static void *persistent_ram_iomap(phys_addr_t start, size_t size) 418static void *persistent_ram_iomap(phys_addr_t start, size_t size,
419 unsigned int memtype)
415{ 420{
421 void *va;
422
416 if (!request_mem_region(start, size, "persistent_ram")) { 423 if (!request_mem_region(start, size, "persistent_ram")) {
417 pr_err("request mem region (0x%llx@0x%llx) failed\n", 424 pr_err("request mem region (0x%llx@0x%llx) failed\n",
418 (unsigned long long)size, (unsigned long long)start); 425 (unsigned long long)size, (unsigned long long)start);
@@ -422,19 +429,24 @@ static void *persistent_ram_iomap(phys_addr_t start, size_t size)
422 buffer_start_add = buffer_start_add_locked; 429 buffer_start_add = buffer_start_add_locked;
423 buffer_size_add = buffer_size_add_locked; 430 buffer_size_add = buffer_size_add_locked;
424 431
425 return ioremap(start, size); 432 if (memtype)
433 va = ioremap(start, size);
434 else
435 va = ioremap_wc(start, size);
436
437 return va;
426} 438}
427 439
428static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size, 440static int persistent_ram_buffer_map(phys_addr_t start, phys_addr_t size,
429 struct persistent_ram_zone *prz) 441 struct persistent_ram_zone *prz, int memtype)
430{ 442{
431 prz->paddr = start; 443 prz->paddr = start;
432 prz->size = size; 444 prz->size = size;
433 445
434 if (pfn_valid(start >> PAGE_SHIFT)) 446 if (pfn_valid(start >> PAGE_SHIFT))
435 prz->vaddr = persistent_ram_vmap(start, size); 447 prz->vaddr = persistent_ram_vmap(start, size, memtype);
436 else 448 else
437 prz->vaddr = persistent_ram_iomap(start, size); 449 prz->vaddr = persistent_ram_iomap(start, size, memtype);
438 450
439 if (!prz->vaddr) { 451 if (!prz->vaddr) {
440 pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__, 452 pr_err("%s: Failed to map 0x%llx pages at 0x%llx\n", __func__,
@@ -500,7 +512,8 @@ void persistent_ram_free(struct persistent_ram_zone *prz)
500} 512}
501 513
502struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size, 514struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
503 u32 sig, struct persistent_ram_ecc_info *ecc_info) 515 u32 sig, struct persistent_ram_ecc_info *ecc_info,
516 unsigned int memtype)
504{ 517{
505 struct persistent_ram_zone *prz; 518 struct persistent_ram_zone *prz;
506 int ret = -ENOMEM; 519 int ret = -ENOMEM;
@@ -511,7 +524,7 @@ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
511 goto err; 524 goto err;
512 } 525 }
513 526
514 ret = persistent_ram_buffer_map(start, size, prz); 527 ret = persistent_ram_buffer_map(start, size, prz, memtype);
515 if (ret) 528 if (ret)
516 goto err; 529 goto err;
517 530
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index d571e173a990..9d6486d416a3 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -2772,7 +2772,7 @@ int journal_init(struct super_block *sb, const char *j_dev_name,
2772 2772
2773 if (journal_init_dev(sb, journal, j_dev_name) != 0) { 2773 if (journal_init_dev(sb, journal, j_dev_name) != 0) {
2774 reiserfs_warning(sb, "sh-462", 2774 reiserfs_warning(sb, "sh-462",
2775 "unable to initialize jornal device"); 2775 "unable to initialize journal device");
2776 goto free_and_return; 2776 goto free_and_return;
2777 } 2777 }
2778 2778
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index b5b593c45270..538519ee37d9 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -262,6 +262,7 @@ static int write_begin_slow(struct address_space *mapping,
262 if (err) { 262 if (err) {
263 unlock_page(page); 263 unlock_page(page);
264 page_cache_release(page); 264 page_cache_release(page);
265 ubifs_release_budget(c, &req);
265 return err; 266 return err;
266 } 267 }
267 } 268 }
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index fb166e204441..f6ac3f29323c 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -571,7 +571,11 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
571 571
572 aligned_dlen = ALIGN(dlen, 8); 572 aligned_dlen = ALIGN(dlen, 8);
573 aligned_ilen = ALIGN(ilen, 8); 573 aligned_ilen = ALIGN(ilen, 8);
574
574 len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ; 575 len = aligned_dlen + aligned_ilen + UBIFS_INO_NODE_SZ;
576 /* Make sure to also account for extended attributes */
577 len += host_ui->data_len;
578
575 dent = kmalloc(len, GFP_NOFS); 579 dent = kmalloc(len, GFP_NOFS);
576 if (!dent) 580 if (!dent)
577 return -ENOMEM; 581 return -ENOMEM;
@@ -648,7 +652,8 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
648 652
649 ino_key_init(c, &ino_key, dir->i_ino); 653 ino_key_init(c, &ino_key, dir->i_ino);
650 ino_offs += aligned_ilen; 654 ino_offs += aligned_ilen;
651 err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs, UBIFS_INO_NODE_SZ); 655 err = ubifs_tnc_add(c, &ino_key, lnum, ino_offs,
656 UBIFS_INO_NODE_SZ + host_ui->data_len);
652 if (err) 657 if (err)
653 goto out_ro; 658 goto out_ro;
654 659
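The corrected length now folds the host inode's extended-attribute payload (host_ui->data_len) into both the journal allocation and the TNC entry size; as standalone arithmetic (names local to this sketch):

#include <stdint.h>

#define ALIGN8(x)	(((x) + 7) & ~(uint64_t)7)

/* Space needed in the journal for a dent node plus the host inode,
 * including any extended-attribute payload carried in data_len. */
static uint64_t jnl_update_len(uint64_t dlen, uint64_t ilen,
			       uint64_t ino_node_sz, uint64_t xattr_len)
{
	return ALIGN8(dlen) + ALIGN8(ilen) + ino_node_sz + xattr_len;
}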
diff --git a/fs/xfs/libxfs/xfs_ag.h b/fs/xfs/libxfs/xfs_ag.h
deleted file mode 100644
index 6e247a99f5db..000000000000
--- a/fs/xfs/libxfs/xfs_ag.h
+++ /dev/null
@@ -1,281 +0,0 @@
1/*
2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#ifndef __XFS_AG_H__
19#define __XFS_AG_H__
20
21/*
22 * Allocation group header
23 * This is divided into three structures, placed in sequential 512-byte
24 * buffers after a copy of the superblock (also in a 512-byte buffer).
25 */
26
27struct xfs_buf;
28struct xfs_mount;
29struct xfs_trans;
30
31#define XFS_AGF_MAGIC 0x58414746 /* 'XAGF' */
32#define XFS_AGI_MAGIC 0x58414749 /* 'XAGI' */
33#define XFS_AGFL_MAGIC 0x5841464c /* 'XAFL' */
34#define XFS_AGF_VERSION 1
35#define XFS_AGI_VERSION 1
36
37#define XFS_AGF_GOOD_VERSION(v) ((v) == XFS_AGF_VERSION)
38#define XFS_AGI_GOOD_VERSION(v) ((v) == XFS_AGI_VERSION)
39
40/*
41 * Btree number 0 is bno, 1 is cnt. This value gives the size of the
42 * arrays below.
43 */
44#define XFS_BTNUM_AGF ((int)XFS_BTNUM_CNTi + 1)
45
46/*
47 * The second word of agf_levels in the first a.g. overlaps the EFS
48 * superblock's magic number. Since the magic numbers valid for EFS
49 * are > 64k, our value cannot be confused for an EFS superblock's.
50 */
51
52typedef struct xfs_agf {
53 /*
54 * Common allocation group header information
55 */
56 __be32 agf_magicnum; /* magic number == XFS_AGF_MAGIC */
57 __be32 agf_versionnum; /* header version == XFS_AGF_VERSION */
58 __be32 agf_seqno; /* sequence # starting from 0 */
59 __be32 agf_length; /* size in blocks of a.g. */
60 /*
61 * Freespace information
62 */
63 __be32 agf_roots[XFS_BTNUM_AGF]; /* root blocks */
64 __be32 agf_spare0; /* spare field */
65 __be32 agf_levels[XFS_BTNUM_AGF]; /* btree levels */
66 __be32 agf_spare1; /* spare field */
67
68 __be32 agf_flfirst; /* first freelist block's index */
69 __be32 agf_fllast; /* last freelist block's index */
70 __be32 agf_flcount; /* count of blocks in freelist */
71 __be32 agf_freeblks; /* total free blocks */
72
73 __be32 agf_longest; /* longest free space */
74 __be32 agf_btreeblks; /* # of blocks held in AGF btrees */
75 uuid_t agf_uuid; /* uuid of filesystem */
76
77 /*
78 * reserve some contiguous space for future logged fields before we add
79 * the unlogged fields. This makes the range logging via flags and
80 * structure offsets much simpler.
81 */
82 __be64 agf_spare64[16];
83
84 /* unlogged fields, written during buffer writeback. */
85 __be64 agf_lsn; /* last write sequence */
86 __be32 agf_crc; /* crc of agf sector */
87 __be32 agf_spare2;
88
89 /* structure must be padded to 64 bit alignment */
90} xfs_agf_t;
91
92#define XFS_AGF_CRC_OFF offsetof(struct xfs_agf, agf_crc)
93
94#define XFS_AGF_MAGICNUM 0x00000001
95#define XFS_AGF_VERSIONNUM 0x00000002
96#define XFS_AGF_SEQNO 0x00000004
97#define XFS_AGF_LENGTH 0x00000008
98#define XFS_AGF_ROOTS 0x00000010
99#define XFS_AGF_LEVELS 0x00000020
100#define XFS_AGF_FLFIRST 0x00000040
101#define XFS_AGF_FLLAST 0x00000080
102#define XFS_AGF_FLCOUNT 0x00000100
103#define XFS_AGF_FREEBLKS 0x00000200
104#define XFS_AGF_LONGEST 0x00000400
105#define XFS_AGF_BTREEBLKS 0x00000800
106#define XFS_AGF_UUID 0x00001000
107#define XFS_AGF_NUM_BITS 13
108#define XFS_AGF_ALL_BITS ((1 << XFS_AGF_NUM_BITS) - 1)
109
110#define XFS_AGF_FLAGS \
111 { XFS_AGF_MAGICNUM, "MAGICNUM" }, \
112 { XFS_AGF_VERSIONNUM, "VERSIONNUM" }, \
113 { XFS_AGF_SEQNO, "SEQNO" }, \
114 { XFS_AGF_LENGTH, "LENGTH" }, \
115 { XFS_AGF_ROOTS, "ROOTS" }, \
116 { XFS_AGF_LEVELS, "LEVELS" }, \
117 { XFS_AGF_FLFIRST, "FLFIRST" }, \
118 { XFS_AGF_FLLAST, "FLLAST" }, \
119 { XFS_AGF_FLCOUNT, "FLCOUNT" }, \
120 { XFS_AGF_FREEBLKS, "FREEBLKS" }, \
121 { XFS_AGF_LONGEST, "LONGEST" }, \
122 { XFS_AGF_BTREEBLKS, "BTREEBLKS" }, \
123 { XFS_AGF_UUID, "UUID" }
124
125/* disk block (xfs_daddr_t) in the AG */
126#define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log))
127#define XFS_AGF_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGF_DADDR(mp))
128#define XFS_BUF_TO_AGF(bp) ((xfs_agf_t *)((bp)->b_addr))
129
130extern int xfs_read_agf(struct xfs_mount *mp, struct xfs_trans *tp,
131 xfs_agnumber_t agno, int flags, struct xfs_buf **bpp);
132
133/*
134 * Size of the unlinked inode hash table in the agi.
135 */
136#define XFS_AGI_UNLINKED_BUCKETS 64
137
138typedef struct xfs_agi {
139 /*
140 * Common allocation group header information
141 */
142 __be32 agi_magicnum; /* magic number == XFS_AGI_MAGIC */
143 __be32 agi_versionnum; /* header version == XFS_AGI_VERSION */
144 __be32 agi_seqno; /* sequence # starting from 0 */
145 __be32 agi_length; /* size in blocks of a.g. */
146 /*
147 * Inode information
148 * Inodes are mapped by interpreting the inode number, so no
149 * mapping data is needed here.
150 */
151 __be32 agi_count; /* count of allocated inodes */
152 __be32 agi_root; /* root of inode btree */
153 __be32 agi_level; /* levels in inode btree */
154 __be32 agi_freecount; /* number of free inodes */
155
156 __be32 agi_newino; /* new inode just allocated */
157 __be32 agi_dirino; /* last directory inode chunk */
158 /*
159 * Hash table of inodes which have been unlinked but are
160 * still being referenced.
161 */
162 __be32 agi_unlinked[XFS_AGI_UNLINKED_BUCKETS];
163 /*
164 * This marks the end of logging region 1 and start of logging region 2.
165 */
166 uuid_t agi_uuid; /* uuid of filesystem */
167 __be32 agi_crc; /* crc of agi sector */
168 __be32 agi_pad32;
169 __be64 agi_lsn; /* last write sequence */
170
171 __be32 agi_free_root; /* root of the free inode btree */
172 __be32 agi_free_level;/* levels in free inode btree */
173
174 /* structure must be padded to 64 bit alignment */
175} xfs_agi_t;
176
177#define XFS_AGI_CRC_OFF offsetof(struct xfs_agi, agi_crc)
178
179#define XFS_AGI_MAGICNUM (1 << 0)
180#define XFS_AGI_VERSIONNUM (1 << 1)
181#define XFS_AGI_SEQNO (1 << 2)
182#define XFS_AGI_LENGTH (1 << 3)
183#define XFS_AGI_COUNT (1 << 4)
184#define XFS_AGI_ROOT (1 << 5)
185#define XFS_AGI_LEVEL (1 << 6)
186#define XFS_AGI_FREECOUNT (1 << 7)
187#define XFS_AGI_NEWINO (1 << 8)
188#define XFS_AGI_DIRINO (1 << 9)
189#define XFS_AGI_UNLINKED (1 << 10)
190#define XFS_AGI_NUM_BITS_R1 11 /* end of the 1st agi logging region */
191#define XFS_AGI_ALL_BITS_R1 ((1 << XFS_AGI_NUM_BITS_R1) - 1)
192#define XFS_AGI_FREE_ROOT (1 << 11)
193#define XFS_AGI_FREE_LEVEL (1 << 12)
194#define XFS_AGI_NUM_BITS_R2 13
195
196/* disk block (xfs_daddr_t) in the AG */
197#define XFS_AGI_DADDR(mp) ((xfs_daddr_t)(2 << (mp)->m_sectbb_log))
198#define XFS_AGI_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGI_DADDR(mp))
199#define XFS_BUF_TO_AGI(bp) ((xfs_agi_t *)((bp)->b_addr))
200
201extern int xfs_read_agi(struct xfs_mount *mp, struct xfs_trans *tp,
202 xfs_agnumber_t agno, struct xfs_buf **bpp);
203
204/*
205 * The third a.g. block contains the a.g. freelist, an array
206 * of block pointers to blocks owned by the allocation btree code.
207 */
208#define XFS_AGFL_DADDR(mp) ((xfs_daddr_t)(3 << (mp)->m_sectbb_log))
209#define XFS_AGFL_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGFL_DADDR(mp))
210#define XFS_BUF_TO_AGFL(bp) ((xfs_agfl_t *)((bp)->b_addr))
211
212#define XFS_BUF_TO_AGFL_BNO(mp, bp) \
213 (xfs_sb_version_hascrc(&((mp)->m_sb)) ? \
214 &(XFS_BUF_TO_AGFL(bp)->agfl_bno[0]) : \
215 (__be32 *)(bp)->b_addr)
216
217/*
218 * Size of the AGFL. For CRC-enabled filesystems we steal a couple of
219 * slots in the beginning of the block for a proper header with the
220 * location information and CRC.
221 */
222#define XFS_AGFL_SIZE(mp) \
223 (((mp)->m_sb.sb_sectsize - \
224 (xfs_sb_version_hascrc(&((mp)->m_sb)) ? \
225 sizeof(struct xfs_agfl) : 0)) / \
226 sizeof(xfs_agblock_t))
227
228typedef struct xfs_agfl {
229 __be32 agfl_magicnum;
230 __be32 agfl_seqno;
231 uuid_t agfl_uuid;
232 __be64 agfl_lsn;
233 __be32 agfl_crc;
234 __be32 agfl_bno[]; /* actually XFS_AGFL_SIZE(mp) */
235} xfs_agfl_t;
236
237#define XFS_AGFL_CRC_OFF offsetof(struct xfs_agfl, agfl_crc)
238
239/*
240 * tags for inode radix tree
241 */
242#define XFS_ICI_NO_TAG (-1) /* special flag for an untagged lookup
243 in xfs_inode_ag_iterator */
244#define XFS_ICI_RECLAIM_TAG 0 /* inode is to be reclaimed */
245#define XFS_ICI_EOFBLOCKS_TAG 1 /* inode has blocks beyond EOF */
246
247#define XFS_AG_MAXLEVELS(mp) ((mp)->m_ag_maxlevels)
248#define XFS_MIN_FREELIST_RAW(bl,cl,mp) \
249 (MIN(bl + 1, XFS_AG_MAXLEVELS(mp)) + MIN(cl + 1, XFS_AG_MAXLEVELS(mp)))
250#define XFS_MIN_FREELIST(a,mp) \
251 (XFS_MIN_FREELIST_RAW( \
252 be32_to_cpu((a)->agf_levels[XFS_BTNUM_BNOi]), \
253 be32_to_cpu((a)->agf_levels[XFS_BTNUM_CNTi]), mp))
254#define XFS_MIN_FREELIST_PAG(pag,mp) \
255 (XFS_MIN_FREELIST_RAW( \
256 (unsigned int)(pag)->pagf_levels[XFS_BTNUM_BNOi], \
257 (unsigned int)(pag)->pagf_levels[XFS_BTNUM_CNTi], mp))
258
259#define XFS_AGB_TO_FSB(mp,agno,agbno) \
260 (((xfs_fsblock_t)(agno) << (mp)->m_sb.sb_agblklog) | (agbno))
261#define XFS_FSB_TO_AGNO(mp,fsbno) \
262 ((xfs_agnumber_t)((fsbno) >> (mp)->m_sb.sb_agblklog))
263#define XFS_FSB_TO_AGBNO(mp,fsbno) \
264 ((xfs_agblock_t)((fsbno) & xfs_mask32lo((mp)->m_sb.sb_agblklog)))
265#define XFS_AGB_TO_DADDR(mp,agno,agbno) \
266 ((xfs_daddr_t)XFS_FSB_TO_BB(mp, \
267 (xfs_fsblock_t)(agno) * (mp)->m_sb.sb_agblocks + (agbno)))
268#define XFS_AG_DADDR(mp,agno,d) (XFS_AGB_TO_DADDR(mp, agno, 0) + (d))
269
270/*
271 * For checking for bad ranges of xfs_daddr_t's, covering multiple
272 * allocation groups or a single xfs_daddr_t that's a superblock copy.
273 */
274#define XFS_AG_CHECK_DADDR(mp,d,len) \
275 ((len) == 1 ? \
276 ASSERT((d) == XFS_SB_DADDR || \
277 xfs_daddr_to_agbno(mp, d) != XFS_SB_DADDR) : \
278 ASSERT(xfs_daddr_to_agno(mp, d) == \
279 xfs_daddr_to_agno(mp, (d) + (len) - 1)))
280
281#endif /* __XFS_AG_H__ */
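The address-conversion macros in this deleted header (the definitions are consolidated into other headers rather than dropped) pack an AG number and an AG-relative block number into a single filesystem block number. A worked standalone example of the encoding:

#include <stdint.h>
#include <stdio.h>

/* With sb_agblklog = 16, AG 3 block 100 packs into one fsblock number:
 * fsb = (agno << agblklog) | agbno, and unpacks with shift and mask. */
int main(void)
{
	unsigned int agblklog = 16;	/* log2 of the AG size, rounded up */
	uint64_t agno = 3, agbno = 100;
	uint64_t fsb = (agno << agblklog) | agbno;

	printf("fsb=%llu agno=%llu agbno=%llu\n",
	       (unsigned long long)fsb,
	       (unsigned long long)(fsb >> agblklog),
	       (unsigned long long)(fsb & ((1ULL << agblklog) - 1)));
	return 0;
}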
diff --git a/fs/xfs/libxfs/xfs_alloc.c b/fs/xfs/libxfs/xfs_alloc.c
index eff34218f405..a6fbf4472017 100644
--- a/fs/xfs/libxfs/xfs_alloc.c
+++ b/fs/xfs/libxfs/xfs_alloc.c
@@ -23,7 +23,6 @@
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_bit.h" 24#include "xfs_bit.h"
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h"
27#include "xfs_mount.h" 26#include "xfs_mount.h"
28#include "xfs_inode.h" 27#include "xfs_inode.h"
29#include "xfs_btree.h" 28#include "xfs_btree.h"
diff --git a/fs/xfs/libxfs/xfs_alloc.h b/fs/xfs/libxfs/xfs_alloc.h
index feacb061bab7..d1b4b6a5c894 100644
--- a/fs/xfs/libxfs/xfs_alloc.h
+++ b/fs/xfs/libxfs/xfs_alloc.h
@@ -231,4 +231,7 @@ xfs_alloc_get_rec(
231 xfs_extlen_t *len, /* output: length of extent */ 231 xfs_extlen_t *len, /* output: length of extent */
232 int *stat); /* output: success/failure */ 232 int *stat); /* output: success/failure */
233 233
234int xfs_read_agf(struct xfs_mount *mp, struct xfs_trans *tp,
235 xfs_agnumber_t agno, int flags, struct xfs_buf **bpp);
236
234#endif /* __XFS_ALLOC_H__ */ 237#endif /* __XFS_ALLOC_H__ */
diff --git a/fs/xfs/libxfs/xfs_alloc_btree.c b/fs/xfs/libxfs/xfs_alloc_btree.c
index e0e83e24d3ef..59d521c09a17 100644
--- a/fs/xfs/libxfs/xfs_alloc_btree.c
+++ b/fs/xfs/libxfs/xfs_alloc_btree.c
@@ -22,7 +22,6 @@
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_sb.h" 24#include "xfs_sb.h"
25#include "xfs_ag.h"
26#include "xfs_mount.h" 25#include "xfs_mount.h"
27#include "xfs_btree.h" 26#include "xfs_btree.h"
28#include "xfs_alloc_btree.h" 27#include "xfs_alloc_btree.h"
diff --git a/fs/xfs/libxfs/xfs_attr.c b/fs/xfs/libxfs/xfs_attr.c
index 353fb425faef..0a472fbe06d4 100644
--- a/fs/xfs/libxfs/xfs_attr.c
+++ b/fs/xfs/libxfs/xfs_attr.c
@@ -22,8 +22,6 @@
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_bit.h" 24#include "xfs_bit.h"
25#include "xfs_sb.h"
26#include "xfs_ag.h"
27#include "xfs_mount.h" 25#include "xfs_mount.h"
28#include "xfs_da_format.h" 26#include "xfs_da_format.h"
29#include "xfs_da_btree.h" 27#include "xfs_da_btree.h"
@@ -42,7 +40,6 @@
42#include "xfs_quota.h" 40#include "xfs_quota.h"
43#include "xfs_trans_space.h" 41#include "xfs_trans_space.h"
44#include "xfs_trace.h" 42#include "xfs_trace.h"
45#include "xfs_dinode.h"
46 43
47/* 44/*
48 * xfs_attr.c 45 * xfs_attr.c
diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
index b1f73dbbf3d8..5d38e8b8a913 100644
--- a/fs/xfs/libxfs/xfs_attr_leaf.c
+++ b/fs/xfs/libxfs/xfs_attr_leaf.c
@@ -24,7 +24,6 @@
24#include "xfs_trans_resv.h" 24#include "xfs_trans_resv.h"
25#include "xfs_bit.h" 25#include "xfs_bit.h"
26#include "xfs_sb.h" 26#include "xfs_sb.h"
27#include "xfs_ag.h"
28#include "xfs_mount.h" 27#include "xfs_mount.h"
29#include "xfs_da_format.h" 28#include "xfs_da_format.h"
30#include "xfs_da_btree.h" 29#include "xfs_da_btree.h"
@@ -41,7 +40,6 @@
41#include "xfs_trace.h" 40#include "xfs_trace.h"
42#include "xfs_buf_item.h" 41#include "xfs_buf_item.h"
43#include "xfs_cksum.h" 42#include "xfs_cksum.h"
44#include "xfs_dinode.h"
45#include "xfs_dir2.h" 43#include "xfs_dir2.h"
46 44
47 45
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index 7510ab8058a4..20de88d1bf86 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -23,8 +23,6 @@
23#include "xfs_log_format.h" 23#include "xfs_log_format.h"
24#include "xfs_trans_resv.h" 24#include "xfs_trans_resv.h"
25#include "xfs_bit.h" 25#include "xfs_bit.h"
26#include "xfs_sb.h"
27#include "xfs_ag.h"
28#include "xfs_mount.h" 26#include "xfs_mount.h"
29#include "xfs_da_format.h" 27#include "xfs_da_format.h"
30#include "xfs_da_btree.h" 28#include "xfs_da_btree.h"
diff --git a/fs/xfs/libxfs/xfs_bmap.c b/fs/xfs/libxfs/xfs_bmap.c
index 79c981984dca..b5eb4743f75a 100644
--- a/fs/xfs/libxfs/xfs_bmap.c
+++ b/fs/xfs/libxfs/xfs_bmap.c
@@ -22,9 +22,7 @@
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_bit.h" 24#include "xfs_bit.h"
25#include "xfs_inum.h"
26#include "xfs_sb.h" 25#include "xfs_sb.h"
27#include "xfs_ag.h"
28#include "xfs_mount.h" 26#include "xfs_mount.h"
29#include "xfs_da_format.h" 27#include "xfs_da_format.h"
30#include "xfs_da_btree.h" 28#include "xfs_da_btree.h"
@@ -46,7 +44,6 @@
46#include "xfs_trace.h" 44#include "xfs_trace.h"
47#include "xfs_symlink.h" 45#include "xfs_symlink.h"
48#include "xfs_attr_leaf.h" 46#include "xfs_attr_leaf.h"
49#include "xfs_dinode.h"
50#include "xfs_filestream.h" 47#include "xfs_filestream.h"
51 48
52 49
@@ -5450,13 +5447,11 @@ xfs_bmse_merge(
5450 struct xfs_btree_cur *cur, 5447 struct xfs_btree_cur *cur,
5451 int *logflags) /* output */ 5448 int *logflags) /* output */
5452{ 5449{
5453 struct xfs_ifork *ifp;
5454 struct xfs_bmbt_irec got; 5450 struct xfs_bmbt_irec got;
5455 struct xfs_bmbt_irec left; 5451 struct xfs_bmbt_irec left;
5456 xfs_filblks_t blockcount; 5452 xfs_filblks_t blockcount;
5457 int error, i; 5453 int error, i;
5458 5454
5459 ifp = XFS_IFORK_PTR(ip, whichfork);
5460 xfs_bmbt_get_all(gotp, &got); 5455 xfs_bmbt_get_all(gotp, &got);
5461 xfs_bmbt_get_all(leftp, &left); 5456 xfs_bmbt_get_all(leftp, &left);
5462 blockcount = left.br_blockcount + got.br_blockcount; 5457 blockcount = left.br_blockcount + got.br_blockcount;
@@ -5489,32 +5484,25 @@ xfs_bmse_merge(
5489 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock, 5484 error = xfs_bmbt_lookup_eq(cur, got.br_startoff, got.br_startblock,
5490 got.br_blockcount, &i); 5485 got.br_blockcount, &i);
5491 if (error) 5486 if (error)
5492 goto out_error; 5487 return error;
5493 XFS_WANT_CORRUPTED_GOTO(i == 1, out_error); 5488 XFS_WANT_CORRUPTED_RETURN(i == 1);
5494 5489
5495 error = xfs_btree_delete(cur, &i); 5490 error = xfs_btree_delete(cur, &i);
5496 if (error) 5491 if (error)
5497 goto out_error; 5492 return error;
5498 XFS_WANT_CORRUPTED_GOTO(i == 1, out_error); 5493 XFS_WANT_CORRUPTED_RETURN(i == 1);
5499 5494
5500 /* lookup and update size of the previous extent */ 5495 /* lookup and update size of the previous extent */
5501 error = xfs_bmbt_lookup_eq(cur, left.br_startoff, left.br_startblock, 5496 error = xfs_bmbt_lookup_eq(cur, left.br_startoff, left.br_startblock,
5502 left.br_blockcount, &i); 5497 left.br_blockcount, &i);
5503 if (error) 5498 if (error)
5504 goto out_error; 5499 return error;
5505 XFS_WANT_CORRUPTED_GOTO(i == 1, out_error); 5500 XFS_WANT_CORRUPTED_RETURN(i == 1);
5506 5501
5507 left.br_blockcount = blockcount; 5502 left.br_blockcount = blockcount;
5508 5503
5509 error = xfs_bmbt_update(cur, left.br_startoff, left.br_startblock, 5504 return xfs_bmbt_update(cur, left.br_startoff, left.br_startblock,
5510 left.br_blockcount, left.br_state); 5505 left.br_blockcount, left.br_state);
5511 if (error)
5512 goto out_error;
5513
5514 return 0;
5515
5516out_error:
5517 return error;
5518} 5506}
5519 5507
5520/* 5508/*
@@ -5544,35 +5532,29 @@ xfs_bmse_shift_one(
5544 startoff = got.br_startoff - offset_shift_fsb; 5532 startoff = got.br_startoff - offset_shift_fsb;
5545 5533
5546 /* delalloc extents should be prevented by caller */ 5534 /* delalloc extents should be prevented by caller */
5547 XFS_WANT_CORRUPTED_GOTO(!isnullstartblock(got.br_startblock), 5535 XFS_WANT_CORRUPTED_RETURN(!isnullstartblock(got.br_startblock));
5548 out_error);
5549 5536
5550 /* 5537 /*
5551	 * If this is the first extent in the file, make sure there's enough
5552	 * room at the start of the file and jump right to the shift as there's
5553	 * no left extent to merge.
5538	 * Check for merge if we've got an extent to the left, otherwise make
5539	 * sure there's enough room at the start of the file for the shift.
5554	 */	5540	 */
5555	if (*current_ext == 0) {
5556		if (got.br_startoff < offset_shift_fsb)
5557			return -EINVAL;
5558		goto shift_extent;
5559	}
5560
5561	/* grab the left extent and check for a large enough hole */
5562	leftp = xfs_iext_get_ext(ifp, *current_ext - 1);
5563	xfs_bmbt_get_all(leftp, &left);
5564
5565	if (startoff < left.br_startoff + left.br_blockcount)
5541	if (*current_ext) {
5542		/* grab the left extent and check for a large enough hole */
5543		leftp = xfs_iext_get_ext(ifp, *current_ext - 1);
5544		xfs_bmbt_get_all(leftp, &left);
5545
5546		if (startoff < left.br_startoff + left.br_blockcount)
5547			return -EINVAL;
5548
5549		/* check whether to merge the extent or shift it down */
5550		if (xfs_bmse_can_merge(&left, &got, offset_shift_fsb)) {
5551			return xfs_bmse_merge(ip, whichfork, offset_shift_fsb,
5552					      *current_ext, gotp, leftp, cur,
5553					      logflags);
5554		}
5555	} else if (got.br_startoff < offset_shift_fsb)
5566 return -EINVAL; 5556 return -EINVAL;
5567 5557
5568 /* check whether to merge the extent or shift it down */
5569 if (!xfs_bmse_can_merge(&left, &got, offset_shift_fsb))
5570 goto shift_extent;
5571
5572 return xfs_bmse_merge(ip, whichfork, offset_shift_fsb, *current_ext,
5573 gotp, leftp, cur, logflags);
5574
5575shift_extent:
5576 /* 5558 /*
5577 * Increment the extent index for the next iteration, update the start 5559 * Increment the extent index for the next iteration, update the start
5578 * offset of the in-core extent and update the btree if applicable. 5560 * offset of the in-core extent and update the btree if applicable.
@@ -5589,18 +5571,11 @@ shift_extent:
5589 got.br_blockcount, &i); 5571 got.br_blockcount, &i);
5590 if (error) 5572 if (error)
5591 return error; 5573 return error;
5592 XFS_WANT_CORRUPTED_GOTO(i == 1, out_error); 5574 XFS_WANT_CORRUPTED_RETURN(i == 1);
5593 5575
5594 got.br_startoff = startoff; 5576 got.br_startoff = startoff;
5595 error = xfs_bmbt_update(cur, got.br_startoff, got.br_startblock, 5577 return xfs_bmbt_update(cur, got.br_startoff, got.br_startblock,
5596 got.br_blockcount, got.br_state); 5578 got.br_blockcount, got.br_state);
5597 if (error)
5598 return error;
5599
5600 return 0;
5601
5602out_error:
5603 return error;
5604} 5579}
5605 5580
5606/* 5581/*
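The XFS_WANT_CORRUPTED_GOTO to XFS_WANT_CORRUPTED_RETURN conversions above are what let the single-use out_error labels disappear. Roughly, and only as a sketch (the real macros live in fs/xfs/xfs_error.h and additionally emit an error report plus an ASSERT), the RETURN variant bails out of the function directly:

    /* Sketch only: simplified stand-in for the xfs_error.h macro. */
    #define XFS_WANT_CORRUPTED_RETURN(x)		\
    	do {					\
    		if (unlikely(!(x)))		\
    			return -EFSCORRUPTED;	\
    	} while (0)
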
diff --git a/fs/xfs/libxfs/xfs_bmap_btree.c b/fs/xfs/libxfs/xfs_bmap_btree.c
index fba753308f31..2c44c8e50782 100644
--- a/fs/xfs/libxfs/xfs_bmap_btree.c
+++ b/fs/xfs/libxfs/xfs_bmap_btree.c
@@ -22,8 +22,6 @@
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_inode.h"
 #include "xfs_trans.h"
@@ -36,7 +34,6 @@
 #include "xfs_quota.h"
 #include "xfs_trace.h"
 #include "xfs_cksum.h"
-#include "xfs_dinode.h"
 
 /*
  * Determine the extent state.
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index 8fe6a93ff473..81cad433df85 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -22,8 +22,6 @@
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_inode.h"
 #include "xfs_trans.h"
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index fd827530afec..9cb0115c6bd1 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -23,8 +23,6 @@
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
 #include "xfs_bit.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_da_format.h"
 #include "xfs_da_btree.h"
@@ -514,7 +512,6 @@ xfs_da3_root_split(
 	struct xfs_buf		*bp;
 	struct xfs_inode	*dp;
 	struct xfs_trans	*tp;
-	struct xfs_mount	*mp;
 	struct xfs_dir2_leaf	*leaf;
 	xfs_dablk_t		blkno;
 	int			level;
@@ -534,7 +531,6 @@ xfs_da3_root_split(
 
 	dp = args->dp;
 	tp = args->trans;
-	mp = state->mp;
 	error = xfs_da_get_buf(tp, dp, blkno, -1, &bp, args->whichfork);
 	if (error)
 		return error;
@@ -2342,14 +2338,12 @@ xfs_da_shrink_inode(
 	xfs_inode_t *dp;
 	int done, error, w, count;
 	xfs_trans_t *tp;
-	xfs_mount_t *mp;
 
 	trace_xfs_da_shrink_inode(args);
 
 	dp = args->dp;
 	w = args->whichfork;
 	tp = args->trans;
-	mp = dp->i_mount;
 	count = args->geo->fsbcount;
 	for (;;) {
 		/*
diff --git a/fs/xfs/libxfs/xfs_da_format.c b/fs/xfs/libxfs/xfs_da_format.c
index 7e42fdfd2f1d..9d624a622946 100644
--- a/fs/xfs/libxfs/xfs_da_format.c
+++ b/fs/xfs/libxfs/xfs_da_format.c
@@ -22,8 +22,6 @@
 #include "xfs_format.h"
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_da_format.h"
 #include "xfs_da_btree.h"
diff --git a/fs/xfs/libxfs/xfs_dinode.h b/fs/xfs/libxfs/xfs_dinode.h
deleted file mode 100644
index 623bbe8fd921..000000000000
--- a/fs/xfs/libxfs/xfs_dinode.h
+++ /dev/null
@@ -1,243 +0,0 @@
-/*
- * Copyright (c) 2000,2002,2005 Silicon Graphics, Inc.
- * All Rights Reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it would be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write the Free Software Foundation,
- * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-#ifndef __XFS_DINODE_H__
-#define __XFS_DINODE_H__
-
-#define	XFS_DINODE_MAGIC		0x494e	/* 'IN' */
-#define XFS_DINODE_GOOD_VERSION(v)	((v) >= 1 && (v) <= 3)
-
-typedef struct xfs_timestamp {
-	__be32		t_sec;		/* timestamp seconds */
-	__be32		t_nsec;		/* timestamp nanoseconds */
-} xfs_timestamp_t;
-
-/*
- * On-disk inode structure.
- *
- * This is just the header or "dinode core", the inode is expanded to fill a
- * variable size the leftover area split into a data and an attribute fork.
- * The format of the data and attribute fork depends on the format of the
- * inode as indicated by di_format and di_aformat.  To access the data and
- * attribute use the XFS_DFORK_DPTR, XFS_DFORK_APTR, and XFS_DFORK_PTR macros
- * below.
- *
- * There is a very similar struct icdinode in xfs_inode which matches the
- * layout of the first 96 bytes of this structure, but is kept in native
- * format instead of big endian.
- *
- * Note: di_flushiter is only used by v1/2 inodes - it's effectively a zeroed
- * padding field for v3 inodes.
- */
-typedef struct xfs_dinode {
-	__be16		di_magic;	/* inode magic # = XFS_DINODE_MAGIC */
-	__be16		di_mode;	/* mode and type of file */
-	__u8		di_version;	/* inode version */
-	__u8		di_format;	/* format of di_c data */
-	__be16		di_onlink;	/* old number of links to file */
-	__be32		di_uid;		/* owner's user id */
-	__be32		di_gid;		/* owner's group id */
-	__be32		di_nlink;	/* number of links to file */
-	__be16		di_projid_lo;	/* lower part of owner's project id */
-	__be16		di_projid_hi;	/* higher part owner's project id */
-	__u8		di_pad[6];	/* unused, zeroed space */
-	__be16		di_flushiter;	/* incremented on flush */
-	xfs_timestamp_t	di_atime;	/* time last accessed */
-	xfs_timestamp_t	di_mtime;	/* time last modified */
-	xfs_timestamp_t	di_ctime;	/* time created/inode modified */
-	__be64		di_size;	/* number of bytes in file */
-	__be64		di_nblocks;	/* # of direct & btree blocks used */
-	__be32		di_extsize;	/* basic/minimum extent size for file */
-	__be32		di_nextents;	/* number of extents in data fork */
-	__be16		di_anextents;	/* number of extents in attribute fork*/
-	__u8		di_forkoff;	/* attr fork offs, <<3 for 64b align */
-	__s8		di_aformat;	/* format of attr fork's data */
-	__be32		di_dmevmask;	/* DMIG event mask */
-	__be16		di_dmstate;	/* DMIG state info */
-	__be16		di_flags;	/* random flags, XFS_DIFLAG_... */
-	__be32		di_gen;		/* generation number */
-
-	/* di_next_unlinked is the only non-core field in the old dinode */
-	__be32		di_next_unlinked;/* agi unlinked list ptr */
-
-	/* start of the extended dinode, writable fields */
-	__le32		di_crc;		/* CRC of the inode */
-	__be64		di_changecount;	/* number of attribute changes */
-	__be64		di_lsn;		/* flush sequence */
-	__be64		di_flags2;	/* more random flags */
-	__u8		di_pad2[16];	/* more padding for future expansion */
-
-	/* fields only written to during inode creation */
-	xfs_timestamp_t	di_crtime;	/* time created */
-	__be64		di_ino;		/* inode number */
-	uuid_t		di_uuid;	/* UUID of the filesystem */
-
-	/* structure must be padded to 64 bit alignment */
-} xfs_dinode_t;
-
-#define XFS_DINODE_CRC_OFF	offsetof(struct xfs_dinode, di_crc)
-
-#define DI_MAX_FLUSH 0xffff
-
-/*
- * Size of the core inode on disk.  Version 1 and 2 inodes have
- * the same size, but version 3 has grown a few additional fields.
- */
-static inline uint xfs_dinode_size(int version)
-{
-	if (version == 3)
-		return sizeof(struct xfs_dinode);
-	return offsetof(struct xfs_dinode, di_crc);
-}
-
-/*
- * The 32 bit link count in the inode theoretically maxes out at UINT_MAX.
- * Since the pathconf interface is signed, we use 2^31 - 1 instead.
- * The old inode format had a 16 bit link count, so its maximum is USHRT_MAX.
- */
-#define	XFS_MAXLINK		((1U << 31) - 1U)
-#define	XFS_MAXLINK_1		65535U
-
-/*
- * Values for di_format
- */
-typedef enum xfs_dinode_fmt {
-	XFS_DINODE_FMT_DEV,		/* xfs_dev_t */
-	XFS_DINODE_FMT_LOCAL,		/* bulk data */
-	XFS_DINODE_FMT_EXTENTS,		/* struct xfs_bmbt_rec */
-	XFS_DINODE_FMT_BTREE,		/* struct xfs_bmdr_block */
-	XFS_DINODE_FMT_UUID		/* uuid_t */
-} xfs_dinode_fmt_t;
-
-/*
- * Inode minimum and maximum sizes.
- */
-#define	XFS_DINODE_MIN_LOG	8
-#define	XFS_DINODE_MAX_LOG	11
-#define	XFS_DINODE_MIN_SIZE	(1 << XFS_DINODE_MIN_LOG)
-#define	XFS_DINODE_MAX_SIZE	(1 << XFS_DINODE_MAX_LOG)
-
-/*
- * Inode size for given fs.
- */
-#define XFS_LITINO(mp, version) \
-	((int)(((mp)->m_sb.sb_inodesize) - xfs_dinode_size(version)))
-
-/*
- * Inode data & attribute fork sizes, per inode.
- */
-#define XFS_DFORK_Q(dip)		((dip)->di_forkoff != 0)
-#define XFS_DFORK_BOFF(dip)		((int)((dip)->di_forkoff << 3))
-
-#define XFS_DFORK_DSIZE(dip,mp) \
-	(XFS_DFORK_Q(dip) ? \
-		XFS_DFORK_BOFF(dip) : \
-		XFS_LITINO(mp, (dip)->di_version))
-#define XFS_DFORK_ASIZE(dip,mp) \
-	(XFS_DFORK_Q(dip) ? \
-		XFS_LITINO(mp, (dip)->di_version) - XFS_DFORK_BOFF(dip) : \
-		0)
-#define XFS_DFORK_SIZE(dip,mp,w) \
-	((w) == XFS_DATA_FORK ? \
-		XFS_DFORK_DSIZE(dip, mp) : \
-		XFS_DFORK_ASIZE(dip, mp))
-
-/*
- * Return pointers to the data or attribute forks.
- */
-#define XFS_DFORK_DPTR(dip) \
-	((char *)dip + xfs_dinode_size(dip->di_version))
-#define XFS_DFORK_APTR(dip)	\
-	(XFS_DFORK_DPTR(dip) + XFS_DFORK_BOFF(dip))
-#define XFS_DFORK_PTR(dip,w)	\
-	((w) == XFS_DATA_FORK ? XFS_DFORK_DPTR(dip) : XFS_DFORK_APTR(dip))
-
-#define XFS_DFORK_FORMAT(dip,w) \
-	((w) == XFS_DATA_FORK ? \
-		(dip)->di_format : \
-		(dip)->di_aformat)
-#define XFS_DFORK_NEXTENTS(dip,w) \
-	((w) == XFS_DATA_FORK ? \
-		be32_to_cpu((dip)->di_nextents) : \
-		be16_to_cpu((dip)->di_anextents))
-
-#define	XFS_BUF_TO_DINODE(bp)	((xfs_dinode_t *)((bp)->b_addr))
-
-/*
- * For block and character special files the 32bit dev_t is stored at the
- * beginning of the data fork.
- */
-static inline xfs_dev_t xfs_dinode_get_rdev(struct xfs_dinode *dip)
-{
-	return be32_to_cpu(*(__be32 *)XFS_DFORK_DPTR(dip));
-}
-
-static inline void xfs_dinode_put_rdev(struct xfs_dinode *dip, xfs_dev_t rdev)
-{
-	*(__be32 *)XFS_DFORK_DPTR(dip) = cpu_to_be32(rdev);
-}
-
-/*
- * Values for di_flags
- * There should be a one-to-one correspondence between these flags and the
- * XFS_XFLAG_s.
- */
-#define XFS_DIFLAG_REALTIME_BIT  0	/* file's blocks come from rt area */
-#define XFS_DIFLAG_PREALLOC_BIT  1	/* file space has been preallocated */
-#define XFS_DIFLAG_NEWRTBM_BIT   2	/* for rtbitmap inode, new format */
-#define XFS_DIFLAG_IMMUTABLE_BIT 3	/* inode is immutable */
-#define XFS_DIFLAG_APPEND_BIT    4	/* inode is append-only */
-#define XFS_DIFLAG_SYNC_BIT      5	/* inode is written synchronously */
-#define XFS_DIFLAG_NOATIME_BIT   6	/* do not update atime */
-#define XFS_DIFLAG_NODUMP_BIT    7	/* do not dump */
-#define XFS_DIFLAG_RTINHERIT_BIT 8	/* create with realtime bit set */
-#define XFS_DIFLAG_PROJINHERIT_BIT   9	/* create with parents projid */
-#define XFS_DIFLAG_NOSYMLINKS_BIT   10	/* disallow symlink creation */
-#define XFS_DIFLAG_EXTSIZE_BIT      11	/* inode extent size allocator hint */
-#define XFS_DIFLAG_EXTSZINHERIT_BIT 12	/* inherit inode extent size */
-#define XFS_DIFLAG_NODEFRAG_BIT     13	/* do not reorganize/defragment */
-#define XFS_DIFLAG_FILESTREAM_BIT   14	/* use filestream allocator */
-#define XFS_DIFLAG_REALTIME      (1 << XFS_DIFLAG_REALTIME_BIT)
-#define XFS_DIFLAG_PREALLOC      (1 << XFS_DIFLAG_PREALLOC_BIT)
-#define XFS_DIFLAG_NEWRTBM       (1 << XFS_DIFLAG_NEWRTBM_BIT)
-#define XFS_DIFLAG_IMMUTABLE     (1 << XFS_DIFLAG_IMMUTABLE_BIT)
-#define XFS_DIFLAG_APPEND        (1 << XFS_DIFLAG_APPEND_BIT)
-#define XFS_DIFLAG_SYNC          (1 << XFS_DIFLAG_SYNC_BIT)
-#define XFS_DIFLAG_NOATIME       (1 << XFS_DIFLAG_NOATIME_BIT)
-#define XFS_DIFLAG_NODUMP        (1 << XFS_DIFLAG_NODUMP_BIT)
-#define XFS_DIFLAG_RTINHERIT     (1 << XFS_DIFLAG_RTINHERIT_BIT)
-#define XFS_DIFLAG_PROJINHERIT   (1 << XFS_DIFLAG_PROJINHERIT_BIT)
-#define XFS_DIFLAG_NOSYMLINKS    (1 << XFS_DIFLAG_NOSYMLINKS_BIT)
-#define XFS_DIFLAG_EXTSIZE       (1 << XFS_DIFLAG_EXTSIZE_BIT)
-#define XFS_DIFLAG_EXTSZINHERIT  (1 << XFS_DIFLAG_EXTSZINHERIT_BIT)
-#define XFS_DIFLAG_NODEFRAG      (1 << XFS_DIFLAG_NODEFRAG_BIT)
-#define XFS_DIFLAG_FILESTREAM    (1 << XFS_DIFLAG_FILESTREAM_BIT)
-
-#ifdef CONFIG_XFS_RT
-#define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME)
-#else
-#define XFS_IS_REALTIME_INODE(ip) (0)
-#endif
-
-#define XFS_DIFLAG_ANY \
-	(XFS_DIFLAG_REALTIME | XFS_DIFLAG_PREALLOC | XFS_DIFLAG_NEWRTBM | \
-	 XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \
-	 XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \
-	 XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS | XFS_DIFLAG_EXTSIZE | \
-	 XFS_DIFLAG_EXTSZINHERIT | XFS_DIFLAG_NODEFRAG | XFS_DIFLAG_FILESTREAM)
-
-#endif	/* __XFS_DINODE_H__ */
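The header is deleted because its definitions are consolidated into xfs_format.h, so the accessors keep their semantics. As a minimal sketch of how the fork macros above compose — bp and mp are assumed to be an inode buffer and its mount already in scope:

    /* Sketch only: locate both forks of a raw on-disk inode. */
    struct xfs_dinode *dip = XFS_BUF_TO_DINODE(bp);
    char *dfork = XFS_DFORK_DPTR(dip);		/* starts right after the dinode core */
    char *afork = XFS_DFORK_APTR(dip);		/* dfork + (di_forkoff << 3) */
    int dsize = XFS_DFORK_DSIZE(dip, mp);	/* bytes available to the data fork */
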
diff --git a/fs/xfs/libxfs/xfs_dir2.c b/fs/xfs/libxfs/xfs_dir2.c
index 7075aaf131f4..a69fb3a1e161 100644
--- a/fs/xfs/libxfs/xfs_dir2.c
+++ b/fs/xfs/libxfs/xfs_dir2.c
@@ -20,9 +20,6 @@
 #include "xfs_format.h"
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
-#include "xfs_inum.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_da_format.h"
 #include "xfs_da_btree.h"
@@ -34,10 +31,25 @@
 #include "xfs_dir2_priv.h"
 #include "xfs_error.h"
 #include "xfs_trace.h"
-#include "xfs_dinode.h"
 
 struct xfs_name xfs_name_dotdot = { (unsigned char *)"..", 2, XFS_DIR3_FT_DIR };
 
+/*
+ * @mode, if set, indicates that the type field needs to be set up.
+ * This uses the transformation from file mode to DT_* as defined in linux/fs.h
+ * for file type specification. This will be propagated into the directory
+ * structure if appropriate for the given operation and filesystem config.
+ */
+const unsigned char xfs_mode_to_ftype[S_IFMT >> S_SHIFT] = {
+	[0]			= XFS_DIR3_FT_UNKNOWN,
+	[S_IFREG >> S_SHIFT]	= XFS_DIR3_FT_REG_FILE,
+	[S_IFDIR >> S_SHIFT]	= XFS_DIR3_FT_DIR,
+	[S_IFCHR >> S_SHIFT]	= XFS_DIR3_FT_CHRDEV,
+	[S_IFBLK >> S_SHIFT]	= XFS_DIR3_FT_BLKDEV,
+	[S_IFIFO >> S_SHIFT]	= XFS_DIR3_FT_FIFO,
+	[S_IFSOCK >> S_SHIFT]	= XFS_DIR3_FT_SOCK,
+	[S_IFLNK >> S_SHIFT]	= XFS_DIR3_FT_SYMLINK,
+};
 
 /*
  * ASCII case-insensitive (ie. A-Z) support for directories that was
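With the table now defined here, mapping an inode mode to the on-disk directory ftype becomes a single lookup. A one-line sketch (mode assumed to be a VFS umode_t in scope):

    unsigned char ftype = xfs_mode_to_ftype[(mode & S_IFMT) >> S_SHIFT];
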
diff --git a/fs/xfs/libxfs/xfs_dir2.h b/fs/xfs/libxfs/xfs_dir2.h
index 4dff261e6ed5..e55353651f5b 100644
--- a/fs/xfs/libxfs/xfs_dir2.h
+++ b/fs/xfs/libxfs/xfs_dir2.h
@@ -32,6 +32,12 @@ struct xfs_dir2_data_unused;
 extern struct xfs_name xfs_name_dotdot;
 
 /*
+ * directory filetype conversion tables.
+ */
+#define S_SHIFT 12
+extern const unsigned char xfs_mode_to_ftype[];
+
+/*
  * directory operations vector for encode/decode routines
  */
 struct xfs_dir_ops {
@@ -177,4 +183,138 @@ extern const struct xfs_buf_ops xfs_dir3_leaf1_buf_ops;
 extern const struct xfs_buf_ops xfs_dir3_free_buf_ops;
 extern const struct xfs_buf_ops xfs_dir3_data_buf_ops;
 
+/*
+ * Directory offset/block conversion functions.
+ *
+ * DB blocks here are logical directory block numbers, not filesystem blocks.
+ */
+
+/*
+ * Convert dataptr to byte in file space
+ */
+static inline xfs_dir2_off_t
+xfs_dir2_dataptr_to_byte(xfs_dir2_dataptr_t dp)
+{
+	return (xfs_dir2_off_t)dp << XFS_DIR2_DATA_ALIGN_LOG;
+}
+
+/*
+ * Convert byte in file space to dataptr.  It had better be aligned.
+ */
+static inline xfs_dir2_dataptr_t
+xfs_dir2_byte_to_dataptr(xfs_dir2_off_t by)
+{
+	return (xfs_dir2_dataptr_t)(by >> XFS_DIR2_DATA_ALIGN_LOG);
+}
+
+/*
+ * Convert byte in space to (DB) block
+ */
+static inline xfs_dir2_db_t
+xfs_dir2_byte_to_db(struct xfs_da_geometry *geo, xfs_dir2_off_t by)
+{
+	return (xfs_dir2_db_t)(by >> geo->blklog);
+}
+
+/*
+ * Convert dataptr to a block number
+ */
+static inline xfs_dir2_db_t
+xfs_dir2_dataptr_to_db(struct xfs_da_geometry *geo, xfs_dir2_dataptr_t dp)
+{
+	return xfs_dir2_byte_to_db(geo, xfs_dir2_dataptr_to_byte(dp));
+}
+
+/*
+ * Convert byte in space to offset in a block
+ */
+static inline xfs_dir2_data_aoff_t
+xfs_dir2_byte_to_off(struct xfs_da_geometry *geo, xfs_dir2_off_t by)
+{
+	return (xfs_dir2_data_aoff_t)(by & (geo->blksize - 1));
+}
+
+/*
+ * Convert dataptr to a byte offset in a block
+ */
+static inline xfs_dir2_data_aoff_t
+xfs_dir2_dataptr_to_off(struct xfs_da_geometry *geo, xfs_dir2_dataptr_t dp)
+{
+	return xfs_dir2_byte_to_off(geo, xfs_dir2_dataptr_to_byte(dp));
+}
+
+/*
+ * Convert block and offset to byte in space
+ */
+static inline xfs_dir2_off_t
+xfs_dir2_db_off_to_byte(struct xfs_da_geometry *geo, xfs_dir2_db_t db,
+			xfs_dir2_data_aoff_t o)
+{
+	return ((xfs_dir2_off_t)db << geo->blklog) + o;
+}
+
+/*
+ * Convert block (DB) to block (dablk)
+ */
+static inline xfs_dablk_t
+xfs_dir2_db_to_da(struct xfs_da_geometry *geo, xfs_dir2_db_t db)
+{
+	return (xfs_dablk_t)(db << (geo->blklog - geo->fsblog));
+}
+
+/*
+ * Convert byte in space to (DA) block
+ */
+static inline xfs_dablk_t
+xfs_dir2_byte_to_da(struct xfs_da_geometry *geo, xfs_dir2_off_t by)
+{
+	return xfs_dir2_db_to_da(geo, xfs_dir2_byte_to_db(geo, by));
+}
+
+/*
+ * Convert block and offset to dataptr
+ */
+static inline xfs_dir2_dataptr_t
+xfs_dir2_db_off_to_dataptr(struct xfs_da_geometry *geo, xfs_dir2_db_t db,
+			   xfs_dir2_data_aoff_t o)
+{
+	return xfs_dir2_byte_to_dataptr(xfs_dir2_db_off_to_byte(geo, db, o));
+}
+
+/*
+ * Convert block (dablk) to block (DB)
+ */
+static inline xfs_dir2_db_t
+xfs_dir2_da_to_db(struct xfs_da_geometry *geo, xfs_dablk_t da)
+{
+	return (xfs_dir2_db_t)(da >> (geo->blklog - geo->fsblog));
+}
+
+/*
+ * Convert block (dablk) to byte offset in space
+ */
+static inline xfs_dir2_off_t
+xfs_dir2_da_to_byte(struct xfs_da_geometry *geo, xfs_dablk_t da)
+{
+	return xfs_dir2_db_off_to_byte(geo, xfs_dir2_da_to_db(geo, da), 0);
+}
+
+/*
+ * Directory tail pointer accessor functions.  Based on block geometry.
+ */
+static inline struct xfs_dir2_block_tail *
+xfs_dir2_block_tail_p(struct xfs_da_geometry *geo, struct xfs_dir2_data_hdr *hdr)
+{
+	return ((struct xfs_dir2_block_tail *)
+		((char *)hdr + geo->blksize)) - 1;
+}
+
+static inline struct xfs_dir2_leaf_tail *
+xfs_dir2_leaf_tail_p(struct xfs_da_geometry *geo, struct xfs_dir2_leaf *lp)
+{
+	return (struct xfs_dir2_leaf_tail *)
+		((char *)lp + geo->blksize -
+		  sizeof(struct xfs_dir2_leaf_tail));
+}
+
 #endif /* __XFS_DIR2_H__ */
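These conversion helpers move in from xfs_dir2_priv.h (removed further down) and are exact inverses of one another for aligned values. A sketch, assuming geo is the directory's struct xfs_da_geometry and dptr an aligned xfs_dir2_dataptr_t:

    /* Round-trip: dataptr -> (logical dir block, offset) -> dataptr. */
    xfs_dir2_db_t db = xfs_dir2_dataptr_to_db(geo, dptr);
    xfs_dir2_data_aoff_t off = xfs_dir2_dataptr_to_off(geo, dptr);
    ASSERT(xfs_dir2_db_off_to_dataptr(geo, db, off) == dptr);
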
diff --git a/fs/xfs/libxfs/xfs_dir2_block.c b/fs/xfs/libxfs/xfs_dir2_block.c
index 9628ceccfa02..9354e190b82e 100644
--- a/fs/xfs/libxfs/xfs_dir2_block.c
+++ b/fs/xfs/libxfs/xfs_dir2_block.c
@@ -21,8 +21,6 @@
 #include "xfs_format.h"
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_da_format.h"
 #include "xfs_da_btree.h"
@@ -36,7 +34,6 @@
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_cksum.h"
-#include "xfs_dinode.h"
 
 /*
  * Local function prototypes.
@@ -353,7 +350,6 @@ xfs_dir2_block_addname(
 	int			low;		/* low index for binary srch */
 	int			lowstale;	/* low stale index */
 	int			mid=0;		/* midpoint for binary srch */
-	xfs_mount_t		*mp;		/* filesystem mount point */
 	int			needlog;	/* need to log header */
 	int			needscan;	/* need to rescan freespace */
 	__be16			*tagp;		/* pointer to tag value */
@@ -363,7 +359,6 @@ xfs_dir2_block_addname(
 
 	dp = args->dp;
 	tp = args->trans;
-	mp = dp->i_mount;
 
 	/* Read the (one and only) directory block into bp. */
 	error = xfs_dir3_block_read(tp, dp, &bp);
@@ -618,7 +613,6 @@ xfs_dir2_block_lookup(
 	xfs_inode_t		*dp;		/* incore inode */
 	int			ent;		/* entry index */
 	int			error;		/* error return value */
-	xfs_mount_t		*mp;		/* filesystem mount point */
 
 	trace_xfs_dir2_block_lookup(args);
 
@@ -629,7 +623,6 @@ xfs_dir2_block_lookup(
 	if ((error = xfs_dir2_block_lookup_int(args, &bp, &ent)))
 		return error;
 	dp = args->dp;
-	mp = dp->i_mount;
 	hdr = bp->b_addr;
 	xfs_dir3_data_check(dp, bp);
 	btp = xfs_dir2_block_tail_p(args->geo, hdr);
@@ -770,7 +763,6 @@ xfs_dir2_block_removename(
 	xfs_inode_t		*dp;		/* incore inode */
 	int			ent;		/* block leaf entry index */
 	int			error;		/* error return value */
-	xfs_mount_t		*mp;		/* filesystem mount point */
 	int			needlog;	/* need to log block header */
 	int			needscan;	/* need to fixup bestfree */
 	xfs_dir2_sf_hdr_t	sfh;		/* shortform header */
@@ -788,7 +780,6 @@ xfs_dir2_block_removename(
 	}
 	dp = args->dp;
 	tp = args->trans;
-	mp = dp->i_mount;
 	hdr = bp->b_addr;
 	btp = xfs_dir2_block_tail_p(args->geo, hdr);
 	blp = xfs_dir2_block_leaf_p(btp);
@@ -852,7 +843,6 @@ xfs_dir2_block_replace(
 	xfs_inode_t		*dp;		/* incore inode */
 	int			ent;		/* leaf entry index */
 	int			error;		/* error return value */
-	xfs_mount_t		*mp;		/* filesystem mount point */
 
 	trace_xfs_dir2_block_replace(args);
 
@@ -864,7 +854,6 @@ xfs_dir2_block_replace(
 		return error;
 	}
 	dp = args->dp;
-	mp = dp->i_mount;
 	hdr = bp->b_addr;
 	btp = xfs_dir2_block_tail_p(args->geo, hdr);
 	blp = xfs_dir2_block_leaf_p(btp);
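A pattern that recurs in these hunks: the block tail and its leaf entry array are derived purely from the directory geometry, which is why the mount pointer is dead weight at these call sites. A sketch, with hdr assumed to point at a geo->blksize directory data block:

    struct xfs_dir2_block_tail *btp = xfs_dir2_block_tail_p(geo, hdr);
    struct xfs_dir2_leaf_entry *blp = xfs_dir2_block_leaf_p(btp);
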
diff --git a/fs/xfs/libxfs/xfs_dir2_data.c b/fs/xfs/libxfs/xfs_dir2_data.c
index fdd803fecb8e..5ff31be9b1cd 100644
--- a/fs/xfs/libxfs/xfs_dir2_data.c
+++ b/fs/xfs/libxfs/xfs_dir2_data.c
@@ -21,8 +21,6 @@
 #include "xfs_format.h"
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_da_format.h"
 #include "xfs_da_btree.h"
diff --git a/fs/xfs/libxfs/xfs_dir2_leaf.c b/fs/xfs/libxfs/xfs_dir2_leaf.c
index a19174eb3cb2..106119955400 100644
--- a/fs/xfs/libxfs/xfs_dir2_leaf.c
+++ b/fs/xfs/libxfs/xfs_dir2_leaf.c
@@ -21,8 +21,6 @@
 #include "xfs_format.h"
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_da_format.h"
 #include "xfs_da_btree.h"
@@ -384,7 +382,6 @@ xfs_dir2_block_to_leaf(
 	xfs_dir2_db_t		ldb;		/* leaf block's bno */
 	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
 	xfs_dir2_leaf_tail_t	*ltp;		/* leaf's tail */
-	xfs_mount_t		*mp;		/* filesystem mount point */
 	int			needlog;	/* need to log block header */
 	int			needscan;	/* need to rescan bestfree */
 	xfs_trans_t		*tp;		/* transaction pointer */
@@ -395,7 +392,6 @@ xfs_dir2_block_to_leaf(
 	trace_xfs_dir2_block_to_leaf(args);
 
 	dp = args->dp;
-	mp = dp->i_mount;
 	tp = args->trans;
 	/*
 	 * Add the leaf block to the inode.
@@ -626,7 +622,6 @@ xfs_dir2_leaf_addname(
 	int			lfloghigh;	/* high leaf logging index */
 	int			lowstale;	/* index of prev stale leaf */
 	xfs_dir2_leaf_tail_t	*ltp;		/* leaf tail pointer */
-	xfs_mount_t		*mp;		/* filesystem mount point */
 	int			needbytes;	/* leaf block bytes needed */
 	int			needlog;	/* need to log data header */
 	int			needscan;	/* need to rescan data free */
@@ -641,7 +636,6 @@ xfs_dir2_leaf_addname(
 
 	dp = args->dp;
 	tp = args->trans;
-	mp = dp->i_mount;
 
 	error = xfs_dir3_leaf_read(tp, dp, args->geo->leafblk, -1, &lbp);
 	if (error)
@@ -1356,11 +1350,9 @@ xfs_dir2_leaf_removename(
 	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
 	xfs_dir2_leaf_entry_t	*lep;		/* leaf entry */
 	xfs_dir2_leaf_tail_t	*ltp;		/* leaf tail structure */
-	xfs_mount_t		*mp;		/* filesystem mount point */
 	int			needlog;	/* need to log data header */
 	int			needscan;	/* need to rescan data frees */
 	xfs_dir2_data_off_t	oldbest;	/* old value of best free */
-	xfs_trans_t		*tp;		/* transaction pointer */
 	struct xfs_dir2_data_free *bf;		/* bestfree table */
 	struct xfs_dir2_leaf_entry *ents;
 	struct xfs_dir3_icleaf_hdr leafhdr;
@@ -1374,8 +1366,6 @@ xfs_dir2_leaf_removename(
 		return error;
 	}
 	dp = args->dp;
-	tp = args->trans;
-	mp = dp->i_mount;
 	leaf = lbp->b_addr;
 	hdr = dbp->b_addr;
 	xfs_dir3_data_check(dp, dbp);
@@ -1607,11 +1597,9 @@ xfs_dir2_leaf_trim_data(
 	int			error;		/* error return value */
 	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
 	xfs_dir2_leaf_tail_t	*ltp;		/* leaf tail structure */
-	xfs_mount_t		*mp;		/* filesystem mount point */
 	xfs_trans_t		*tp;		/* transaction pointer */
 
 	dp = args->dp;
-	mp = dp->i_mount;
 	tp = args->trans;
 	/*
 	 * Read the offending data block. We need its buffer.
diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
index 2ae6ac2c11ae..41b80d3d3877 100644
--- a/fs/xfs/libxfs/xfs_dir2_node.c
+++ b/fs/xfs/libxfs/xfs_dir2_node.c
@@ -21,8 +21,6 @@
 #include "xfs_format.h"
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_da_format.h"
 #include "xfs_da_btree.h"
@@ -297,7 +295,6 @@ xfs_dir2_leaf_to_node(
 	int			i;		/* leaf freespace index */
 	xfs_dir2_leaf_t		*leaf;		/* leaf structure */
 	xfs_dir2_leaf_tail_t	*ltp;		/* leaf tail structure */
-	xfs_mount_t		*mp;		/* filesystem mount point */
 	int			n;		/* count of live freespc ents */
 	xfs_dir2_data_off_t	off;		/* freespace entry value */
 	__be16			*to;		/* pointer to freespace entry */
@@ -307,7 +304,6 @@ xfs_dir2_leaf_to_node(
 	trace_xfs_dir2_leaf_to_node(args);
 
 	dp = args->dp;
-	mp = dp->i_mount;
 	tp = args->trans;
 	/*
 	 * Add a freespace block to the directory.
@@ -387,16 +383,12 @@ xfs_dir2_leafn_add(
 	int			lfloghigh;	/* high leaf entry logging */
 	int			lfloglow;	/* low leaf entry logging */
 	int			lowstale;	/* previous stale entry */
-	xfs_mount_t		*mp;		/* filesystem mount point */
-	xfs_trans_t		*tp;		/* transaction pointer */
 	struct xfs_dir3_icleaf_hdr leafhdr;
 	struct xfs_dir2_leaf_entry *ents;
 
 	trace_xfs_dir2_leafn_add(args, index);
 
 	dp = args->dp;
-	mp = dp->i_mount;
-	tp = args->trans;
 	leaf = bp->b_addr;
 	dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
 	ents = dp->d_ops->leaf_ents_p(leaf);
@@ -1170,7 +1162,6 @@ xfs_dir2_leafn_remove(
 	xfs_dir2_leaf_entry_t	*lep;		/* leaf entry */
 	int			longest;	/* longest data free entry */
 	int			off;		/* data block entry offset */
-	xfs_mount_t		*mp;		/* filesystem mount point */
 	int			needlog;	/* need to log data header */
 	int			needscan;	/* need to rescan data frees */
 	xfs_trans_t		*tp;		/* transaction pointer */
@@ -1182,7 +1173,6 @@ xfs_dir2_leafn_remove(
 
 	dp = args->dp;
 	tp = args->trans;
-	mp = dp->i_mount;
 	leaf = bp->b_addr;
 	dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
 	ents = dp->d_ops->leaf_ents_p(leaf);
@@ -1323,7 +1313,6 @@ xfs_dir2_leafn_split(
 	xfs_da_args_t		*args;		/* operation arguments */
 	xfs_dablk_t		blkno;		/* new leaf block number */
 	int			error;		/* error return value */
-	xfs_mount_t		*mp;		/* filesystem mount point */
 	struct xfs_inode	*dp;
 
 	/*
@@ -1331,7 +1320,6 @@ xfs_dir2_leafn_split(
 	 */
 	args = state->args;
 	dp = args->dp;
-	mp = dp->i_mount;
 	ASSERT(oldblk->magic == XFS_DIR2_LEAFN_MAGIC);
 	error = xfs_da_grow_inode(args, &blkno);
 	if (error) {
@@ -2231,12 +2219,10 @@ xfs_dir2_node_trim_free(
 	xfs_inode_t		*dp;		/* incore directory inode */
 	int			error;		/* error return code */
 	xfs_dir2_free_t		*free;		/* freespace structure */
-	xfs_mount_t		*mp;		/* filesystem mount point */
 	xfs_trans_t		*tp;		/* transaction pointer */
 	struct xfs_dir3_icfree_hdr freehdr;
 
 	dp = args->dp;
-	mp = dp->i_mount;
 	tp = args->trans;
 	/*
 	 * Read the freespace block.
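The reason so many xfs_mount_t locals can go is visible in the surviving context: the decode paths run through the per-inode directory operations vector rather than mount-wide state. The shared shape of those call sites, sketched with dp and bp assumed in scope:

    /* Sketch: decode a leaf block via the inode's dir ops vector. */
    struct xfs_dir2_leaf *leaf = bp->b_addr;
    struct xfs_dir3_icleaf_hdr leafhdr;
    dp->d_ops->leaf_hdr_from_disk(&leafhdr, leaf);
    struct xfs_dir2_leaf_entry *ents = dp->d_ops->leaf_ents_p(leaf);
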
diff --git a/fs/xfs/libxfs/xfs_dir2_priv.h b/fs/xfs/libxfs/xfs_dir2_priv.h
index 27ce0794d196..ef9f6ead96a4 100644
--- a/fs/xfs/libxfs/xfs_dir2_priv.h
+++ b/fs/xfs/libxfs/xfs_dir2_priv.h
@@ -20,140 +20,6 @@
 
 struct dir_context;
 
-/*
- * Directory offset/block conversion functions.
- *
- * DB blocks here are logical directory block numbers, not filesystem blocks.
- */
-
-/*
- * Convert dataptr to byte in file space
- */
-static inline xfs_dir2_off_t
-xfs_dir2_dataptr_to_byte(xfs_dir2_dataptr_t dp)
-{
-	return (xfs_dir2_off_t)dp << XFS_DIR2_DATA_ALIGN_LOG;
-}
-
-/*
- * Convert byte in file space to dataptr.  It had better be aligned.
- */
-static inline xfs_dir2_dataptr_t
-xfs_dir2_byte_to_dataptr(xfs_dir2_off_t by)
-{
-	return (xfs_dir2_dataptr_t)(by >> XFS_DIR2_DATA_ALIGN_LOG);
-}
-
-/*
- * Convert byte in space to (DB) block
- */
-static inline xfs_dir2_db_t
-xfs_dir2_byte_to_db(struct xfs_da_geometry *geo, xfs_dir2_off_t by)
-{
-	return (xfs_dir2_db_t)(by >> geo->blklog);
-}
-
-/*
- * Convert dataptr to a block number
- */
-static inline xfs_dir2_db_t
-xfs_dir2_dataptr_to_db(struct xfs_da_geometry *geo, xfs_dir2_dataptr_t dp)
-{
-	return xfs_dir2_byte_to_db(geo, xfs_dir2_dataptr_to_byte(dp));
-}
-
-/*
- * Convert byte in space to offset in a block
- */
-static inline xfs_dir2_data_aoff_t
-xfs_dir2_byte_to_off(struct xfs_da_geometry *geo, xfs_dir2_off_t by)
-{
-	return (xfs_dir2_data_aoff_t)(by & (geo->blksize - 1));
-}
-
-/*
- * Convert dataptr to a byte offset in a block
- */
-static inline xfs_dir2_data_aoff_t
-xfs_dir2_dataptr_to_off(struct xfs_da_geometry *geo, xfs_dir2_dataptr_t dp)
-{
-	return xfs_dir2_byte_to_off(geo, xfs_dir2_dataptr_to_byte(dp));
-}
-
-/*
- * Convert block and offset to byte in space
- */
-static inline xfs_dir2_off_t
-xfs_dir2_db_off_to_byte(struct xfs_da_geometry *geo, xfs_dir2_db_t db,
-			xfs_dir2_data_aoff_t o)
-{
-	return ((xfs_dir2_off_t)db << geo->blklog) + o;
-}
-
-/*
- * Convert block (DB) to block (dablk)
- */
-static inline xfs_dablk_t
-xfs_dir2_db_to_da(struct xfs_da_geometry *geo, xfs_dir2_db_t db)
-{
-	return (xfs_dablk_t)(db << (geo->blklog - geo->fsblog));
-}
-
-/*
- * Convert byte in space to (DA) block
- */
-static inline xfs_dablk_t
-xfs_dir2_byte_to_da(struct xfs_da_geometry *geo, xfs_dir2_off_t by)
-{
-	return xfs_dir2_db_to_da(geo, xfs_dir2_byte_to_db(geo, by));
-}
-
-/*
- * Convert block and offset to dataptr
- */
-static inline xfs_dir2_dataptr_t
-xfs_dir2_db_off_to_dataptr(struct xfs_da_geometry *geo, xfs_dir2_db_t db,
-			   xfs_dir2_data_aoff_t o)
-{
-	return xfs_dir2_byte_to_dataptr(xfs_dir2_db_off_to_byte(geo, db, o));
-}
-
-/*
- * Convert block (dablk) to block (DB)
- */
-static inline xfs_dir2_db_t
-xfs_dir2_da_to_db(struct xfs_da_geometry *geo, xfs_dablk_t da)
-{
-	return (xfs_dir2_db_t)(da >> (geo->blklog - geo->fsblog));
-}
-
-/*
- * Convert block (dablk) to byte offset in space
- */
-static inline xfs_dir2_off_t
-xfs_dir2_da_to_byte(struct xfs_da_geometry *geo, xfs_dablk_t da)
-{
-	return xfs_dir2_db_off_to_byte(geo, xfs_dir2_da_to_db(geo, da), 0);
-}
-
-/*
- * Directory tail pointer accessor functions.  Based on block geometry.
- */
-static inline struct xfs_dir2_block_tail *
-xfs_dir2_block_tail_p(struct xfs_da_geometry *geo, struct xfs_dir2_data_hdr *hdr)
-{
-	return ((struct xfs_dir2_block_tail *)
-		((char *)hdr + geo->blksize)) - 1;
-}
-
-static inline struct xfs_dir2_leaf_tail *
-xfs_dir2_leaf_tail_p(struct xfs_da_geometry *geo, struct xfs_dir2_leaf *lp)
-{
-	return (struct xfs_dir2_leaf_tail *)
-		((char *)lp + geo->blksize -
-		  sizeof(struct xfs_dir2_leaf_tail));
-}
-
 /* xfs_dir2.c */
 extern int xfs_dir_ino_validate(struct xfs_mount *mp, xfs_ino_t ino);
 extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space,
@@ -161,12 +27,6 @@ extern int xfs_dir2_grow_inode(struct xfs_da_args *args, int space,
 extern int xfs_dir_cilookup_result(struct xfs_da_args *args,
 		const unsigned char *name, int len);
 
-#define S_SHIFT 12
-extern const unsigned char xfs_mode_to_ftype[];
-
-extern unsigned char xfs_dir3_get_dtype(struct xfs_mount *mp,
-					__uint8_t filetype);
-
 
 /* xfs_dir2_block.c */
 extern int xfs_dir3_block_read(struct xfs_trans *tp, struct xfs_inode *dp,
diff --git a/fs/xfs/libxfs/xfs_dir2_sf.c b/fs/xfs/libxfs/xfs_dir2_sf.c
index 5079e051ef08..974d62e677f4 100644
--- a/fs/xfs/libxfs/xfs_dir2_sf.c
+++ b/fs/xfs/libxfs/xfs_dir2_sf.c
@@ -20,8 +20,6 @@
 #include "xfs_format.h"
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_da_format.h"
 #include "xfs_da_btree.h"
@@ -32,7 +30,6 @@
 #include "xfs_dir2.h"
 #include "xfs_dir2_priv.h"
 #include "xfs_trace.h"
-#include "xfs_dinode.h"
 
 /*
  * Prototypes for internal functions.
@@ -455,13 +452,11 @@ xfs_dir2_sf_addname_hard(
 	xfs_dir2_sf_hdr_t	*oldsfp;	/* original shortform dir */
 	xfs_dir2_sf_entry_t	*sfep;		/* entry in new dir */
 	xfs_dir2_sf_hdr_t	*sfp;		/* new shortform dir */
-	struct xfs_mount	*mp;
 
 	/*
	 * Copy the old directory to the stack buffer.
	 */
 	dp = args->dp;
-	mp = dp->i_mount;
 
 	sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
 	old_isize = (int)dp->i_d.di_size;
@@ -542,7 +537,6 @@ xfs_dir2_sf_addname_pick(
 	xfs_inode_t		*dp;		/* incore directory inode */
 	int			holefit;	/* found hole it will fit in */
 	int			i;		/* entry number */
-	xfs_mount_t		*mp;		/* filesystem mount point */
 	xfs_dir2_data_aoff_t	offset;		/* data block offset */
 	xfs_dir2_sf_entry_t	*sfep;		/* shortform entry */
 	xfs_dir2_sf_hdr_t	*sfp;		/* shortform structure */
@@ -550,7 +544,6 @@ xfs_dir2_sf_addname_pick(
 	int			used;		/* data bytes used */
 
 	dp = args->dp;
-	mp = dp->i_mount;
 
 	sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
 	size = dp->d_ops->data_entsize(args->namelen);
@@ -616,10 +609,8 @@ xfs_dir2_sf_check(
 	int			offset;		/* data offset */
 	xfs_dir2_sf_entry_t	*sfep;		/* shortform dir entry */
 	xfs_dir2_sf_hdr_t	*sfp;		/* shortform structure */
-	struct xfs_mount	*mp;
 
 	dp = args->dp;
-	mp = dp->i_mount;
 
 	sfp = (xfs_dir2_sf_hdr_t *)dp->i_df.if_u1.if_data;
 	offset = dp->d_ops->data_first_offset;
@@ -1016,12 +1007,10 @@ xfs_dir2_sf_toino4(
 	int			oldsize;	/* old inode size */
 	xfs_dir2_sf_entry_t	*sfep;		/* new sf entry */
 	xfs_dir2_sf_hdr_t	*sfp;		/* new sf directory */
-	struct xfs_mount	*mp;
 
 	trace_xfs_dir2_sf_toino4(args);
 
 	dp = args->dp;
-	mp = dp->i_mount;
 
 	/*
	 * Copy the old directory to the buffer.
@@ -1094,12 +1083,10 @@ xfs_dir2_sf_toino8(
 	int			oldsize;	/* old inode size */
 	xfs_dir2_sf_entry_t	*sfep;		/* new sf entry */
 	xfs_dir2_sf_hdr_t	*sfp;		/* new sf directory */
-	struct xfs_mount	*mp;
 
 	trace_xfs_dir2_sf_toino8(args);
 
 	dp = args->dp;
-	mp = dp->i_mount;
 
 	/*
	 * Copy the old directory to the buffer.
diff --git a/fs/xfs/libxfs/xfs_dquot_buf.c b/fs/xfs/libxfs/xfs_dquot_buf.c
index bb969337efc8..6fbf2d853a54 100644
--- a/fs/xfs/libxfs/xfs_dquot_buf.c
+++ b/fs/xfs/libxfs/xfs_dquot_buf.c
@@ -22,8 +22,6 @@
 #include "xfs_format.h"
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_inode.h"
 #include "xfs_quota.h"
diff --git a/fs/xfs/libxfs/xfs_format.h b/fs/xfs/libxfs/xfs_format.h
index 7e42bba9a420..fbd6da263571 100644
--- a/fs/xfs/libxfs/xfs_format.h
+++ b/fs/xfs/libxfs/xfs_format.h
@@ -34,6 +34,1077 @@ struct xfs_buf;
 struct xfs_ifork;
 
 /*
+ * Super block
+ * Fits into a sector-sized buffer at address 0 of each allocation group.
+ * Only the first of these is ever updated except during growfs.
+ */
+#define	XFS_SB_MAGIC		0x58465342	/* 'XFSB' */
+#define	XFS_SB_VERSION_1	1		/* 5.3, 6.0.1, 6.1 */
+#define	XFS_SB_VERSION_2	2		/* 6.2 - attributes */
+#define	XFS_SB_VERSION_3	3		/* 6.2 - new inode version */
+#define	XFS_SB_VERSION_4	4		/* 6.2+ - bitmask version */
+#define	XFS_SB_VERSION_5	5		/* CRC enabled filesystem */
+#define	XFS_SB_VERSION_NUMBITS		0x000f
+#define	XFS_SB_VERSION_ALLFBITS		0xfff0
+#define	XFS_SB_VERSION_ATTRBIT		0x0010
+#define	XFS_SB_VERSION_NLINKBIT		0x0020
+#define	XFS_SB_VERSION_QUOTABIT		0x0040
+#define	XFS_SB_VERSION_ALIGNBIT		0x0080
+#define	XFS_SB_VERSION_DALIGNBIT	0x0100
+#define	XFS_SB_VERSION_SHAREDBIT	0x0200
+#define	XFS_SB_VERSION_LOGV2BIT		0x0400
+#define	XFS_SB_VERSION_SECTORBIT	0x0800
+#define	XFS_SB_VERSION_EXTFLGBIT	0x1000
+#define	XFS_SB_VERSION_DIRV2BIT		0x2000
+#define	XFS_SB_VERSION_BORGBIT		0x4000	/* ASCII only case-insens. */
+#define	XFS_SB_VERSION_MOREBITSBIT	0x8000
+
+/*
+ * Supported feature bit list is just all bits in the versionnum field because
+ * we've used them all up and understand them all. Except, of course, for the
+ * shared superblock bit, which nobody knows what it does and so is unsupported.
+ */
+#define XFS_SB_VERSION_OKBITS		\
+	((XFS_SB_VERSION_NUMBITS | XFS_SB_VERSION_ALLFBITS) & \
+		~XFS_SB_VERSION_SHAREDBIT)
+
+/*
+ * There are two words to hold XFS "feature" bits: the original
+ * word, sb_versionnum, and sb_features2.  Whenever a bit is set in
+ * sb_features2, the feature bit XFS_SB_VERSION_MOREBITSBIT must be set.
+ *
+ * These defines represent bits in sb_features2.
+ */
+#define XFS_SB_VERSION2_RESERVED1BIT	0x00000001
+#define XFS_SB_VERSION2_LAZYSBCOUNTBIT	0x00000002	/* Superblk counters */
+#define XFS_SB_VERSION2_RESERVED4BIT	0x00000004
+#define XFS_SB_VERSION2_ATTR2BIT	0x00000008	/* Inline attr rework */
+#define XFS_SB_VERSION2_PARENTBIT	0x00000010	/* parent pointers */
+#define XFS_SB_VERSION2_PROJID32BIT	0x00000080	/* 32 bit project id */
+#define XFS_SB_VERSION2_CRCBIT		0x00000100	/* metadata CRCs */
+#define XFS_SB_VERSION2_FTYPE		0x00000200	/* inode type in dir */
+
+#define	XFS_SB_VERSION2_OKBITS		\
+	(XFS_SB_VERSION2_LAZYSBCOUNTBIT	| \
+	 XFS_SB_VERSION2_ATTR2BIT	| \
+	 XFS_SB_VERSION2_PROJID32BIT	| \
+	 XFS_SB_VERSION2_FTYPE)
+
+/*
+ * Superblock - in core version.  Must match the ondisk version below.
+ * Must be padded to 64 bit alignment.
+ */
+typedef struct xfs_sb {
+	__uint32_t	sb_magicnum;	/* magic number == XFS_SB_MAGIC */
+	__uint32_t	sb_blocksize;	/* logical block size, bytes */
+	xfs_rfsblock_t	sb_dblocks;	/* number of data blocks */
+	xfs_rfsblock_t	sb_rblocks;	/* number of realtime blocks */
+	xfs_rtblock_t	sb_rextents;	/* number of realtime extents */
+	uuid_t		sb_uuid;	/* file system unique id */
+	xfs_fsblock_t	sb_logstart;	/* starting block of log if internal */
+	xfs_ino_t	sb_rootino;	/* root inode number */
+	xfs_ino_t	sb_rbmino;	/* bitmap inode for realtime extents */
+	xfs_ino_t	sb_rsumino;	/* summary inode for rt bitmap */
+	xfs_agblock_t	sb_rextsize;	/* realtime extent size, blocks */
+	xfs_agblock_t	sb_agblocks;	/* size of an allocation group */
+	xfs_agnumber_t	sb_agcount;	/* number of allocation groups */
+	xfs_extlen_t	sb_rbmblocks;	/* number of rt bitmap blocks */
+	xfs_extlen_t	sb_logblocks;	/* number of log blocks */
+	__uint16_t	sb_versionnum;	/* header version == XFS_SB_VERSION */
+	__uint16_t	sb_sectsize;	/* volume sector size, bytes */
+	__uint16_t	sb_inodesize;	/* inode size, bytes */
+	__uint16_t	sb_inopblock;	/* inodes per block */
+	char		sb_fname[12];	/* file system name */
+	__uint8_t	sb_blocklog;	/* log2 of sb_blocksize */
+	__uint8_t	sb_sectlog;	/* log2 of sb_sectsize */
+	__uint8_t	sb_inodelog;	/* log2 of sb_inodesize */
+	__uint8_t	sb_inopblog;	/* log2 of sb_inopblock */
+	__uint8_t	sb_agblklog;	/* log2 of sb_agblocks (rounded up) */
+	__uint8_t	sb_rextslog;	/* log2 of sb_rextents */
+	__uint8_t	sb_inprogress;	/* mkfs is in progress, don't mount */
+	__uint8_t	sb_imax_pct;	/* max % of fs for inode space */
+					/* statistics */
+	/*
+	 * These fields must remain contiguous.  If you really
+	 * want to change their layout, make sure you fix the
+	 * code in xfs_trans_apply_sb_deltas().
+	 */
+	__uint64_t	sb_icount;	/* allocated inodes */
+	__uint64_t	sb_ifree;	/* free inodes */
+	__uint64_t	sb_fdblocks;	/* free data blocks */
+	__uint64_t	sb_frextents;	/* free realtime extents */
+	/*
+	 * End contiguous fields.
+	 */
+	xfs_ino_t	sb_uquotino;	/* user quota inode */
+	xfs_ino_t	sb_gquotino;	/* group quota inode */
+	__uint16_t	sb_qflags;	/* quota flags */
+	__uint8_t	sb_flags;	/* misc. flags */
+	__uint8_t	sb_shared_vn;	/* shared version number */
+	xfs_extlen_t	sb_inoalignmt;	/* inode chunk alignment, fsblocks */
+	__uint32_t	sb_unit;	/* stripe or raid unit */
+	__uint32_t	sb_width;	/* stripe or raid width */
+	__uint8_t	sb_dirblklog;	/* log2 of dir block size (fsbs) */
+	__uint8_t	sb_logsectlog;	/* log2 of the log sector size */
+	__uint16_t	sb_logsectsize;	/* sector size for the log, bytes */
+	__uint32_t	sb_logsunit;	/* stripe unit size for the log */
+	__uint32_t	sb_features2;	/* additional feature bits */
+
+	/*
+	 * bad features2 field as a result of failing to pad the sb
+	 * structure to 64 bits. Some machines will be using this field
+	 * for features2 bits. Easiest just to mark it bad and not use
+	 * it for anything else.
+	 */
+	__uint32_t	sb_bad_features2;
+
+	/* version 5 superblock fields start here */
+
+	/* feature masks */
+	__uint32_t	sb_features_compat;
+	__uint32_t	sb_features_ro_compat;
+	__uint32_t	sb_features_incompat;
+	__uint32_t	sb_features_log_incompat;
+
+	__uint32_t	sb_crc;		/* superblock crc */
+	__uint32_t	sb_pad;
+
+	xfs_ino_t	sb_pquotino;	/* project quota inode */
+	xfs_lsn_t	sb_lsn;		/* last write sequence */
+
+	/* must be padded to 64 bit alignment */
+} xfs_sb_t;
+
+#define XFS_SB_CRC_OFF		offsetof(struct xfs_sb, sb_crc)
+
+/*
+ * Superblock - on disk version.  Must match the in core version above.
+ * Must be padded to 64 bit alignment.
+ */
+typedef struct xfs_dsb {
+	__be32		sb_magicnum;	/* magic number == XFS_SB_MAGIC */
+	__be32		sb_blocksize;	/* logical block size, bytes */
+	__be64		sb_dblocks;	/* number of data blocks */
+	__be64		sb_rblocks;	/* number of realtime blocks */
+	__be64		sb_rextents;	/* number of realtime extents */
+	uuid_t		sb_uuid;	/* file system unique id */
+	__be64		sb_logstart;	/* starting block of log if internal */
+	__be64		sb_rootino;	/* root inode number */
+	__be64		sb_rbmino;	/* bitmap inode for realtime extents */
+	__be64		sb_rsumino;	/* summary inode for rt bitmap */
+	__be32		sb_rextsize;	/* realtime extent size, blocks */
+	__be32		sb_agblocks;	/* size of an allocation group */
+	__be32		sb_agcount;	/* number of allocation groups */
+	__be32		sb_rbmblocks;	/* number of rt bitmap blocks */
+	__be32		sb_logblocks;	/* number of log blocks */
+	__be16		sb_versionnum;	/* header version == XFS_SB_VERSION */
+	__be16		sb_sectsize;	/* volume sector size, bytes */
+	__be16		sb_inodesize;	/* inode size, bytes */
+	__be16		sb_inopblock;	/* inodes per block */
+	char		sb_fname[12];	/* file system name */
+	__u8		sb_blocklog;	/* log2 of sb_blocksize */
+	__u8		sb_sectlog;	/* log2 of sb_sectsize */
+	__u8		sb_inodelog;	/* log2 of sb_inodesize */
+	__u8		sb_inopblog;	/* log2 of sb_inopblock */
+	__u8		sb_agblklog;	/* log2 of sb_agblocks (rounded up) */
+	__u8		sb_rextslog;	/* log2 of sb_rextents */
+	__u8		sb_inprogress;	/* mkfs is in progress, don't mount */
+	__u8		sb_imax_pct;	/* max % of fs for inode space */
+					/* statistics */
+	/*
+	 * These fields must remain contiguous.  If you really
+	 * want to change their layout, make sure you fix the
+	 * code in xfs_trans_apply_sb_deltas().
+	 */
+	__be64		sb_icount;	/* allocated inodes */
+	__be64		sb_ifree;	/* free inodes */
+	__be64		sb_fdblocks;	/* free data blocks */
+	__be64		sb_frextents;	/* free realtime extents */
+	/*
+	 * End contiguous fields.
+	 */
+	__be64		sb_uquotino;	/* user quota inode */
+	__be64		sb_gquotino;	/* group quota inode */
+	__be16		sb_qflags;	/* quota flags */
+	__u8		sb_flags;	/* misc. flags */
+	__u8		sb_shared_vn;	/* shared version number */
+	__be32		sb_inoalignmt;	/* inode chunk alignment, fsblocks */
+	__be32		sb_unit;	/* stripe or raid unit */
+	__be32		sb_width;	/* stripe or raid width */
+	__u8		sb_dirblklog;	/* log2 of dir block size (fsbs) */
+	__u8		sb_logsectlog;	/* log2 of the log sector size */
+	__be16		sb_logsectsize;	/* sector size for the log, bytes */
+	__be32		sb_logsunit;	/* stripe unit size for the log */
+	__be32		sb_features2;	/* additional feature bits */
+	/*
+	 * bad features2 field as a result of failing to pad the sb
+	 * structure to 64 bits. Some machines will be using this field
+	 * for features2 bits. Easiest just to mark it bad and not use
+	 * it for anything else.
+	 */
+	__be32		sb_bad_features2;
+
+	/* version 5 superblock fields start here */
+
+	/* feature masks */
+	__be32		sb_features_compat;
+	__be32		sb_features_ro_compat;
+	__be32		sb_features_incompat;
+	__be32		sb_features_log_incompat;
+
+	__le32		sb_crc;		/* superblock crc */
+	__be32		sb_pad;
+
+	__be64		sb_pquotino;	/* project quota inode */
+	__be64		sb_lsn;		/* last write sequence */
+
+	/* must be padded to 64 bit alignment */
+} xfs_dsb_t;
+
+/*
+ * Sequence number values for the fields.
+ */
+typedef enum {
+	XFS_SBS_MAGICNUM, XFS_SBS_BLOCKSIZE, XFS_SBS_DBLOCKS, XFS_SBS_RBLOCKS,
+	XFS_SBS_REXTENTS, XFS_SBS_UUID, XFS_SBS_LOGSTART, XFS_SBS_ROOTINO,
+	XFS_SBS_RBMINO, XFS_SBS_RSUMINO, XFS_SBS_REXTSIZE, XFS_SBS_AGBLOCKS,
+	XFS_SBS_AGCOUNT, XFS_SBS_RBMBLOCKS, XFS_SBS_LOGBLOCKS,
+	XFS_SBS_VERSIONNUM, XFS_SBS_SECTSIZE, XFS_SBS_INODESIZE,
+	XFS_SBS_INOPBLOCK, XFS_SBS_FNAME, XFS_SBS_BLOCKLOG,
+	XFS_SBS_SECTLOG, XFS_SBS_INODELOG, XFS_SBS_INOPBLOG, XFS_SBS_AGBLKLOG,
+	XFS_SBS_REXTSLOG, XFS_SBS_INPROGRESS, XFS_SBS_IMAX_PCT, XFS_SBS_ICOUNT,
+	XFS_SBS_IFREE, XFS_SBS_FDBLOCKS, XFS_SBS_FREXTENTS, XFS_SBS_UQUOTINO,
+	XFS_SBS_GQUOTINO, XFS_SBS_QFLAGS, XFS_SBS_FLAGS, XFS_SBS_SHARED_VN,
+	XFS_SBS_INOALIGNMT, XFS_SBS_UNIT, XFS_SBS_WIDTH, XFS_SBS_DIRBLKLOG,
+	XFS_SBS_LOGSECTLOG, XFS_SBS_LOGSECTSIZE, XFS_SBS_LOGSUNIT,
+	XFS_SBS_FEATURES2, XFS_SBS_BAD_FEATURES2, XFS_SBS_FEATURES_COMPAT,
+	XFS_SBS_FEATURES_RO_COMPAT, XFS_SBS_FEATURES_INCOMPAT,
+	XFS_SBS_FEATURES_LOG_INCOMPAT, XFS_SBS_CRC, XFS_SBS_PAD,
+	XFS_SBS_PQUOTINO, XFS_SBS_LSN,
+	XFS_SBS_FIELDCOUNT
+} xfs_sb_field_t;
+
+/*
+ * Mask values, defined based on the xfs_sb_field_t values.
+ * Only define the ones we're using.
+ */
+#define	XFS_SB_MVAL(x)		(1LL << XFS_SBS_ ## x)
+#define	XFS_SB_UUID		XFS_SB_MVAL(UUID)
+#define	XFS_SB_FNAME		XFS_SB_MVAL(FNAME)
+#define	XFS_SB_ROOTINO		XFS_SB_MVAL(ROOTINO)
+#define	XFS_SB_RBMINO		XFS_SB_MVAL(RBMINO)
+#define	XFS_SB_RSUMINO		XFS_SB_MVAL(RSUMINO)
+#define	XFS_SB_VERSIONNUM	XFS_SB_MVAL(VERSIONNUM)
+#define	XFS_SB_UQUOTINO		XFS_SB_MVAL(UQUOTINO)
+#define	XFS_SB_GQUOTINO		XFS_SB_MVAL(GQUOTINO)
+#define	XFS_SB_QFLAGS		XFS_SB_MVAL(QFLAGS)
+#define	XFS_SB_SHARED_VN	XFS_SB_MVAL(SHARED_VN)
+#define	XFS_SB_UNIT		XFS_SB_MVAL(UNIT)
+#define	XFS_SB_WIDTH		XFS_SB_MVAL(WIDTH)
+#define	XFS_SB_ICOUNT		XFS_SB_MVAL(ICOUNT)
+#define	XFS_SB_IFREE		XFS_SB_MVAL(IFREE)
+#define	XFS_SB_FDBLOCKS		XFS_SB_MVAL(FDBLOCKS)
+#define	XFS_SB_FEATURES2	XFS_SB_MVAL(FEATURES2)
+#define	XFS_SB_BAD_FEATURES2	XFS_SB_MVAL(BAD_FEATURES2)
+#define	XFS_SB_FEATURES_COMPAT	XFS_SB_MVAL(FEATURES_COMPAT)
+#define	XFS_SB_FEATURES_RO_COMPAT XFS_SB_MVAL(FEATURES_RO_COMPAT)
+#define	XFS_SB_FEATURES_INCOMPAT XFS_SB_MVAL(FEATURES_INCOMPAT)
+#define	XFS_SB_FEATURES_LOG_INCOMPAT XFS_SB_MVAL(FEATURES_LOG_INCOMPAT)
+#define	XFS_SB_CRC		XFS_SB_MVAL(CRC)
+#define	XFS_SB_PQUOTINO		XFS_SB_MVAL(PQUOTINO)
+#define	XFS_SB_NUM_BITS		((int)XFS_SBS_FIELDCOUNT)
+#define	XFS_SB_ALL_BITS		((1LL << XFS_SB_NUM_BITS) - 1)
+#define	XFS_SB_MOD_BITS		\
+	(XFS_SB_UUID | XFS_SB_ROOTINO | XFS_SB_RBMINO | XFS_SB_RSUMINO | \
+	 XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | XFS_SB_GQUOTINO | \
+	 XFS_SB_QFLAGS | XFS_SB_SHARED_VN | XFS_SB_UNIT | XFS_SB_WIDTH | \
321 XFS_SB_ICOUNT | XFS_SB_IFREE | XFS_SB_FDBLOCKS | XFS_SB_FEATURES2 | \
322 XFS_SB_BAD_FEATURES2 | XFS_SB_FEATURES_COMPAT | \
323 XFS_SB_FEATURES_RO_COMPAT | XFS_SB_FEATURES_INCOMPAT | \
324 XFS_SB_FEATURES_LOG_INCOMPAT | XFS_SB_PQUOTINO)
325
326
327/*
328 * Misc. Flags - warning - these will be cleared by xfs_repair unless
329 * a feature bit is set when the flag is used.
330 */
331#define XFS_SBF_NOFLAGS 0x00 /* no flags set */
332#define XFS_SBF_READONLY 0x01 /* only read-only mounts allowed */
333
334/*
335 * define max. shared version we can interoperate with
336 */
337#define XFS_SB_MAX_SHARED_VN 0
338
339#define XFS_SB_VERSION_NUM(sbp) ((sbp)->sb_versionnum & XFS_SB_VERSION_NUMBITS)
340
341/*
342 * The first XFS version we support is a v4 superblock with V2 directories.
343 */
344static inline bool xfs_sb_good_v4_features(struct xfs_sb *sbp)
345{
346 if (!(sbp->sb_versionnum & XFS_SB_VERSION_DIRV2BIT))
347 return false;
348
349 /* check for unknown features in the fs */
350 if ((sbp->sb_versionnum & ~XFS_SB_VERSION_OKBITS) ||
351 ((sbp->sb_versionnum & XFS_SB_VERSION_MOREBITSBIT) &&
352 (sbp->sb_features2 & ~XFS_SB_VERSION2_OKBITS)))
353 return false;
354
355 return true;
356}
357
358static inline bool xfs_sb_good_version(struct xfs_sb *sbp)
359{
360 if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5)
361 return true;
362 if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4)
363 return xfs_sb_good_v4_features(sbp);
364 return false;
365}
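/*
 * Editor's sketch (hypothetical, not part of this patch): a superblock
 * read verifier would typically gate everything else on these checks,
 * along the lines of the function below. The name and error values are
 * illustrative only.
 */
static int example_verify_sb_version(struct xfs_sb *sbp)
{
	if (sbp->sb_magicnum != XFS_SB_MAGIC)
		return -EINVAL;		/* not an XFS superblock at all */
	if (!xfs_sb_good_version(sbp))
		return -EINVAL;		/* unknown version or feature bits */
	return 0;
}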
366
367/*
368 * Detect a mismatched features2 field. Older kernels read/wrote
369 * this into the wrong slot, so to be safe we keep them in sync.
370 */
371static inline bool xfs_sb_has_mismatched_features2(struct xfs_sb *sbp)
372{
373 return sbp->sb_bad_features2 != sbp->sb_features2;
374}
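/*
 * Editor's sketch (hypothetical): the safe recovery for a mismatch is
 * to union the two fields so that no feature bit is lost, and then
 * keep them identical, which is in the spirit of what the mount path
 * does.
 */
static void example_fixup_features2(struct xfs_sb *sbp)
{
	if (xfs_sb_has_mismatched_features2(sbp)) {
		sbp->sb_features2 |= sbp->sb_bad_features2;
		sbp->sb_bad_features2 = sbp->sb_features2;
	}
}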
375
376static inline bool xfs_sb_version_hasattr(struct xfs_sb *sbp)
377{
378 return (sbp->sb_versionnum & XFS_SB_VERSION_ATTRBIT);
379}
380
381static inline void xfs_sb_version_addattr(struct xfs_sb *sbp)
382{
383 sbp->sb_versionnum |= XFS_SB_VERSION_ATTRBIT;
384}
385
386static inline bool xfs_sb_version_hasquota(struct xfs_sb *sbp)
387{
388 return (sbp->sb_versionnum & XFS_SB_VERSION_QUOTABIT);
389}
390
391static inline void xfs_sb_version_addquota(struct xfs_sb *sbp)
392{
393 sbp->sb_versionnum |= XFS_SB_VERSION_QUOTABIT;
394}
395
396static inline bool xfs_sb_version_hasalign(struct xfs_sb *sbp)
397{
398 return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 ||
399 (sbp->sb_versionnum & XFS_SB_VERSION_ALIGNBIT));
400}
401
402static inline bool xfs_sb_version_hasdalign(struct xfs_sb *sbp)
403{
404 return (sbp->sb_versionnum & XFS_SB_VERSION_DALIGNBIT);
405}
406
407static inline bool xfs_sb_version_haslogv2(struct xfs_sb *sbp)
408{
409 return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 ||
410 (sbp->sb_versionnum & XFS_SB_VERSION_LOGV2BIT);
411}
412
413static inline bool xfs_sb_version_hasextflgbit(struct xfs_sb *sbp)
414{
415 return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 ||
416 (sbp->sb_versionnum & XFS_SB_VERSION_EXTFLGBIT);
417}
418
419static inline bool xfs_sb_version_hassector(struct xfs_sb *sbp)
420{
421 return (sbp->sb_versionnum & XFS_SB_VERSION_SECTORBIT);
422}
423
424static inline bool xfs_sb_version_hasasciici(struct xfs_sb *sbp)
425{
426 return (sbp->sb_versionnum & XFS_SB_VERSION_BORGBIT);
427}
428
429static inline bool xfs_sb_version_hasmorebits(struct xfs_sb *sbp)
430{
431 return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 ||
432 (sbp->sb_versionnum & XFS_SB_VERSION_MOREBITSBIT);
433}
434
435/*
436 * sb_features2 bit version macros.
437 */
438static inline bool xfs_sb_version_haslazysbcount(struct xfs_sb *sbp)
439{
440 return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) ||
441 (xfs_sb_version_hasmorebits(sbp) &&
442 (sbp->sb_features2 & XFS_SB_VERSION2_LAZYSBCOUNTBIT));
443}
444
445static inline bool xfs_sb_version_hasattr2(struct xfs_sb *sbp)
446{
447 return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) ||
448 (xfs_sb_version_hasmorebits(sbp) &&
449 (sbp->sb_features2 & XFS_SB_VERSION2_ATTR2BIT));
450}
451
452static inline void xfs_sb_version_addattr2(struct xfs_sb *sbp)
453{
454 sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT;
455 sbp->sb_features2 |= XFS_SB_VERSION2_ATTR2BIT;
456 sbp->sb_bad_features2 |= XFS_SB_VERSION2_ATTR2BIT;
457}
458
459static inline void xfs_sb_version_removeattr2(struct xfs_sb *sbp)
460{
461 sbp->sb_features2 &= ~XFS_SB_VERSION2_ATTR2BIT;
462 sbp->sb_bad_features2 &= ~XFS_SB_VERSION2_ATTR2BIT;
463 if (!sbp->sb_features2)
464 sbp->sb_versionnum &= ~XFS_SB_VERSION_MOREBITSBIT;
465}
466
467static inline bool xfs_sb_version_hasprojid32bit(struct xfs_sb *sbp)
468{
469 return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) ||
470 (xfs_sb_version_hasmorebits(sbp) &&
471 (sbp->sb_features2 & XFS_SB_VERSION2_PROJID32BIT));
472}
473
474static inline void xfs_sb_version_addprojid32bit(struct xfs_sb *sbp)
475{
476 sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT;
477 sbp->sb_features2 |= XFS_SB_VERSION2_PROJID32BIT;
478 sbp->sb_bad_features2 |= XFS_SB_VERSION2_PROJID32BIT;
479}
480
481/*
482 * Extended v5 superblock feature masks. These are to be used for new v5
483 * superblock features only.
484 *
485 * Compat features are new features that old kernels will neither notice nor
486 * be affected by, so they can mount read-write without issues.
487 *
488 * RO-Compat (read only) are features that old kernels can read but will break
489 * if they write. Hence only read-only mounts of such filesystems are allowed on
490 * kernels that don't support the feature bit.
491 *
492 * InCompat features are features which old kernels will not understand and
493 * so must refuse to mount the filesystem.
494 *
495 * Log-InCompat features are for changes to log formats or new transactions that
496 * can't be replayed on older kernels. The fields are set when the filesystem is
497 * mounted, and a clean unmount clears the fields.
498 */
499#define XFS_SB_FEAT_COMPAT_ALL 0
500#define XFS_SB_FEAT_COMPAT_UNKNOWN ~XFS_SB_FEAT_COMPAT_ALL
501static inline bool
502xfs_sb_has_compat_feature(
503 struct xfs_sb *sbp,
504 __uint32_t feature)
505{
506 return (sbp->sb_features_compat & feature) != 0;
507}
508
509#define XFS_SB_FEAT_RO_COMPAT_FINOBT (1 << 0) /* free inode btree */
510#define XFS_SB_FEAT_RO_COMPAT_ALL \
511 (XFS_SB_FEAT_RO_COMPAT_FINOBT)
512#define XFS_SB_FEAT_RO_COMPAT_UNKNOWN ~XFS_SB_FEAT_RO_COMPAT_ALL
513static inline bool
514xfs_sb_has_ro_compat_feature(
515 struct xfs_sb *sbp,
516 __uint32_t feature)
517{
518 return (sbp->sb_features_ro_compat & feature) != 0;
519}
520
521#define XFS_SB_FEAT_INCOMPAT_FTYPE (1 << 0) /* filetype in dirent */
522#define XFS_SB_FEAT_INCOMPAT_ALL \
523 (XFS_SB_FEAT_INCOMPAT_FTYPE)
524
525#define XFS_SB_FEAT_INCOMPAT_UNKNOWN ~XFS_SB_FEAT_INCOMPAT_ALL
526static inline bool
527xfs_sb_has_incompat_feature(
528 struct xfs_sb *sbp,
529 __uint32_t feature)
530{
531 return (sbp->sb_features_incompat & feature) != 0;
532}
533
534#define XFS_SB_FEAT_INCOMPAT_LOG_ALL 0
535#define XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN ~XFS_SB_FEAT_INCOMPAT_LOG_ALL
536static inline bool
537xfs_sb_has_incompat_log_feature(
538 struct xfs_sb *sbp,
539 __uint32_t feature)
540{
541 return (sbp->sb_features_log_incompat & feature) != 0;
542}
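/*
 * Editor's sketch (hypothetical): the conventional mount-time policy
 * implied by the comment above. Unknown incompat or log-incompat bits
 * refuse the mount, unknown ro-compat bits restrict it to read-only,
 * and unknown compat bits are ignored.
 */
static int example_check_v5_feature_masks(struct xfs_sb *sbp, bool readonly)
{
	if (xfs_sb_has_incompat_feature(sbp, XFS_SB_FEAT_INCOMPAT_UNKNOWN))
		return -EINVAL;		/* cannot mount at all */
	if (xfs_sb_has_incompat_log_feature(sbp,
					    XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN))
		return -EINVAL;		/* cannot replay the log */
	if (!readonly &&
	    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN))
		return -EROFS;		/* read-only mount required */
	return 0;			/* unknown compat bits are harmless */
}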
543
544/*
545 * V5 superblock specific feature checks
546 */
547static inline int xfs_sb_version_hascrc(struct xfs_sb *sbp)
548{
549 return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5;
550}
551
552static inline int xfs_sb_version_has_pquotino(struct xfs_sb *sbp)
553{
554 return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5;
555}
556
557static inline int xfs_sb_version_hasftype(struct xfs_sb *sbp)
558{
559 return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
560 xfs_sb_has_incompat_feature(sbp, XFS_SB_FEAT_INCOMPAT_FTYPE)) ||
561 (xfs_sb_version_hasmorebits(sbp) &&
562 (sbp->sb_features2 & XFS_SB_VERSION2_FTYPE));
563}
564
565static inline int xfs_sb_version_hasfinobt(xfs_sb_t *sbp)
566{
567 return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) &&
568 (sbp->sb_features_ro_compat & XFS_SB_FEAT_RO_COMPAT_FINOBT);
569}
570
571/*
572 * end of superblock version macros
573 */
574
575static inline bool
576xfs_is_quota_inode(struct xfs_sb *sbp, xfs_ino_t ino)
577{
578 return (ino == sbp->sb_uquotino ||
579 ino == sbp->sb_gquotino ||
580 ino == sbp->sb_pquotino);
581}
582
583#define XFS_SB_DADDR ((xfs_daddr_t)0) /* daddr in filesystem/ag */
584#define XFS_SB_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_SB_DADDR)
585#define XFS_BUF_TO_SBP(bp) ((xfs_dsb_t *)((bp)->b_addr))
586
587#define XFS_HDR_BLOCK(mp,d) ((xfs_agblock_t)XFS_BB_TO_FSBT(mp,d))
588#define XFS_DADDR_TO_FSB(mp,d) XFS_AGB_TO_FSB(mp, \
589 xfs_daddr_to_agno(mp,d), xfs_daddr_to_agbno(mp,d))
590#define XFS_FSB_TO_DADDR(mp,fsbno) XFS_AGB_TO_DADDR(mp, \
591 XFS_FSB_TO_AGNO(mp,fsbno), XFS_FSB_TO_AGBNO(mp,fsbno))
592
593/*
594 * File system sector to basic block conversions.
595 */
596#define XFS_FSS_TO_BB(mp,sec) ((sec) << (mp)->m_sectbb_log)
597
598/*
599 * File system block to basic block conversions.
600 */
601#define XFS_FSB_TO_BB(mp,fsbno) ((fsbno) << (mp)->m_blkbb_log)
602#define XFS_BB_TO_FSB(mp,bb) \
603 (((bb) + (XFS_FSB_TO_BB(mp,1) - 1)) >> (mp)->m_blkbb_log)
604#define XFS_BB_TO_FSBT(mp,bb) ((bb) >> (mp)->m_blkbb_log)
605
606/*
607 * File system block to byte conversions.
608 */
609#define XFS_FSB_TO_B(mp,fsbno) ((xfs_fsize_t)(fsbno) << (mp)->m_sb.sb_blocklog)
610#define XFS_B_TO_FSB(mp,b) \
611 ((((__uint64_t)(b)) + (mp)->m_blockmask) >> (mp)->m_sb.sb_blocklog)
612#define XFS_B_TO_FSBT(mp,b) (((__uint64_t)(b)) >> (mp)->m_sb.sb_blocklog)
613#define XFS_B_FSB_OFFSET(mp,b) ((b) & (mp)->m_blockmask)
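/*
 * Worked example (editor's illustration): with 4096-byte filesystem
 * blocks, sb_blocklog = 12 and, since basic blocks are 512 bytes,
 * m_blkbb_log = 12 - 9 = 3. Then:
 *
 *	XFS_FSB_TO_B(mp, 10)    = 10 << 12 = 40960 bytes
 *	XFS_FSB_TO_BB(mp, 10)   = 10 << 3  = 80 basic blocks
 *	XFS_B_TO_FSB(mp, 4097)  = 2	(rounds up)
 *	XFS_B_TO_FSBT(mp, 4097) = 1	(truncates)
 */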
614
615/*
616 * Allocation group header
617 *
618 * This is divided into three structures, placed in sequential 512-byte
619 * buffers after a copy of the superblock (also in a 512-byte buffer).
620 */
621#define XFS_AGF_MAGIC 0x58414746 /* 'XAGF' */
622#define XFS_AGI_MAGIC 0x58414749 /* 'XAGI' */
623#define XFS_AGFL_MAGIC 0x5841464c /* 'XAFL' */
624#define XFS_AGF_VERSION 1
625#define XFS_AGI_VERSION 1
626
627#define XFS_AGF_GOOD_VERSION(v) ((v) == XFS_AGF_VERSION)
628#define XFS_AGI_GOOD_VERSION(v) ((v) == XFS_AGI_VERSION)
629
630/*
631 * Btree number 0 is bno, 1 is cnt. This value gives the size of the
632 * arrays below.
633 */
634#define XFS_BTNUM_AGF ((int)XFS_BTNUM_CNTi + 1)
635
636/*
637 * The second word of agf_levels in the first a.g. overlaps the EFS
638 * superblock's magic number. Since the magic numbers valid for EFS
639 * are > 64k, our value cannot be confused for an EFS superblock's.
640 */
641
642typedef struct xfs_agf {
643 /*
644 * Common allocation group header information
645 */
646 __be32 agf_magicnum; /* magic number == XFS_AGF_MAGIC */
647 __be32 agf_versionnum; /* header version == XFS_AGF_VERSION */
648 __be32 agf_seqno; /* sequence # starting from 0 */
649 __be32 agf_length; /* size in blocks of a.g. */
650 /*
651 * Freespace information
652 */
653 __be32 agf_roots[XFS_BTNUM_AGF]; /* root blocks */
654 __be32 agf_spare0; /* spare field */
655 __be32 agf_levels[XFS_BTNUM_AGF]; /* btree levels */
656 __be32 agf_spare1; /* spare field */
657
658 __be32 agf_flfirst; /* first freelist block's index */
659 __be32 agf_fllast; /* last freelist block's index */
660 __be32 agf_flcount; /* count of blocks in freelist */
661 __be32 agf_freeblks; /* total free blocks */
662
663 __be32 agf_longest; /* longest free space */
664 __be32 agf_btreeblks; /* # of blocks held in AGF btrees */
665 uuid_t agf_uuid; /* uuid of filesystem */
666
667 /*
668 * reserve some contiguous space for future logged fields before we add
669 * the unlogged fields. This makes the range logging via flags and
670 * structure offsets much simpler.
671 */
672 __be64 agf_spare64[16];
673
674 /* unlogged fields, written during buffer writeback. */
675 __be64 agf_lsn; /* last write sequence */
676 __be32 agf_crc; /* crc of agf sector */
677 __be32 agf_spare2;
678
679 /* structure must be padded to 64 bit alignment */
680} xfs_agf_t;
681
682#define XFS_AGF_CRC_OFF offsetof(struct xfs_agf, agf_crc)
683
684#define XFS_AGF_MAGICNUM 0x00000001
685#define XFS_AGF_VERSIONNUM 0x00000002
686#define XFS_AGF_SEQNO 0x00000004
687#define XFS_AGF_LENGTH 0x00000008
688#define XFS_AGF_ROOTS 0x00000010
689#define XFS_AGF_LEVELS 0x00000020
690#define XFS_AGF_FLFIRST 0x00000040
691#define XFS_AGF_FLLAST 0x00000080
692#define XFS_AGF_FLCOUNT 0x00000100
693#define XFS_AGF_FREEBLKS 0x00000200
694#define XFS_AGF_LONGEST 0x00000400
695#define XFS_AGF_BTREEBLKS 0x00000800
696#define XFS_AGF_UUID 0x00001000
697#define XFS_AGF_NUM_BITS 13
698#define XFS_AGF_ALL_BITS ((1 << XFS_AGF_NUM_BITS) - 1)
699
700#define XFS_AGF_FLAGS \
701 { XFS_AGF_MAGICNUM, "MAGICNUM" }, \
702 { XFS_AGF_VERSIONNUM, "VERSIONNUM" }, \
703 { XFS_AGF_SEQNO, "SEQNO" }, \
704 { XFS_AGF_LENGTH, "LENGTH" }, \
705 { XFS_AGF_ROOTS, "ROOTS" }, \
706 { XFS_AGF_LEVELS, "LEVELS" }, \
707 { XFS_AGF_FLFIRST, "FLFIRST" }, \
708 { XFS_AGF_FLLAST, "FLLAST" }, \
709 { XFS_AGF_FLCOUNT, "FLCOUNT" }, \
710 { XFS_AGF_FREEBLKS, "FREEBLKS" }, \
711 { XFS_AGF_LONGEST, "LONGEST" }, \
712 { XFS_AGF_BTREEBLKS, "BTREEBLKS" }, \
713 { XFS_AGF_UUID, "UUID" }
714
715/* disk block (xfs_daddr_t) in the AG */
716#define XFS_AGF_DADDR(mp) ((xfs_daddr_t)(1 << (mp)->m_sectbb_log))
717#define XFS_AGF_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGF_DADDR(mp))
718#define XFS_BUF_TO_AGF(bp) ((xfs_agf_t *)((bp)->b_addr))
719
720/*
721 * Size of the unlinked inode hash table in the agi.
722 */
723#define XFS_AGI_UNLINKED_BUCKETS 64
724
725typedef struct xfs_agi {
726 /*
727 * Common allocation group header information
728 */
729 __be32 agi_magicnum; /* magic number == XFS_AGI_MAGIC */
730 __be32 agi_versionnum; /* header version == XFS_AGI_VERSION */
731 __be32 agi_seqno; /* sequence # starting from 0 */
732 __be32 agi_length; /* size in blocks of a.g. */
733 /*
734 * Inode information
735 * Inodes are mapped by interpreting the inode number, so no
736 * mapping data is needed here.
737 */
738 __be32 agi_count; /* count of allocated inodes */
739 __be32 agi_root; /* root of inode btree */
740 __be32 agi_level; /* levels in inode btree */
741 __be32 agi_freecount; /* number of free inodes */
742
743 __be32 agi_newino; /* new inode just allocated */
744 __be32 agi_dirino; /* last directory inode chunk */
745 /*
746 * Hash table of inodes which have been unlinked but are
747 * still being referenced.
748 */
749 __be32 agi_unlinked[XFS_AGI_UNLINKED_BUCKETS];
750 /*
751 * This marks the end of logging region 1 and start of logging region 2.
752 */
753 uuid_t agi_uuid; /* uuid of filesystem */
754 __be32 agi_crc; /* crc of agi sector */
755 __be32 agi_pad32;
756 __be64 agi_lsn; /* last write sequence */
757
758 __be32 agi_free_root; /* root of the free inode btree */
759 __be32 agi_free_level;/* levels in free inode btree */
760
761 /* structure must be padded to 64 bit alignment */
762} xfs_agi_t;
763
764#define XFS_AGI_CRC_OFF offsetof(struct xfs_agi, agi_crc)
765
766#define XFS_AGI_MAGICNUM (1 << 0)
767#define XFS_AGI_VERSIONNUM (1 << 1)
768#define XFS_AGI_SEQNO (1 << 2)
769#define XFS_AGI_LENGTH (1 << 3)
770#define XFS_AGI_COUNT (1 << 4)
771#define XFS_AGI_ROOT (1 << 5)
772#define XFS_AGI_LEVEL (1 << 6)
773#define XFS_AGI_FREECOUNT (1 << 7)
774#define XFS_AGI_NEWINO (1 << 8)
775#define XFS_AGI_DIRINO (1 << 9)
776#define XFS_AGI_UNLINKED (1 << 10)
777#define XFS_AGI_NUM_BITS_R1 11 /* end of the 1st agi logging region */
778#define XFS_AGI_ALL_BITS_R1 ((1 << XFS_AGI_NUM_BITS_R1) - 1)
779#define XFS_AGI_FREE_ROOT (1 << 11)
780#define XFS_AGI_FREE_LEVEL (1 << 12)
781#define XFS_AGI_NUM_BITS_R2 13
782
783/* disk block (xfs_daddr_t) in the AG */
784#define XFS_AGI_DADDR(mp) ((xfs_daddr_t)(2 << (mp)->m_sectbb_log))
785#define XFS_AGI_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGI_DADDR(mp))
786#define XFS_BUF_TO_AGI(bp) ((xfs_agi_t *)((bp)->b_addr))
787
788/*
789 * The third a.g. block contains the a.g. freelist, an array
790 * of block pointers to blocks owned by the allocation btree code.
791 */
792#define XFS_AGFL_DADDR(mp) ((xfs_daddr_t)(3 << (mp)->m_sectbb_log))
793#define XFS_AGFL_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_AGFL_DADDR(mp))
794#define XFS_BUF_TO_AGFL(bp) ((xfs_agfl_t *)((bp)->b_addr))
795
796#define XFS_BUF_TO_AGFL_BNO(mp, bp) \
797 (xfs_sb_version_hascrc(&((mp)->m_sb)) ? \
798 &(XFS_BUF_TO_AGFL(bp)->agfl_bno[0]) : \
799 (__be32 *)(bp)->b_addr)
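/*
 * Worked example (editor's illustration): on a filesystem with
 * 512-byte sectors m_sectbb_log = 0, so the per-AG headers occupy four
 * consecutive 512-byte disk blocks at the start of each AG:
 *
 *	superblock copy	daddr 0	(XFS_SB_DADDR)
 *	AGF		daddr 1	(XFS_AGF_DADDR)
 *	AGI		daddr 2	(XFS_AGI_DADDR)
 *	AGFL		daddr 3	(XFS_AGFL_DADDR)
 */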
800
801/*
802 * Size of the AGFL. For CRC-enabled filesystems we steal a couple of
803 * slots in the beginning of the block for a proper header with the
804 * location information and CRC.
805 */
806#define XFS_AGFL_SIZE(mp) \
807 (((mp)->m_sb.sb_sectsize - \
808 (xfs_sb_version_hascrc(&((mp)->m_sb)) ? \
809 sizeof(struct xfs_agfl) : 0)) / \
810 sizeof(xfs_agblock_t))
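/*
 * Worked example (editor's illustration): the xfs_agfl header fields
 * below add up to 36 bytes, so for 512-byte sectors a CRC-enabled AGFL
 * holds roughly (512 - 36) / 4 = 119 block pointers (modulo any
 * compiler tail padding of the struct), versus 512 / 4 = 128 on a v4
 * filesystem with no header at all.
 */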
811
812typedef struct xfs_agfl {
813 __be32 agfl_magicnum;
814 __be32 agfl_seqno;
815 uuid_t agfl_uuid;
816 __be64 agfl_lsn;
817 __be32 agfl_crc;
818 __be32 agfl_bno[]; /* actually XFS_AGFL_SIZE(mp) */
819} xfs_agfl_t;
820
821#define XFS_AGFL_CRC_OFF offsetof(struct xfs_agfl, agfl_crc)
822
823
824#define XFS_AG_MAXLEVELS(mp) ((mp)->m_ag_maxlevels)
825#define XFS_MIN_FREELIST_RAW(bl,cl,mp) \
826 (MIN(bl + 1, XFS_AG_MAXLEVELS(mp)) + MIN(cl + 1, XFS_AG_MAXLEVELS(mp)))
827#define XFS_MIN_FREELIST(a,mp) \
828 (XFS_MIN_FREELIST_RAW( \
829 be32_to_cpu((a)->agf_levels[XFS_BTNUM_BNOi]), \
830 be32_to_cpu((a)->agf_levels[XFS_BTNUM_CNTi]), mp))
831#define XFS_MIN_FREELIST_PAG(pag,mp) \
832 (XFS_MIN_FREELIST_RAW( \
833 (unsigned int)(pag)->pagf_levels[XFS_BTNUM_BNOi], \
834 (unsigned int)(pag)->pagf_levels[XFS_BTNUM_CNTi], mp))
835
836#define XFS_AGB_TO_FSB(mp,agno,agbno) \
837 (((xfs_fsblock_t)(agno) << (mp)->m_sb.sb_agblklog) | (agbno))
838#define XFS_FSB_TO_AGNO(mp,fsbno) \
839 ((xfs_agnumber_t)((fsbno) >> (mp)->m_sb.sb_agblklog))
840#define XFS_FSB_TO_AGBNO(mp,fsbno) \
841 ((xfs_agblock_t)((fsbno) & xfs_mask32lo((mp)->m_sb.sb_agblklog)))
842#define XFS_AGB_TO_DADDR(mp,agno,agbno) \
843 ((xfs_daddr_t)XFS_FSB_TO_BB(mp, \
844 (xfs_fsblock_t)(agno) * (mp)->m_sb.sb_agblocks + (agbno)))
845#define XFS_AG_DADDR(mp,agno,d) (XFS_AGB_TO_DADDR(mp, agno, 0) + (d))
846
847/*
848 * For checking for bad ranges of xfs_daddr_t's, covering multiple
849 * allocation groups or a single xfs_daddr_t that's a superblock copy.
850 */
851#define XFS_AG_CHECK_DADDR(mp,d,len) \
852 ((len) == 1 ? \
853 ASSERT((d) == XFS_SB_DADDR || \
854 xfs_daddr_to_agbno(mp, d) != XFS_SB_DADDR) : \
855 ASSERT(xfs_daddr_to_agno(mp, d) == \
856 xfs_daddr_to_agno(mp, (d) + (len) - 1)))
857
858typedef struct xfs_timestamp {
859 __be32 t_sec; /* timestamp seconds */
860 __be32 t_nsec; /* timestamp nanoseconds */
861} xfs_timestamp_t;
862
863/*
864 * On-disk inode structure.
865 *
866 * This is just the header or "dinode core"; the inode is expanded to fill a
867 * variable size, with the leftover area split into a data and an attribute fork.
868 * The format of the data and attribute forks depends on the format of the
869 * inode as indicated by di_format and di_aformat. To access the data and
870 * attribute forks use the XFS_DFORK_DPTR, XFS_DFORK_APTR, and XFS_DFORK_PTR
871 * macros below.
872 *
873 * There is a very similar struct icdinode in xfs_inode which matches the
874 * layout of the first 96 bytes of this structure, but is kept in native
875 * format instead of big endian.
876 *
877 * Note: di_flushiter is only used by v1/2 inodes - it's effectively a zeroed
878 * padding field for v3 inodes.
879 */
880#define XFS_DINODE_MAGIC 0x494e /* 'IN' */
881#define XFS_DINODE_GOOD_VERSION(v) ((v) >= 1 && (v) <= 3)
882typedef struct xfs_dinode {
883 __be16 di_magic; /* inode magic # = XFS_DINODE_MAGIC */
884 __be16 di_mode; /* mode and type of file */
885 __u8 di_version; /* inode version */
886 __u8 di_format; /* format of di_c data */
887 __be16 di_onlink; /* old number of links to file */
888 __be32 di_uid; /* owner's user id */
889 __be32 di_gid; /* owner's group id */
890 __be32 di_nlink; /* number of links to file */
891 __be16 di_projid_lo; /* lower part of owner's project id */
892 __be16 di_projid_hi; /* higher part owner's project id */
893 __u8 di_pad[6]; /* unused, zeroed space */
894 __be16 di_flushiter; /* incremented on flush */
895 xfs_timestamp_t di_atime; /* time last accessed */
896 xfs_timestamp_t di_mtime; /* time last modified */
897 xfs_timestamp_t di_ctime; /* time created/inode modified */
898 __be64 di_size; /* number of bytes in file */
899 __be64 di_nblocks; /* # of direct & btree blocks used */
900 __be32 di_extsize; /* basic/minimum extent size for file */
901 __be32 di_nextents; /* number of extents in data fork */
902 __be16 di_anextents; /* number of extents in attribute fork*/
903 __u8 di_forkoff; /* attr fork offs, <<3 for 64b align */
904 __s8 di_aformat; /* format of attr fork's data */
905 __be32 di_dmevmask; /* DMIG event mask */
906 __be16 di_dmstate; /* DMIG state info */
907 __be16 di_flags; /* random flags, XFS_DIFLAG_... */
908 __be32 di_gen; /* generation number */
909
910 /* di_next_unlinked is the only non-core field in the old dinode */
911 __be32 di_next_unlinked;/* agi unlinked list ptr */
912
913 /* start of the extended dinode, writable fields */
914 __le32 di_crc; /* CRC of the inode */
915 __be64 di_changecount; /* number of attribute changes */
916 __be64 di_lsn; /* flush sequence */
917 __be64 di_flags2; /* more random flags */
918 __u8 di_pad2[16]; /* more padding for future expansion */
919
920 /* fields only written to during inode creation */
921 xfs_timestamp_t di_crtime; /* time created */
922 __be64 di_ino; /* inode number */
923 uuid_t di_uuid; /* UUID of the filesystem */
924
925 /* structure must be padded to 64 bit alignment */
926} xfs_dinode_t;
927
928#define XFS_DINODE_CRC_OFF offsetof(struct xfs_dinode, di_crc)
929
930#define DI_MAX_FLUSH 0xffff
931
932/*
933 * Size of the core inode on disk. Version 1 and 2 inodes have
934 * the same size, but version 3 has grown a few additional fields.
935 */
936static inline uint xfs_dinode_size(int version)
937{
938 if (version == 3)
939 return sizeof(struct xfs_dinode);
940 return offsetof(struct xfs_dinode, di_crc);
941}
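/*
 * Editor's note: the layout above pins these sizes; a hypothetical
 * build-time check (not in this patch) would document them:
 *
 *	BUILD_BUG_ON(offsetof(struct xfs_dinode, di_crc) != 100);
 *	BUILD_BUG_ON(sizeof(struct xfs_dinode) != 176);
 *
 * i.e. v1/v2 inode cores are 100 bytes on disk and v3 cores 176 bytes.
 */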
942
943/*
944 * The 32 bit link count in the inode theoretically maxes out at UINT_MAX.
945 * Since the pathconf interface is signed, we use 2^31 - 1 instead.
946 * The old inode format had a 16 bit link count, so its maximum is USHRT_MAX.
947 */
948#define XFS_MAXLINK ((1U << 31) - 1U)
949#define XFS_MAXLINK_1 65535U
950
951/*
952 * Values for di_format
953 */
954typedef enum xfs_dinode_fmt {
955 XFS_DINODE_FMT_DEV, /* xfs_dev_t */
956 XFS_DINODE_FMT_LOCAL, /* bulk data */
957 XFS_DINODE_FMT_EXTENTS, /* struct xfs_bmbt_rec */
958 XFS_DINODE_FMT_BTREE, /* struct xfs_bmdr_block */
959 XFS_DINODE_FMT_UUID /* uuid_t */
960} xfs_dinode_fmt_t;
961
962/*
963 * Inode minimum and maximum sizes.
964 */
965#define XFS_DINODE_MIN_LOG 8
966#define XFS_DINODE_MAX_LOG 11
967#define XFS_DINODE_MIN_SIZE (1 << XFS_DINODE_MIN_LOG)
968#define XFS_DINODE_MAX_SIZE (1 << XFS_DINODE_MAX_LOG)
969
970/*
971 * Inode size for given fs.
972 */
973#define XFS_LITINO(mp, version) \
974 ((int)(((mp)->m_sb.sb_inodesize) - xfs_dinode_size(version)))
975
976/*
977 * Inode data & attribute fork sizes, per inode.
978 */
979#define XFS_DFORK_Q(dip) ((dip)->di_forkoff != 0)
980#define XFS_DFORK_BOFF(dip) ((int)((dip)->di_forkoff << 3))
981
982#define XFS_DFORK_DSIZE(dip,mp) \
983 (XFS_DFORK_Q(dip) ? \
984 XFS_DFORK_BOFF(dip) : \
985 XFS_LITINO(mp, (dip)->di_version))
986#define XFS_DFORK_ASIZE(dip,mp) \
987 (XFS_DFORK_Q(dip) ? \
988 XFS_LITINO(mp, (dip)->di_version) - XFS_DFORK_BOFF(dip) : \
989 0)
990#define XFS_DFORK_SIZE(dip,mp,w) \
991 ((w) == XFS_DATA_FORK ? \
992 XFS_DFORK_DSIZE(dip, mp) : \
993 XFS_DFORK_ASIZE(dip, mp))
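/*
 * Worked example (editor's illustration): on a v5 filesystem with
 * 512-byte inodes, the v3 dinode core is 176 bytes, so XFS_LITINO()
 * is 512 - 176 = 336 bytes of literal area. If di_forkoff = 15, the
 * attr fork starts 15 << 3 = 120 bytes in, giving
 * XFS_DFORK_DSIZE() = 120 and XFS_DFORK_ASIZE() = 336 - 120 = 216.
 */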
994
995/*
996 * Return pointers to the data or attribute forks.
997 */
998#define XFS_DFORK_DPTR(dip) \
999 ((char *)dip + xfs_dinode_size(dip->di_version))
1000#define XFS_DFORK_APTR(dip) \
1001 (XFS_DFORK_DPTR(dip) + XFS_DFORK_BOFF(dip))
1002#define XFS_DFORK_PTR(dip,w) \
1003 ((w) == XFS_DATA_FORK ? XFS_DFORK_DPTR(dip) : XFS_DFORK_APTR(dip))
1004
1005#define XFS_DFORK_FORMAT(dip,w) \
1006 ((w) == XFS_DATA_FORK ? \
1007 (dip)->di_format : \
1008 (dip)->di_aformat)
1009#define XFS_DFORK_NEXTENTS(dip,w) \
1010 ((w) == XFS_DATA_FORK ? \
1011 be32_to_cpu((dip)->di_nextents) : \
1012 be16_to_cpu((dip)->di_anextents))
1013
1014/*
1015 * For block and character special files the 32-bit dev_t is stored at the
1016 * beginning of the data fork.
1017 */
1018static inline xfs_dev_t xfs_dinode_get_rdev(struct xfs_dinode *dip)
1019{
1020 return be32_to_cpu(*(__be32 *)XFS_DFORK_DPTR(dip));
1021}
1022
1023static inline void xfs_dinode_put_rdev(struct xfs_dinode *dip, xfs_dev_t rdev)
1024{
1025 *(__be32 *)XFS_DFORK_DPTR(dip) = cpu_to_be32(rdev);
1026}
1027
1028/*
1029 * Values for di_flags
1030 * There should be a one-to-one correspondence between these flags and the
1031 * XFS_XFLAG_s.
1032 */
1033#define XFS_DIFLAG_REALTIME_BIT 0 /* file's blocks come from rt area */
1034#define XFS_DIFLAG_PREALLOC_BIT 1 /* file space has been preallocated */
1035#define XFS_DIFLAG_NEWRTBM_BIT 2 /* for rtbitmap inode, new format */
1036#define XFS_DIFLAG_IMMUTABLE_BIT 3 /* inode is immutable */
1037#define XFS_DIFLAG_APPEND_BIT 4 /* inode is append-only */
1038#define XFS_DIFLAG_SYNC_BIT 5 /* inode is written synchronously */
1039#define XFS_DIFLAG_NOATIME_BIT 6 /* do not update atime */
1040#define XFS_DIFLAG_NODUMP_BIT 7 /* do not dump */
1041#define XFS_DIFLAG_RTINHERIT_BIT 8 /* create with realtime bit set */
1042#define XFS_DIFLAG_PROJINHERIT_BIT 9 /* create with parents projid */
1043#define XFS_DIFLAG_NOSYMLINKS_BIT 10 /* disallow symlink creation */
1044#define XFS_DIFLAG_EXTSIZE_BIT 11 /* inode extent size allocator hint */
1045#define XFS_DIFLAG_EXTSZINHERIT_BIT 12 /* inherit inode extent size */
1046#define XFS_DIFLAG_NODEFRAG_BIT 13 /* do not reorganize/defragment */
1047#define XFS_DIFLAG_FILESTREAM_BIT 14 /* use filestream allocator */
1048#define XFS_DIFLAG_REALTIME (1 << XFS_DIFLAG_REALTIME_BIT)
1049#define XFS_DIFLAG_PREALLOC (1 << XFS_DIFLAG_PREALLOC_BIT)
1050#define XFS_DIFLAG_NEWRTBM (1 << XFS_DIFLAG_NEWRTBM_BIT)
1051#define XFS_DIFLAG_IMMUTABLE (1 << XFS_DIFLAG_IMMUTABLE_BIT)
1052#define XFS_DIFLAG_APPEND (1 << XFS_DIFLAG_APPEND_BIT)
1053#define XFS_DIFLAG_SYNC (1 << XFS_DIFLAG_SYNC_BIT)
1054#define XFS_DIFLAG_NOATIME (1 << XFS_DIFLAG_NOATIME_BIT)
1055#define XFS_DIFLAG_NODUMP (1 << XFS_DIFLAG_NODUMP_BIT)
1056#define XFS_DIFLAG_RTINHERIT (1 << XFS_DIFLAG_RTINHERIT_BIT)
1057#define XFS_DIFLAG_PROJINHERIT (1 << XFS_DIFLAG_PROJINHERIT_BIT)
1058#define XFS_DIFLAG_NOSYMLINKS (1 << XFS_DIFLAG_NOSYMLINKS_BIT)
1059#define XFS_DIFLAG_EXTSIZE (1 << XFS_DIFLAG_EXTSIZE_BIT)
1060#define XFS_DIFLAG_EXTSZINHERIT (1 << XFS_DIFLAG_EXTSZINHERIT_BIT)
1061#define XFS_DIFLAG_NODEFRAG (1 << XFS_DIFLAG_NODEFRAG_BIT)
1062#define XFS_DIFLAG_FILESTREAM (1 << XFS_DIFLAG_FILESTREAM_BIT)
1063
1064#define XFS_DIFLAG_ANY \
1065 (XFS_DIFLAG_REALTIME | XFS_DIFLAG_PREALLOC | XFS_DIFLAG_NEWRTBM | \
1066 XFS_DIFLAG_IMMUTABLE | XFS_DIFLAG_APPEND | XFS_DIFLAG_SYNC | \
1067 XFS_DIFLAG_NOATIME | XFS_DIFLAG_NODUMP | XFS_DIFLAG_RTINHERIT | \
1068 XFS_DIFLAG_PROJINHERIT | XFS_DIFLAG_NOSYMLINKS | XFS_DIFLAG_EXTSIZE | \
1069 XFS_DIFLAG_EXTSZINHERIT | XFS_DIFLAG_NODEFRAG | XFS_DIFLAG_FILESTREAM)
1070
1071/*
1072 * Inode number format:
1073 * low inopblog bits - offset in block
1074 * next agblklog bits - block number in ag
1075 * next agno_log bits - ag number
1076 * remaining high bits above agno_log+agblklog+inopblog - always 0
1077 */
1078#define XFS_INO_MASK(k) (__uint32_t)((1ULL << (k)) - 1)
1079#define XFS_INO_OFFSET_BITS(mp) (mp)->m_sb.sb_inopblog
1080#define XFS_INO_AGBNO_BITS(mp) (mp)->m_sb.sb_agblklog
1081#define XFS_INO_AGINO_BITS(mp) (mp)->m_agino_log
1082#define XFS_INO_AGNO_BITS(mp) (mp)->m_agno_log
1083#define XFS_INO_BITS(mp) \
1084 XFS_INO_AGNO_BITS(mp) + XFS_INO_AGINO_BITS(mp)
1085#define XFS_INO_TO_AGNO(mp,i) \
1086 ((xfs_agnumber_t)((i) >> XFS_INO_AGINO_BITS(mp)))
1087#define XFS_INO_TO_AGINO(mp,i) \
1088 ((xfs_agino_t)(i) & XFS_INO_MASK(XFS_INO_AGINO_BITS(mp)))
1089#define XFS_INO_TO_AGBNO(mp,i) \
1090 (((xfs_agblock_t)(i) >> XFS_INO_OFFSET_BITS(mp)) & \
1091 XFS_INO_MASK(XFS_INO_AGBNO_BITS(mp)))
1092#define XFS_INO_TO_OFFSET(mp,i) \
1093 ((int)(i) & XFS_INO_MASK(XFS_INO_OFFSET_BITS(mp)))
1094#define XFS_INO_TO_FSB(mp,i) \
1095 XFS_AGB_TO_FSB(mp, XFS_INO_TO_AGNO(mp,i), XFS_INO_TO_AGBNO(mp,i))
1096#define XFS_AGINO_TO_INO(mp,a,i) \
1097 (((xfs_ino_t)(a) << XFS_INO_AGINO_BITS(mp)) | (i))
1098#define XFS_AGINO_TO_AGBNO(mp,i) ((i) >> XFS_INO_OFFSET_BITS(mp))
1099#define XFS_AGINO_TO_OFFSET(mp,i) \
1100 ((i) & XFS_INO_MASK(XFS_INO_OFFSET_BITS(mp)))
1101#define XFS_OFFBNO_TO_AGINO(mp,b,o) \
1102 ((xfs_agino_t)(((b) << XFS_INO_OFFSET_BITS(mp)) | (o)))
1103
1104#define XFS_MAXINUMBER ((xfs_ino_t)((1ULL << 56) - 1ULL))
1105#define XFS_MAXINUMBER_32 ((xfs_ino_t)((1ULL << 32) - 1ULL))
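/*
 * Worked example (editor's illustration): with 4096-byte blocks and
 * 256-byte inodes, sb_inopblog = 4 (16 inodes per block). If
 * sb_agblklog = 16, then m_agino_log = 16 + 4 = 20 and, for inode
 * number 0x345678:
 *
 *	XFS_INO_TO_AGNO(mp, ino)   = 0x345678 >> 20	 = 3
 *	XFS_INO_TO_AGINO(mp, ino)  = 0x345678 & 0xfffff = 0x45678
 *	XFS_INO_TO_AGBNO(mp, ino)  = 0x45678 >> 4	 = 0x4567
 *	XFS_INO_TO_OFFSET(mp, ino) = 0x45678 & 0xf	 = 8
 */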
1106
1107/*
37 1108 * RealTime Device format definitions
38 1109 */
39 1110
@@ -413,4 +1484,40 @@ struct xfs_btree_block {
413 1484#define XFS_BTREE_LBLOCK_CRC_OFF \
414 1485 offsetof(struct xfs_btree_block, bb_u.l.bb_crc)
415 1486
1487/*
1488 * On-disk XFS access control list structure.
1489 */
1490struct xfs_acl_entry {
1491 __be32 ae_tag;
1492 __be32 ae_id;
1493 __be16 ae_perm;
1494 __be16 ae_pad; /* fill the implicit hole in the structure */
1495};
1496
1497struct xfs_acl {
1498 __be32 acl_cnt;
1499 struct xfs_acl_entry acl_entry[0];
1500};
1501
1502/*
1503 * The number of ACL entries allowed is defined by the on-disk format.
1504 * For v4 superblocks, that is limited to 25 entries. For v5 superblocks, it is
1505 * limited only by the maximum size of the xattr that stores the information.
1506 */
1507#define XFS_ACL_MAX_ENTRIES(mp) \
1508 (xfs_sb_version_hascrc(&mp->m_sb) \
1509 ? (XATTR_SIZE_MAX - sizeof(struct xfs_acl)) / \
1510 sizeof(struct xfs_acl_entry) \
1511 : 25)
1512
1513#define XFS_ACL_MAX_SIZE(mp) \
1514 (sizeof(struct xfs_acl) + \
1515 sizeof(struct xfs_acl_entry) * XFS_ACL_MAX_ENTRIES((mp)))
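/*
 * Worked example (editor's illustration): each xfs_acl_entry is
 * 4 + 4 + 2 + 2 = 12 bytes and the xfs_acl header is 4 bytes, so with
 * the usual XATTR_SIZE_MAX of 65536 a v5 filesystem allows
 * (65536 - 4) / 12 = 5461 entries, against the fixed 25 of v4.
 */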
1516
1517/* On-disk XFS extended attribute names */
1518#define SGI_ACL_FILE (unsigned char *)"SGI_ACL_FILE"
1519#define SGI_ACL_DEFAULT (unsigned char *)"SGI_ACL_DEFAULT"
1520#define SGI_ACL_FILE_SIZE (sizeof(SGI_ACL_FILE)-1)
1521#define SGI_ACL_DEFAULT_SIZE (sizeof(SGI_ACL_DEFAULT)-1)
1522
416 1523#endif /* __XFS_FORMAT_H__ */
diff --git a/fs/xfs/libxfs/xfs_ialloc.c b/fs/xfs/libxfs/xfs_ialloc.c
index 23dcb72fc5e6..116ef1ddb3e3 100644
--- a/fs/xfs/libxfs/xfs_ialloc.c
+++ b/fs/xfs/libxfs/xfs_ialloc.c
@@ -22,9 +22,7 @@
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_bit.h" 24#include "xfs_bit.h"
25#include "xfs_inum.h"
26#include "xfs_sb.h" 25#include "xfs_sb.h"
27#include "xfs_ag.h"
28#include "xfs_mount.h" 26#include "xfs_mount.h"
29#include "xfs_inode.h" 27#include "xfs_inode.h"
30#include "xfs_btree.h" 28#include "xfs_btree.h"
@@ -39,7 +37,6 @@
39#include "xfs_buf_item.h" 37#include "xfs_buf_item.h"
40#include "xfs_icreate_item.h" 38#include "xfs_icreate_item.h"
41#include "xfs_icache.h" 39#include "xfs_icache.h"
42#include "xfs_dinode.h"
43#include "xfs_trace.h" 40#include "xfs_trace.h"
44 41
45 42
@@ -48,12 +45,12 @@
48 45 */
49 46static inline int
50 47xfs_ialloc_cluster_alignment(
51 xfs_alloc_arg_t *args)
48 struct xfs_mount *mp)
52 49{
53 if (xfs_sb_version_hasalign(&args->mp->m_sb) &&
54 args->mp->m_sb.sb_inoalignmt >=
55 XFS_B_TO_FSBT(args->mp, args->mp->m_inode_cluster_size))
56 return args->mp->m_sb.sb_inoalignmt;
50 if (xfs_sb_version_hasalign(&mp->m_sb) &&
51 mp->m_sb.sb_inoalignmt >=
52 XFS_B_TO_FSBT(mp, mp->m_inode_cluster_size))
53 return mp->m_sb.sb_inoalignmt;
57 54 return 1;
58 55}
59 56
@@ -412,7 +409,7 @@ xfs_ialloc_ag_alloc(
412 409 * but not to use them in the actual exact allocation.
413 410 */
414 411 args.alignment = 1;
415 args.minalignslop = xfs_ialloc_cluster_alignment(&args) - 1;
412 args.minalignslop = xfs_ialloc_cluster_alignment(args.mp) - 1;
416 413
417 414 /* Allow space for the inode btree to split. */
418 415 args.minleft = args.mp->m_in_maxlevels - 1;
@@ -448,7 +445,7 @@ xfs_ialloc_ag_alloc(
448 445 args.alignment = args.mp->m_dalign;
449 446 isaligned = 1;
450 447 } else
451 args.alignment = xfs_ialloc_cluster_alignment(&args);
448 args.alignment = xfs_ialloc_cluster_alignment(args.mp);
452 449 /*
453 450 * Need to figure out where to allocate the inode blocks.
454 451 * Ideally they should be spaced out through the a.g.
@@ -477,7 +474,7 @@ xfs_ialloc_ag_alloc(
477 474 args.type = XFS_ALLOCTYPE_NEAR_BNO;
478 475 args.agbno = be32_to_cpu(agi->agi_root);
479 476 args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
480 args.alignment = xfs_ialloc_cluster_alignment(&args);
477 args.alignment = xfs_ialloc_cluster_alignment(args.mp);
481 478 if ((error = xfs_alloc_vextent(&args)))
482 479 return error;
483 480 }
@@ -632,10 +629,24 @@ xfs_ialloc_ag_select(
632 629 }
633 630
634 631 /*
635 * Is there enough free space for the file plus a block of
636 * inodes? (if we need to allocate some)?
632 * Check that there is enough free space for the file plus a
633 * chunk of inodes if we need to allocate some. If this is the
634 * first pass across the AGs, take into account the potential
635 * space needed for alignment of inode chunks when checking the
636 * longest contiguous free space in the AG - this prevents us
637 * from getting ENOSPC because we have free space larger than
638 * m_ialloc_blks but alignment constraints prevent us from using
639 * it.
640 *
641 * If we can't find an AG with space for full alignment slack to
642 * be taken into account, we must be near ENOSPC in all AGs.
643 * Hence we don't include alignment for the second pass and so
644 * if we fail allocation due to alignment issues then it is most
645 * likely a real ENOSPC condition.
637 646 */
638 647 ineed = mp->m_ialloc_blks;
648 if (flags && ineed > 1)
649 ineed += xfs_ialloc_cluster_alignment(mp);
639 650 longest = pag->pagf_longest;
640 651 if (!longest)
641 652 longest = pag->pagf_flcount > 0;
@@ -1137,11 +1148,7 @@ xfs_dialloc_ag_update_inobt(
1137 1148 XFS_WANT_CORRUPTED_RETURN((rec.ir_free == frec->ir_free) &&
1138 1149 (rec.ir_freecount == frec->ir_freecount));
1139 1150
1140 error = xfs_inobt_update(cur, &rec);
1141 if (error)
1142 return error;
1143
1144 return 0;
1151 return xfs_inobt_update(cur, &rec);
1145 1152}
1146 1153
1147 1154/*
diff --git a/fs/xfs/libxfs/xfs_ialloc.h b/fs/xfs/libxfs/xfs_ialloc.h
index 95ad1c002d60..100007d56449 100644
--- a/fs/xfs/libxfs/xfs_ialloc.h
+++ b/fs/xfs/libxfs/xfs_ialloc.h
@@ -160,4 +160,8 @@ int xfs_ialloc_inode_init(struct xfs_mount *mp, struct xfs_trans *tp,
160 160 xfs_agnumber_t agno, xfs_agblock_t agbno,
161 161 xfs_agblock_t length, unsigned int gen);
162 162
163int xfs_read_agi(struct xfs_mount *mp, struct xfs_trans *tp,
164 xfs_agnumber_t agno, struct xfs_buf **bpp);
165
166
163 167#endif /* __XFS_IALLOC_H__ */
diff --git a/fs/xfs/libxfs/xfs_ialloc_btree.c b/fs/xfs/libxfs/xfs_ialloc_btree.c
index c9b06f30fe86..964c465ca69c 100644
--- a/fs/xfs/libxfs/xfs_ialloc_btree.c
+++ b/fs/xfs/libxfs/xfs_ialloc_btree.c
@@ -22,8 +22,6 @@
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_bit.h" 24#include "xfs_bit.h"
25#include "xfs_sb.h"
26#include "xfs_ag.h"
27#include "xfs_mount.h" 25#include "xfs_mount.h"
28#include "xfs_inode.h" 26#include "xfs_inode.h"
29#include "xfs_btree.h" 27#include "xfs_btree.h"
diff --git a/fs/xfs/libxfs/xfs_inode_buf.c b/fs/xfs/libxfs/xfs_inode_buf.c
index f18fd2da49f7..002b6b3a1988 100644
--- a/fs/xfs/libxfs/xfs_inode_buf.c
+++ b/fs/xfs/libxfs/xfs_inode_buf.c
@@ -21,8 +21,6 @@
21#include "xfs_format.h" 21#include "xfs_format.h"
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_sb.h"
25#include "xfs_ag.h"
26#include "xfs_mount.h" 24#include "xfs_mount.h"
27#include "xfs_inode.h" 25#include "xfs_inode.h"
28#include "xfs_error.h" 26#include "xfs_error.h"
@@ -30,7 +28,6 @@
30#include "xfs_icache.h" 28#include "xfs_icache.h"
31#include "xfs_trans.h" 29#include "xfs_trans.h"
32#include "xfs_ialloc.h" 30#include "xfs_ialloc.h"
33#include "xfs_dinode.h"
34 31
35 32/*
36 33 * Check that none of the inodes in the buffer have a next
diff --git a/fs/xfs/libxfs/xfs_inode_fork.c b/fs/xfs/libxfs/xfs_inode_fork.c
index 6a00f7fed69d..0defbd02f62d 100644
--- a/fs/xfs/libxfs/xfs_inode_fork.c
+++ b/fs/xfs/libxfs/xfs_inode_fork.c
@@ -22,9 +22,6 @@
22#include "xfs_format.h" 22#include "xfs_format.h"
23#include "xfs_log_format.h" 23#include "xfs_log_format.h"
24#include "xfs_trans_resv.h" 24#include "xfs_trans_resv.h"
25#include "xfs_inum.h"
26#include "xfs_sb.h"
27#include "xfs_ag.h"
28#include "xfs_mount.h" 25#include "xfs_mount.h"
29#include "xfs_inode.h" 26#include "xfs_inode.h"
30#include "xfs_trans.h" 27#include "xfs_trans.h"
@@ -34,7 +31,6 @@
34#include "xfs_error.h" 31#include "xfs_error.h"
35#include "xfs_trace.h" 32#include "xfs_trace.h"
36#include "xfs_attr_sf.h" 33#include "xfs_attr_sf.h"
37#include "xfs_dinode.h"
38 34
39 35kmem_zone_t *xfs_ifork_zone;
40 36
diff --git a/fs/xfs/libxfs/xfs_inum.h b/fs/xfs/libxfs/xfs_inum.h
deleted file mode 100644
index 4ff2278e147a..000000000000
--- a/fs/xfs/libxfs/xfs_inum.h
+++ /dev/null
@@ -1,60 +0,0 @@
1/*
2 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
17 */
18#ifndef __XFS_INUM_H__
19#define __XFS_INUM_H__
20
21/*
22 * Inode number format:
23 * low inopblog bits - offset in block
24 * next agblklog bits - block number in ag
25 * next agno_log bits - ag number
26 * high agno_log-agblklog-inopblog bits - 0
27 */
28
29struct xfs_mount;
30
31#define XFS_INO_MASK(k) (__uint32_t)((1ULL << (k)) - 1)
32#define XFS_INO_OFFSET_BITS(mp) (mp)->m_sb.sb_inopblog
33#define XFS_INO_AGBNO_BITS(mp) (mp)->m_sb.sb_agblklog
34#define XFS_INO_AGINO_BITS(mp) (mp)->m_agino_log
35#define XFS_INO_AGNO_BITS(mp) (mp)->m_agno_log
36#define XFS_INO_BITS(mp) \
37 XFS_INO_AGNO_BITS(mp) + XFS_INO_AGINO_BITS(mp)
38#define XFS_INO_TO_AGNO(mp,i) \
39 ((xfs_agnumber_t)((i) >> XFS_INO_AGINO_BITS(mp)))
40#define XFS_INO_TO_AGINO(mp,i) \
41 ((xfs_agino_t)(i) & XFS_INO_MASK(XFS_INO_AGINO_BITS(mp)))
42#define XFS_INO_TO_AGBNO(mp,i) \
43 (((xfs_agblock_t)(i) >> XFS_INO_OFFSET_BITS(mp)) & \
44 XFS_INO_MASK(XFS_INO_AGBNO_BITS(mp)))
45#define XFS_INO_TO_OFFSET(mp,i) \
46 ((int)(i) & XFS_INO_MASK(XFS_INO_OFFSET_BITS(mp)))
47#define XFS_INO_TO_FSB(mp,i) \
48 XFS_AGB_TO_FSB(mp, XFS_INO_TO_AGNO(mp,i), XFS_INO_TO_AGBNO(mp,i))
49#define XFS_AGINO_TO_INO(mp,a,i) \
50 (((xfs_ino_t)(a) << XFS_INO_AGINO_BITS(mp)) | (i))
51#define XFS_AGINO_TO_AGBNO(mp,i) ((i) >> XFS_INO_OFFSET_BITS(mp))
52#define XFS_AGINO_TO_OFFSET(mp,i) \
53 ((i) & XFS_INO_MASK(XFS_INO_OFFSET_BITS(mp)))
54#define XFS_OFFBNO_TO_AGINO(mp,b,o) \
55 ((xfs_agino_t)(((b) << XFS_INO_OFFSET_BITS(mp)) | (o)))
56
57#define XFS_MAXINUMBER ((xfs_ino_t)((1ULL << 56) - 1ULL))
58#define XFS_MAXINUMBER_32 ((xfs_ino_t)((1ULL << 32) - 1ULL))
59
60#endif /* __XFS_INUM_H__ */
diff --git a/fs/xfs/libxfs/xfs_log_format.h b/fs/xfs/libxfs/xfs_log_format.h
index aff12f2d4428..265314690415 100644
--- a/fs/xfs/libxfs/xfs_log_format.h
+++ b/fs/xfs/libxfs/xfs_log_format.h
@@ -361,7 +361,7 @@ typedef struct xfs_ictimestamp {
361 361
362 362/*
363 363 * NOTE: This structure must be kept identical to struct xfs_dinode
364 * in xfs_dinode.h except for the endianness annotations.
364 * except for the endianness annotations.
365 365 */
366 366typedef struct xfs_icdinode {
367 367 __uint16_t di_magic; /* inode magic # = XFS_DINODE_MAGIC */
diff --git a/fs/xfs/libxfs/xfs_log_rlimit.c b/fs/xfs/libxfs/xfs_log_rlimit.c
index ee7e0e80246b..c10597973333 100644
--- a/fs/xfs/libxfs/xfs_log_rlimit.c
+++ b/fs/xfs/libxfs/xfs_log_rlimit.c
@@ -21,8 +21,6 @@
21#include "xfs_format.h" 21#include "xfs_format.h"
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_ag.h"
25#include "xfs_sb.h"
26#include "xfs_mount.h" 24#include "xfs_mount.h"
27#include "xfs_da_format.h" 25#include "xfs_da_format.h"
28#include "xfs_trans_space.h" 26#include "xfs_trans_space.h"
diff --git a/fs/xfs/libxfs/xfs_rtbitmap.c b/fs/xfs/libxfs/xfs_rtbitmap.c
index 7c818f1e4484..9b59ffa1fc19 100644
--- a/fs/xfs/libxfs/xfs_rtbitmap.c
+++ b/fs/xfs/libxfs/xfs_rtbitmap.c
@@ -22,8 +22,6 @@
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_bit.h" 24#include "xfs_bit.h"
25#include "xfs_sb.h"
26#include "xfs_ag.h"
27#include "xfs_mount.h" 25#include "xfs_mount.h"
28#include "xfs_inode.h" 26#include "xfs_inode.h"
29#include "xfs_bmap.h" 27#include "xfs_bmap.h"
@@ -36,7 +34,6 @@
36#include "xfs_trace.h" 34#include "xfs_trace.h"
37#include "xfs_buf.h" 35#include "xfs_buf.h"
38#include "xfs_icache.h" 36#include "xfs_icache.h"
39#include "xfs_dinode.h"
40#include "xfs_rtalloc.h" 37#include "xfs_rtalloc.h"
41 38
42 39
diff --git a/fs/xfs/libxfs/xfs_sb.c b/fs/xfs/libxfs/xfs_sb.c
index 5f902fa7913f..752915fa775a 100644
--- a/fs/xfs/libxfs/xfs_sb.c
+++ b/fs/xfs/libxfs/xfs_sb.c
@@ -23,7 +23,6 @@
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_bit.h" 24#include "xfs_bit.h"
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h"
27#include "xfs_mount.h" 26#include "xfs_mount.h"
28#include "xfs_inode.h" 27#include "xfs_inode.h"
29#include "xfs_ialloc.h" 28#include "xfs_ialloc.h"
@@ -33,7 +32,6 @@
33#include "xfs_cksum.h" 32#include "xfs_cksum.h"
34#include "xfs_trans.h" 33#include "xfs_trans.h"
35#include "xfs_buf_item.h" 34#include "xfs_buf_item.h"
36#include "xfs_dinode.h"
37#include "xfs_bmap_btree.h" 35#include "xfs_bmap_btree.h"
38#include "xfs_alloc_btree.h" 36#include "xfs_alloc_btree.h"
39#include "xfs_ialloc_btree.h" 37#include "xfs_ialloc_btree.h"
diff --git a/fs/xfs/libxfs/xfs_sb.h b/fs/xfs/libxfs/xfs_sb.h
index 2e739708afd3..8eb1c54bafbf 100644
--- a/fs/xfs/libxfs/xfs_sb.h
+++ b/fs/xfs/libxfs/xfs_sb.h
@@ -19,590 +19,6 @@
19 19#define __XFS_SB_H__
20 20
21 21/*
22 * Super block
23 * Fits into a sector-sized buffer at address 0 of each allocation group.
24 * Only the first of these is ever updated except during growfs.
25 */
26
27struct xfs_buf;
28struct xfs_mount;
29struct xfs_trans;
30
31#define XFS_SB_MAGIC 0x58465342 /* 'XFSB' */
32#define XFS_SB_VERSION_1 1 /* 5.3, 6.0.1, 6.1 */
33#define XFS_SB_VERSION_2 2 /* 6.2 - attributes */
34#define XFS_SB_VERSION_3 3 /* 6.2 - new inode version */
35#define XFS_SB_VERSION_4 4 /* 6.2+ - bitmask version */
36#define XFS_SB_VERSION_5 5 /* CRC enabled filesystem */
37#define XFS_SB_VERSION_NUMBITS 0x000f
38#define XFS_SB_VERSION_ALLFBITS 0xfff0
39#define XFS_SB_VERSION_ATTRBIT 0x0010
40#define XFS_SB_VERSION_NLINKBIT 0x0020
41#define XFS_SB_VERSION_QUOTABIT 0x0040
42#define XFS_SB_VERSION_ALIGNBIT 0x0080
43#define XFS_SB_VERSION_DALIGNBIT 0x0100
44#define XFS_SB_VERSION_SHAREDBIT 0x0200
45#define XFS_SB_VERSION_LOGV2BIT 0x0400
46#define XFS_SB_VERSION_SECTORBIT 0x0800
47#define XFS_SB_VERSION_EXTFLGBIT 0x1000
48#define XFS_SB_VERSION_DIRV2BIT 0x2000
49#define XFS_SB_VERSION_BORGBIT 0x4000 /* ASCII only case-insens. */
50#define XFS_SB_VERSION_MOREBITSBIT 0x8000
51
52/*
53 * Supported feature bit list is just all bits in the versionnum field because
54 * we've used them all up and understand them all. Except, of course, for the
55 * shared superblock bit, which nobody knows what it does and so is unsupported.
56 */
57#define XFS_SB_VERSION_OKBITS \
58 ((XFS_SB_VERSION_NUMBITS | XFS_SB_VERSION_ALLFBITS) & \
59 ~XFS_SB_VERSION_SHAREDBIT)
60
61/*
62 * There are two words to hold XFS "feature" bits: the original
63 * word, sb_versionnum, and sb_features2. Whenever a bit is set in
64 * sb_features2, the feature bit XFS_SB_VERSION_MOREBITSBIT must be set.
65 *
66 * These defines represent bits in sb_features2.
67 */
68#define XFS_SB_VERSION2_RESERVED1BIT 0x00000001
69#define XFS_SB_VERSION2_LAZYSBCOUNTBIT 0x00000002 /* Superblk counters */
70#define XFS_SB_VERSION2_RESERVED4BIT 0x00000004
71#define XFS_SB_VERSION2_ATTR2BIT 0x00000008 /* Inline attr rework */
72#define XFS_SB_VERSION2_PARENTBIT 0x00000010 /* parent pointers */
73#define XFS_SB_VERSION2_PROJID32BIT 0x00000080 /* 32 bit project id */
74#define XFS_SB_VERSION2_CRCBIT 0x00000100 /* metadata CRCs */
75#define XFS_SB_VERSION2_FTYPE 0x00000200 /* inode type in dir */
76
77#define XFS_SB_VERSION2_OKBITS \
78 (XFS_SB_VERSION2_LAZYSBCOUNTBIT | \
79 XFS_SB_VERSION2_ATTR2BIT | \
80 XFS_SB_VERSION2_PROJID32BIT | \
81 XFS_SB_VERSION2_FTYPE)
82
83/*
84 * Superblock - in core version. Must match the ondisk version below.
85 * Must be padded to 64 bit alignment.
86 */
87typedef struct xfs_sb {
88 __uint32_t sb_magicnum; /* magic number == XFS_SB_MAGIC */
89 __uint32_t sb_blocksize; /* logical block size, bytes */
90 xfs_rfsblock_t sb_dblocks; /* number of data blocks */
91 xfs_rfsblock_t sb_rblocks; /* number of realtime blocks */
92 xfs_rtblock_t sb_rextents; /* number of realtime extents */
93 uuid_t sb_uuid; /* file system unique id */
94 xfs_fsblock_t sb_logstart; /* starting block of log if internal */
95 xfs_ino_t sb_rootino; /* root inode number */
96 xfs_ino_t sb_rbmino; /* bitmap inode for realtime extents */
97 xfs_ino_t sb_rsumino; /* summary inode for rt bitmap */
98 xfs_agblock_t sb_rextsize; /* realtime extent size, blocks */
99 xfs_agblock_t sb_agblocks; /* size of an allocation group */
100 xfs_agnumber_t sb_agcount; /* number of allocation groups */
101 xfs_extlen_t sb_rbmblocks; /* number of rt bitmap blocks */
102 xfs_extlen_t sb_logblocks; /* number of log blocks */
103 __uint16_t sb_versionnum; /* header version == XFS_SB_VERSION */
104 __uint16_t sb_sectsize; /* volume sector size, bytes */
105 __uint16_t sb_inodesize; /* inode size, bytes */
106 __uint16_t sb_inopblock; /* inodes per block */
107 char sb_fname[12]; /* file system name */
108 __uint8_t sb_blocklog; /* log2 of sb_blocksize */
109 __uint8_t sb_sectlog; /* log2 of sb_sectsize */
110 __uint8_t sb_inodelog; /* log2 of sb_inodesize */
111 __uint8_t sb_inopblog; /* log2 of sb_inopblock */
112 __uint8_t sb_agblklog; /* log2 of sb_agblocks (rounded up) */
113 __uint8_t sb_rextslog; /* log2 of sb_rextents */
114 __uint8_t sb_inprogress; /* mkfs is in progress, don't mount */
115 __uint8_t sb_imax_pct; /* max % of fs for inode space */
116 /* statistics */
117 /*
118 * These fields must remain contiguous. If you really
119 * want to change their layout, make sure you fix the
120 * code in xfs_trans_apply_sb_deltas().
121 */
122 __uint64_t sb_icount; /* allocated inodes */
123 __uint64_t sb_ifree; /* free inodes */
124 __uint64_t sb_fdblocks; /* free data blocks */
125 __uint64_t sb_frextents; /* free realtime extents */
126 /*
127 * End contiguous fields.
128 */
129 xfs_ino_t sb_uquotino; /* user quota inode */
130 xfs_ino_t sb_gquotino; /* group quota inode */
131 __uint16_t sb_qflags; /* quota flags */
132 __uint8_t sb_flags; /* misc. flags */
133 __uint8_t sb_shared_vn; /* shared version number */
134 xfs_extlen_t sb_inoalignmt; /* inode chunk alignment, fsblocks */
135 __uint32_t sb_unit; /* stripe or raid unit */
136 __uint32_t sb_width; /* stripe or raid width */
137 __uint8_t sb_dirblklog; /* log2 of dir block size (fsbs) */
138 __uint8_t sb_logsectlog; /* log2 of the log sector size */
139 __uint16_t sb_logsectsize; /* sector size for the log, bytes */
140 __uint32_t sb_logsunit; /* stripe unit size for the log */
141 __uint32_t sb_features2; /* additional feature bits */
142
143 /*
144 * bad features2 field as a result of failing to pad the sb
145 * structure to 64 bits. Some machines will be using this field
146 * for features2 bits. Easiest just to mark it bad and not use
147 * it for anything else.
148 */
149 __uint32_t sb_bad_features2;
150
151 /* version 5 superblock fields start here */
152
153 /* feature masks */
154 __uint32_t sb_features_compat;
155 __uint32_t sb_features_ro_compat;
156 __uint32_t sb_features_incompat;
157 __uint32_t sb_features_log_incompat;
158
159 __uint32_t sb_crc; /* superblock crc */
160 __uint32_t sb_pad;
161
162 xfs_ino_t sb_pquotino; /* project quota inode */
163 xfs_lsn_t sb_lsn; /* last write sequence */
164
165 /* must be padded to 64 bit alignment */
166} xfs_sb_t;
167
168#define XFS_SB_CRC_OFF offsetof(struct xfs_sb, sb_crc)
169
170/*
171 * Superblock - on disk version. Must match the in core version above.
172 * Must be padded to 64 bit alignment.
173 */
174typedef struct xfs_dsb {
175 __be32 sb_magicnum; /* magic number == XFS_SB_MAGIC */
176 __be32 sb_blocksize; /* logical block size, bytes */
177 __be64 sb_dblocks; /* number of data blocks */
178 __be64 sb_rblocks; /* number of realtime blocks */
179 __be64 sb_rextents; /* number of realtime extents */
180 uuid_t sb_uuid; /* file system unique id */
181 __be64 sb_logstart; /* starting block of log if internal */
182 __be64 sb_rootino; /* root inode number */
183 __be64 sb_rbmino; /* bitmap inode for realtime extents */
184 __be64 sb_rsumino; /* summary inode for rt bitmap */
185 __be32 sb_rextsize; /* realtime extent size, blocks */
186 __be32 sb_agblocks; /* size of an allocation group */
187 __be32 sb_agcount; /* number of allocation groups */
188 __be32 sb_rbmblocks; /* number of rt bitmap blocks */
189 __be32 sb_logblocks; /* number of log blocks */
190 __be16 sb_versionnum; /* header version == XFS_SB_VERSION */
191 __be16 sb_sectsize; /* volume sector size, bytes */
192 __be16 sb_inodesize; /* inode size, bytes */
193 __be16 sb_inopblock; /* inodes per block */
194 char sb_fname[12]; /* file system name */
195 __u8 sb_blocklog; /* log2 of sb_blocksize */
196 __u8 sb_sectlog; /* log2 of sb_sectsize */
197 __u8 sb_inodelog; /* log2 of sb_inodesize */
198 __u8 sb_inopblog; /* log2 of sb_inopblock */
199 __u8 sb_agblklog; /* log2 of sb_agblocks (rounded up) */
200 __u8 sb_rextslog; /* log2 of sb_rextents */
201 __u8 sb_inprogress; /* mkfs is in progress, don't mount */
202 __u8 sb_imax_pct; /* max % of fs for inode space */
203 /* statistics */
204 /*
205 * These fields must remain contiguous. If you really
206 * want to change their layout, make sure you fix the
207 * code in xfs_trans_apply_sb_deltas().
208 */
209 __be64 sb_icount; /* allocated inodes */
210 __be64 sb_ifree; /* free inodes */
211 __be64 sb_fdblocks; /* free data blocks */
212 __be64 sb_frextents; /* free realtime extents */
213 /*
214 * End contiguous fields.
215 */
216 __be64 sb_uquotino; /* user quota inode */
217 __be64 sb_gquotino; /* group quota inode */
218 __be16 sb_qflags; /* quota flags */
219 __u8 sb_flags; /* misc. flags */
220 __u8 sb_shared_vn; /* shared version number */
221 __be32 sb_inoalignmt; /* inode chunk alignment, fsblocks */
222 __be32 sb_unit; /* stripe or raid unit */
223 __be32 sb_width; /* stripe or raid width */
224 __u8 sb_dirblklog; /* log2 of dir block size (fsbs) */
225 __u8 sb_logsectlog; /* log2 of the log sector size */
226 __be16 sb_logsectsize; /* sector size for the log, bytes */
227 __be32 sb_logsunit; /* stripe unit size for the log */
228 __be32 sb_features2; /* additional feature bits */
229 /*
230 * bad features2 field as a result of failing to pad the sb
231 * structure to 64 bits. Some machines will be using this field
232 * for features2 bits. Easiest just to mark it bad and not use
233 * it for anything else.
234 */
235 __be32 sb_bad_features2;
236
237 /* version 5 superblock fields start here */
238
239 /* feature masks */
240 __be32 sb_features_compat;
241 __be32 sb_features_ro_compat;
242 __be32 sb_features_incompat;
243 __be32 sb_features_log_incompat;
244
245 __le32 sb_crc; /* superblock crc */
246 __be32 sb_pad;
247
248 __be64 sb_pquotino; /* project quota inode */
249 __be64 sb_lsn; /* last write sequence */
250
251 /* must be padded to 64 bit alignment */
252} xfs_dsb_t;
253
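Every on-disk field is byte-swapped into the in-core xfs_sb when the superblock is read. A minimal sketch of that conversion for a few representative fields (the real code in xfs_sb.c walks every field; the helper name here is illustrative only):

/*
 * Sketch: convert a few on-disk (big-endian) superblock fields to the
 * in-core, CPU-endian form. Illustrative only.
 */
static void
sb_from_disk_sketch(struct xfs_sb *to, struct xfs_dsb *from)
{
	to->sb_magicnum = be32_to_cpu(from->sb_magicnum);
	to->sb_blocksize = be32_to_cpu(from->sb_blocksize);
	to->sb_dblocks = be64_to_cpu(from->sb_dblocks);
	to->sb_versionnum = be16_to_cpu(from->sb_versionnum);
	to->sb_crc = le32_to_cpu(from->sb_crc);	/* note: __le32, not __be32 */
}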
254/*
255 * Sequence number values for the fields.
256 */
257typedef enum {
258 XFS_SBS_MAGICNUM, XFS_SBS_BLOCKSIZE, XFS_SBS_DBLOCKS, XFS_SBS_RBLOCKS,
259 XFS_SBS_REXTENTS, XFS_SBS_UUID, XFS_SBS_LOGSTART, XFS_SBS_ROOTINO,
260 XFS_SBS_RBMINO, XFS_SBS_RSUMINO, XFS_SBS_REXTSIZE, XFS_SBS_AGBLOCKS,
261 XFS_SBS_AGCOUNT, XFS_SBS_RBMBLOCKS, XFS_SBS_LOGBLOCKS,
262 XFS_SBS_VERSIONNUM, XFS_SBS_SECTSIZE, XFS_SBS_INODESIZE,
263 XFS_SBS_INOPBLOCK, XFS_SBS_FNAME, XFS_SBS_BLOCKLOG,
264 XFS_SBS_SECTLOG, XFS_SBS_INODELOG, XFS_SBS_INOPBLOG, XFS_SBS_AGBLKLOG,
265 XFS_SBS_REXTSLOG, XFS_SBS_INPROGRESS, XFS_SBS_IMAX_PCT, XFS_SBS_ICOUNT,
266 XFS_SBS_IFREE, XFS_SBS_FDBLOCKS, XFS_SBS_FREXTENTS, XFS_SBS_UQUOTINO,
267 XFS_SBS_GQUOTINO, XFS_SBS_QFLAGS, XFS_SBS_FLAGS, XFS_SBS_SHARED_VN,
268 XFS_SBS_INOALIGNMT, XFS_SBS_UNIT, XFS_SBS_WIDTH, XFS_SBS_DIRBLKLOG,
269 XFS_SBS_LOGSECTLOG, XFS_SBS_LOGSECTSIZE, XFS_SBS_LOGSUNIT,
270 XFS_SBS_FEATURES2, XFS_SBS_BAD_FEATURES2, XFS_SBS_FEATURES_COMPAT,
271 XFS_SBS_FEATURES_RO_COMPAT, XFS_SBS_FEATURES_INCOMPAT,
272 XFS_SBS_FEATURES_LOG_INCOMPAT, XFS_SBS_CRC, XFS_SBS_PAD,
273 XFS_SBS_PQUOTINO, XFS_SBS_LSN,
274 XFS_SBS_FIELDCOUNT
275} xfs_sb_field_t;
276
277/*
278 * Mask values, defined based on the xfs_sb_field_t values.
279 * Only define the ones we're using.
280 */
281#define XFS_SB_MVAL(x) (1LL << XFS_SBS_ ## x)
282#define XFS_SB_UUID XFS_SB_MVAL(UUID)
283#define XFS_SB_FNAME XFS_SB_MVAL(FNAME)
284#define XFS_SB_ROOTINO XFS_SB_MVAL(ROOTINO)
285#define XFS_SB_RBMINO XFS_SB_MVAL(RBMINO)
286#define XFS_SB_RSUMINO XFS_SB_MVAL(RSUMINO)
287#define XFS_SB_VERSIONNUM XFS_SB_MVAL(VERSIONNUM)
288#define XFS_SB_UQUOTINO XFS_SB_MVAL(UQUOTINO)
289#define XFS_SB_GQUOTINO XFS_SB_MVAL(GQUOTINO)
290#define XFS_SB_QFLAGS XFS_SB_MVAL(QFLAGS)
291#define XFS_SB_SHARED_VN XFS_SB_MVAL(SHARED_VN)
292#define XFS_SB_UNIT XFS_SB_MVAL(UNIT)
293#define XFS_SB_WIDTH XFS_SB_MVAL(WIDTH)
294#define XFS_SB_ICOUNT XFS_SB_MVAL(ICOUNT)
295#define XFS_SB_IFREE XFS_SB_MVAL(IFREE)
296#define XFS_SB_FDBLOCKS XFS_SB_MVAL(FDBLOCKS)
297#define XFS_SB_FEATURES2 XFS_SB_MVAL(FEATURES2)
298#define XFS_SB_BAD_FEATURES2 XFS_SB_MVAL(BAD_FEATURES2)
299#define XFS_SB_FEATURES_COMPAT XFS_SB_MVAL(FEATURES_COMPAT)
300#define XFS_SB_FEATURES_RO_COMPAT XFS_SB_MVAL(FEATURES_RO_COMPAT)
301#define XFS_SB_FEATURES_INCOMPAT XFS_SB_MVAL(FEATURES_INCOMPAT)
302#define XFS_SB_FEATURES_LOG_INCOMPAT XFS_SB_MVAL(FEATURES_LOG_INCOMPAT)
303#define XFS_SB_CRC XFS_SB_MVAL(CRC)
304#define XFS_SB_PQUOTINO XFS_SB_MVAL(PQUOTINO)
305#define XFS_SB_NUM_BITS ((int)XFS_SBS_FIELDCOUNT)
306#define XFS_SB_ALL_BITS ((1LL << XFS_SB_NUM_BITS) - 1)
307#define XFS_SB_MOD_BITS \
308 (XFS_SB_UUID | XFS_SB_ROOTINO | XFS_SB_RBMINO | XFS_SB_RSUMINO | \
309 XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO | XFS_SB_GQUOTINO | \
310 XFS_SB_QFLAGS | XFS_SB_SHARED_VN | XFS_SB_UNIT | XFS_SB_WIDTH | \
311 XFS_SB_ICOUNT | XFS_SB_IFREE | XFS_SB_FDBLOCKS | XFS_SB_FEATURES2 | \
312 XFS_SB_BAD_FEATURES2 | XFS_SB_FEATURES_COMPAT | \
313 XFS_SB_FEATURES_RO_COMPAT | XFS_SB_FEATURES_INCOMPAT | \
314 XFS_SB_FEATURES_LOG_INCOMPAT | XFS_SB_PQUOTINO)
315
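XFS_SB_MVAL() simply shifts a 1 into the bit position given by the field's enum value, so each mask identifies one superblock field. For example, XFS_SBS_UUID is the sixth enumerator (value 5):

/*
 * Worked example:
 *   XFS_SB_UUID == XFS_SB_MVAL(UUID) == (1LL << XFS_SBS_UUID)
 *               == (1LL << 5) == 0x20
 * Callers accumulate these bits to record which fields they modified:
 */
__int64_t fields = XFS_SB_UUID | XFS_SB_ICOUNT;	/* fields to log/sync */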
316
317/*
318 * Misc. Flags - warning - these will be cleared by xfs_repair unless
319 * a feature bit is set when the flag is used.
320 */
321#define XFS_SBF_NOFLAGS 0x00 /* no flags set */
322#define XFS_SBF_READONLY 0x01 /* only read-only mounts allowed */
323
324/*
325 * define max. shared version we can interoperate with
326 */
327#define XFS_SB_MAX_SHARED_VN 0
328
329#define XFS_SB_VERSION_NUM(sbp) ((sbp)->sb_versionnum & XFS_SB_VERSION_NUMBITS)
330
331/*
332 * The first XFS version we support is a v4 superblock with V2 directories.
333 */
334static inline bool xfs_sb_good_v4_features(struct xfs_sb *sbp)
335{
336 if (!(sbp->sb_versionnum & XFS_SB_VERSION_DIRV2BIT))
337 return false;
338
339 /* check for unknown features in the fs */
340 if ((sbp->sb_versionnum & ~XFS_SB_VERSION_OKBITS) ||
341 ((sbp->sb_versionnum & XFS_SB_VERSION_MOREBITSBIT) &&
342 (sbp->sb_features2 & ~XFS_SB_VERSION2_OKBITS)))
343 return false;
344
345 return true;
346}
347
348static inline bool xfs_sb_good_version(struct xfs_sb *sbp)
349{
350 if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5)
351 return true;
352 if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4)
353 return xfs_sb_good_v4_features(sbp);
354 return false;
355}
356
357/*
358 * Detect a mismatched features2 field. Older kernels read/wrote
359 * this into the wrong slot, so to be safe we keep them in sync.
360 */
361static inline bool xfs_sb_has_mismatched_features2(struct xfs_sb *sbp)
362{
363 return sbp->sb_bad_features2 != sbp->sb_features2;
364}
365
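When the two slots disagree, the usual recovery is to merge them at mount time so both carry the union of the feature bits. A sketch of that fixup (the real handling lives in the mount path, not in this header):

/* sketch of the mount-time fixup: merge the two slots */
if (xfs_sb_has_mismatched_features2(sbp)) {
	sbp->sb_features2 |= sbp->sb_bad_features2;
	sbp->sb_bad_features2 = sbp->sb_features2;
}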
366static inline bool xfs_sb_version_hasattr(struct xfs_sb *sbp)
367{
368 return (sbp->sb_versionnum & XFS_SB_VERSION_ATTRBIT);
369}
370
371static inline void xfs_sb_version_addattr(struct xfs_sb *sbp)
372{
373 sbp->sb_versionnum |= XFS_SB_VERSION_ATTRBIT;
374}
375
376static inline bool xfs_sb_version_hasquota(struct xfs_sb *sbp)
377{
378 return (sbp->sb_versionnum & XFS_SB_VERSION_QUOTABIT);
379}
380
381static inline void xfs_sb_version_addquota(struct xfs_sb *sbp)
382{
383 sbp->sb_versionnum |= XFS_SB_VERSION_QUOTABIT;
384}
385
386static inline bool xfs_sb_version_hasalign(struct xfs_sb *sbp)
387{
388 return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 ||
389 (sbp->sb_versionnum & XFS_SB_VERSION_ALIGNBIT));
390}
391
392static inline bool xfs_sb_version_hasdalign(struct xfs_sb *sbp)
393{
394 return (sbp->sb_versionnum & XFS_SB_VERSION_DALIGNBIT);
395}
396
397static inline bool xfs_sb_version_haslogv2(struct xfs_sb *sbp)
398{
399 return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 ||
400 (sbp->sb_versionnum & XFS_SB_VERSION_LOGV2BIT);
401}
402
403static inline bool xfs_sb_version_hasextflgbit(struct xfs_sb *sbp)
404{
405 return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 ||
406 (sbp->sb_versionnum & XFS_SB_VERSION_EXTFLGBIT);
407}
408
409static inline bool xfs_sb_version_hassector(struct xfs_sb *sbp)
410{
411 return (sbp->sb_versionnum & XFS_SB_VERSION_SECTORBIT);
412}
413
414static inline bool xfs_sb_version_hasasciici(struct xfs_sb *sbp)
415{
416 return (sbp->sb_versionnum & XFS_SB_VERSION_BORGBIT);
417}
418
419static inline bool xfs_sb_version_hasmorebits(struct xfs_sb *sbp)
420{
421 return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 ||
422 (sbp->sb_versionnum & XFS_SB_VERSION_MOREBITSBIT);
423}
424
425/*
426 * sb_features2 bit version macros.
427 */
428static inline bool xfs_sb_version_haslazysbcount(struct xfs_sb *sbp)
429{
430 return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) ||
431 (xfs_sb_version_hasmorebits(sbp) &&
432 (sbp->sb_features2 & XFS_SB_VERSION2_LAZYSBCOUNTBIT));
433}
434
435static inline bool xfs_sb_version_hasattr2(struct xfs_sb *sbp)
436{
437 return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) ||
438 (xfs_sb_version_hasmorebits(sbp) &&
439 (sbp->sb_features2 & XFS_SB_VERSION2_ATTR2BIT));
440}
441
442static inline void xfs_sb_version_addattr2(struct xfs_sb *sbp)
443{
444 sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT;
445 sbp->sb_features2 |= XFS_SB_VERSION2_ATTR2BIT;
446 sbp->sb_bad_features2 |= XFS_SB_VERSION2_ATTR2BIT;
447}
448
449static inline void xfs_sb_version_removeattr2(struct xfs_sb *sbp)
450{
451 sbp->sb_features2 &= ~XFS_SB_VERSION2_ATTR2BIT;
452 sbp->sb_bad_features2 &= ~XFS_SB_VERSION2_ATTR2BIT;
453 if (!sbp->sb_features2)
454 sbp->sb_versionnum &= ~XFS_SB_VERSION_MOREBITSBIT;
455}
456
457static inline bool xfs_sb_version_hasprojid32bit(struct xfs_sb *sbp)
458{
459 return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) ||
460 (xfs_sb_version_hasmorebits(sbp) &&
461 (sbp->sb_features2 & XFS_SB_VERSION2_PROJID32BIT));
462}
463
464static inline void xfs_sb_version_addprojid32bit(struct xfs_sb *sbp)
465{
466 sbp->sb_versionnum |= XFS_SB_VERSION_MOREBITSBIT;
467 sbp->sb_features2 |= XFS_SB_VERSION2_PROJID32BIT;
468 sbp->sb_bad_features2 |= XFS_SB_VERSION2_PROJID32BIT;
469}
470
471/*
472 * Extended v5 superblock feature masks. These are to be used for new v5
473 * superblock features only.
474 *
475 * Compat features are new features that old kernels can safely ignore, so
476 * such filesystems can still be mounted read-write without issues.
477 *
478 * RO-Compat (read only) are features that old kernels can read but will break
479 * if they write. Hence only read-only mounts of such filesystems are allowed on
480 * kernels that don't support the feature bit.
481 *
482 * InCompat features are features which old kernels will not understand, and
483 * so they must refuse to mount the filesystem.
484 *
485 * Log-InCompat features are for changes to log formats or new transactions that
486 * can't be replayed on older kernels. The fields are set when the filesystem is
487 * mounted, and a clean unmount clears the fields.
488 */
489#define XFS_SB_FEAT_COMPAT_ALL 0
490#define XFS_SB_FEAT_COMPAT_UNKNOWN ~XFS_SB_FEAT_COMPAT_ALL
491static inline bool
492xfs_sb_has_compat_feature(
493 struct xfs_sb *sbp,
494 __uint32_t feature)
495{
496 return (sbp->sb_features_compat & feature) != 0;
497}
498
499#define XFS_SB_FEAT_RO_COMPAT_FINOBT (1 << 0) /* free inode btree */
500#define XFS_SB_FEAT_RO_COMPAT_ALL \
501 (XFS_SB_FEAT_RO_COMPAT_FINOBT)
502#define XFS_SB_FEAT_RO_COMPAT_UNKNOWN ~XFS_SB_FEAT_RO_COMPAT_ALL
503static inline bool
504xfs_sb_has_ro_compat_feature(
505 struct xfs_sb *sbp,
506 __uint32_t feature)
507{
508 return (sbp->sb_features_ro_compat & feature) != 0;
509}
510
511#define XFS_SB_FEAT_INCOMPAT_FTYPE (1 << 0) /* filetype in dirent */
512#define XFS_SB_FEAT_INCOMPAT_ALL \
513 (XFS_SB_FEAT_INCOMPAT_FTYPE)
514
515#define XFS_SB_FEAT_INCOMPAT_UNKNOWN ~XFS_SB_FEAT_INCOMPAT_ALL
516static inline bool
517xfs_sb_has_incompat_feature(
518 struct xfs_sb *sbp,
519 __uint32_t feature)
520{
521 return (sbp->sb_features_incompat & feature) != 0;
522}
523
524#define XFS_SB_FEAT_INCOMPAT_LOG_ALL 0
525#define XFS_SB_FEAT_INCOMPAT_LOG_UNKNOWN ~XFS_SB_FEAT_INCOMPAT_LOG_ALL
526static inline bool
527xfs_sb_has_incompat_log_feature(
528 struct xfs_sb *sbp,
529 __uint32_t feature)
530{
531 return (sbp->sb_features_log_incompat & feature) != 0;
532}
533
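Taken together, the four *_UNKNOWN masks give the standard mount-time gating: unknown incompat bits must fail the mount, unknown ro-compat bits only permit a read-only mount, and unknown compat bits are harmless. A condensed sketch ("readonly" stands in for however the mount's read-only state is tracked):

/* sketch of v5 feature gating at mount time */
if (xfs_sb_has_incompat_feature(sbp, XFS_SB_FEAT_INCOMPAT_UNKNOWN))
	return -EINVAL;			/* cannot safely mount at all */
if (!readonly &&
    xfs_sb_has_ro_compat_feature(sbp, XFS_SB_FEAT_RO_COMPAT_UNKNOWN))
	return -EINVAL;			/* read-write mount refused */
/* unknown compat bits are ignored; read-write mount is still safe */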
534/*
535 * V5 superblock specific feature checks
536 */
537static inline int xfs_sb_version_hascrc(struct xfs_sb *sbp)
538{
539 return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5;
540}
541
542static inline int xfs_sb_version_has_pquotino(struct xfs_sb *sbp)
543{
544 return XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5;
545}
546
547static inline int xfs_sb_version_hasftype(struct xfs_sb *sbp)
548{
549 return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5 &&
550 xfs_sb_has_incompat_feature(sbp, XFS_SB_FEAT_INCOMPAT_FTYPE)) ||
551 (xfs_sb_version_hasmorebits(sbp) &&
552 (sbp->sb_features2 & XFS_SB_VERSION2_FTYPE));
553}
554
555static inline int xfs_sb_version_hasfinobt(xfs_sb_t *sbp)
556{
557 return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5) &&
558 (sbp->sb_features_ro_compat & XFS_SB_FEAT_RO_COMPAT_FINOBT);
559}
560
561/*
562 * end of superblock version macros
563 */
564
565static inline bool
566xfs_is_quota_inode(struct xfs_sb *sbp, xfs_ino_t ino)
567{
568 return (ino == sbp->sb_uquotino ||
569 ino == sbp->sb_gquotino ||
570 ino == sbp->sb_pquotino);
571}
572
573#define XFS_SB_DADDR ((xfs_daddr_t)0) /* daddr in filesystem/ag */
574#define XFS_SB_BLOCK(mp) XFS_HDR_BLOCK(mp, XFS_SB_DADDR)
575#define XFS_BUF_TO_SBP(bp) ((xfs_dsb_t *)((bp)->b_addr))
576
577#define XFS_HDR_BLOCK(mp,d) ((xfs_agblock_t)XFS_BB_TO_FSBT(mp,d))
578#define XFS_DADDR_TO_FSB(mp,d) XFS_AGB_TO_FSB(mp, \
579 xfs_daddr_to_agno(mp,d), xfs_daddr_to_agbno(mp,d))
580#define XFS_FSB_TO_DADDR(mp,fsbno) XFS_AGB_TO_DADDR(mp, \
581 XFS_FSB_TO_AGNO(mp,fsbno), XFS_FSB_TO_AGBNO(mp,fsbno))
582
583/*
584 * File system sector to basic block conversions.
585 */
586#define XFS_FSS_TO_BB(mp,sec) ((sec) << (mp)->m_sectbb_log)
587
588/*
589 * File system block to basic block conversions.
590 */
591#define XFS_FSB_TO_BB(mp,fsbno) ((fsbno) << (mp)->m_blkbb_log)
592#define XFS_BB_TO_FSB(mp,bb) \
593 (((bb) + (XFS_FSB_TO_BB(mp,1) - 1)) >> (mp)->m_blkbb_log)
594#define XFS_BB_TO_FSBT(mp,bb) ((bb) >> (mp)->m_blkbb_log)
595
596/*
597 * File system block to byte conversions.
598 */
599#define XFS_FSB_TO_B(mp,fsbno) ((xfs_fsize_t)(fsbno) << (mp)->m_sb.sb_blocklog)
600#define XFS_B_TO_FSB(mp,b) \
601 ((((__uint64_t)(b)) + (mp)->m_blockmask) >> (mp)->m_sb.sb_blocklog)
602#define XFS_B_TO_FSBT(mp,b) (((__uint64_t)(b)) >> (mp)->m_sb.sb_blocklog)
603#define XFS_B_FSB_OFFSET(mp,b) ((b) & (mp)->m_blockmask)
604
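All of these reduce to shifts and masks because block and sector sizes are powers of two. A worked example, assuming sb_blocklog == 12 (4096-byte blocks, so m_blockmask == 4095):

/*
 * XFS_FSB_TO_B(mp, 10)       == 10 << 12            == 40960 bytes
 * XFS_B_TO_FSB(mp, 8193)     == (8193 + 4095) >> 12 == 3  (rounds up)
 * XFS_B_TO_FSBT(mp, 8193)    ==  8193 >> 12         == 2  (truncates)
 * XFS_B_FSB_OFFSET(mp, 8193) ==  8193 & 4095        == 1
 */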
605/*
606 * perag get/put wrappers for ref counting
607 */
608extern struct xfs_perag *xfs_perag_get(struct xfs_mount *, xfs_agnumber_t);
diff --git a/fs/xfs/libxfs/xfs_symlink_remote.c b/fs/xfs/libxfs/xfs_symlink_remote.c
index 5782f037eab4..c80c5236c3da 100644
--- a/fs/xfs/libxfs/xfs_symlink_remote.c
+++ b/fs/xfs/libxfs/xfs_symlink_remote.c
@@ -22,8 +22,6 @@
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_shared.h" 23#include "xfs_shared.h"
24#include "xfs_trans_resv.h" 24#include "xfs_trans_resv.h"
25#include "xfs_ag.h"
26#include "xfs_sb.h"
27#include "xfs_mount.h" 25#include "xfs_mount.h"
28#include "xfs_bmap_btree.h" 26#include "xfs_bmap_btree.h"
29#include "xfs_inode.h" 27#include "xfs_inode.h"
diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
index f2bda7c76b8a..6c1330f29050 100644
--- a/fs/xfs/libxfs/xfs_trans_resv.c
+++ b/fs/xfs/libxfs/xfs_trans_resv.c
@@ -22,8 +22,6 @@
22#include "xfs_format.h" 22#include "xfs_format.h"
23#include "xfs_log_format.h" 23#include "xfs_log_format.h"
24#include "xfs_trans_resv.h" 24#include "xfs_trans_resv.h"
25#include "xfs_sb.h"
26#include "xfs_ag.h"
27#include "xfs_mount.h" 25#include "xfs_mount.h"
28#include "xfs_da_format.h" 26#include "xfs_da_format.h"
29#include "xfs_da_btree.h" 27#include "xfs_da_btree.h"
diff --git a/fs/xfs/xfs_acl.c b/fs/xfs/xfs_acl.c
index a65fa5dde6e9..4b641676f258 100644
--- a/fs/xfs/xfs_acl.c
+++ b/fs/xfs/xfs_acl.c
@@ -19,8 +19,6 @@
19#include "xfs_format.h" 19#include "xfs_format.h"
20#include "xfs_log_format.h" 20#include "xfs_log_format.h"
21#include "xfs_trans_resv.h" 21#include "xfs_trans_resv.h"
22#include "xfs_ag.h"
23#include "xfs_sb.h"
24#include "xfs_mount.h" 22#include "xfs_mount.h"
25#include "xfs_inode.h" 23#include "xfs_inode.h"
26#include "xfs_acl.h" 24#include "xfs_acl.h"
diff --git a/fs/xfs/xfs_acl.h b/fs/xfs/xfs_acl.h
index 5dc163744511..3841b07f27bf 100644
--- a/fs/xfs/xfs_acl.h
+++ b/fs/xfs/xfs_acl.h
@@ -22,42 +22,6 @@ struct inode;
 struct posix_acl;
 struct xfs_inode;
 
-#define XFS_ACL_NOT_PRESENT (-1)
-
-/* On-disk XFS access control list structure */
-struct xfs_acl_entry {
-	__be32	ae_tag;
-	__be32	ae_id;
-	__be16	ae_perm;
-	__be16	ae_pad;		/* fill the implicit hole in the structure */
-};
-
-struct xfs_acl {
-	__be32			acl_cnt;
-	struct xfs_acl_entry	acl_entry[0];
-};
-
-/*
- * The number of ACL entries allowed is defined by the on-disk format.
- * For v4 superblocks, that is limited to 25 entries. For v5 superblocks, it is
- * limited only by the maximum size of the xattr that stores the information.
- */
-#define XFS_ACL_MAX_ENTRIES(mp) \
-	(xfs_sb_version_hascrc(&mp->m_sb) \
-		?  (XATTR_SIZE_MAX - sizeof(struct xfs_acl)) / \
-						sizeof(struct xfs_acl_entry) \
-		: 25)
-
-#define XFS_ACL_MAX_SIZE(mp) \
-	(sizeof(struct xfs_acl) + \
-		sizeof(struct xfs_acl_entry) * XFS_ACL_MAX_ENTRIES((mp)))
-
-/* On-disk XFS extended attribute names */
-#define SGI_ACL_FILE		(unsigned char *)"SGI_ACL_FILE"
-#define SGI_ACL_DEFAULT		(unsigned char *)"SGI_ACL_DEFAULT"
-#define SGI_ACL_FILE_SIZE	(sizeof(SGI_ACL_FILE)-1)
-#define SGI_ACL_DEFAULT_SIZE	(sizeof(SGI_ACL_DEFAULT)-1)
-
 #ifdef CONFIG_XFS_POSIX_ACL
 extern struct posix_acl *xfs_get_acl(struct inode *inode, int type);
 extern int xfs_set_acl(struct inode *inode, struct posix_acl *acl, int type);
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index f5b2453a43b2..18e2f3bbae5e 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -20,8 +20,6 @@
20#include "xfs_format.h" 20#include "xfs_format.h"
21#include "xfs_log_format.h" 21#include "xfs_log_format.h"
22#include "xfs_trans_resv.h" 22#include "xfs_trans_resv.h"
23#include "xfs_sb.h"
24#include "xfs_ag.h"
25#include "xfs_mount.h" 23#include "xfs_mount.h"
26#include "xfs_inode.h" 24#include "xfs_inode.h"
27#include "xfs_trans.h" 25#include "xfs_trans.h"
@@ -33,7 +31,6 @@
33#include "xfs_bmap.h" 31#include "xfs_bmap.h"
34#include "xfs_bmap_util.h" 32#include "xfs_bmap_util.h"
35#include "xfs_bmap_btree.h" 33#include "xfs_bmap_btree.h"
36#include "xfs_dinode.h"
37#include <linux/aio.h> 34#include <linux/aio.h>
38#include <linux/gfp.h> 35#include <linux/gfp.h>
39#include <linux/mpage.h> 36#include <linux/mpage.h>
diff --git a/fs/xfs/xfs_attr_inactive.c b/fs/xfs/xfs_attr_inactive.c
index aa2a8b1838a2..83af4c149635 100644
--- a/fs/xfs/xfs_attr_inactive.c
+++ b/fs/xfs/xfs_attr_inactive.c
@@ -23,8 +23,6 @@
23#include "xfs_log_format.h" 23#include "xfs_log_format.h"
24#include "xfs_trans_resv.h" 24#include "xfs_trans_resv.h"
25#include "xfs_bit.h" 25#include "xfs_bit.h"
26#include "xfs_sb.h"
27#include "xfs_ag.h"
28#include "xfs_mount.h" 26#include "xfs_mount.h"
29#include "xfs_da_format.h" 27#include "xfs_da_format.h"
30#include "xfs_da_btree.h" 28#include "xfs_da_btree.h"
@@ -39,7 +37,6 @@
39#include "xfs_error.h" 37#include "xfs_error.h"
40#include "xfs_quota.h" 38#include "xfs_quota.h"
41#include "xfs_trace.h" 39#include "xfs_trace.h"
42#include "xfs_dinode.h"
43#include "xfs_dir2.h" 40#include "xfs_dir2.h"
44 41
45/* 42/*
diff --git a/fs/xfs/xfs_attr_list.c b/fs/xfs/xfs_attr_list.c
index 62db83ab6cbc..a43d370d2c58 100644
--- a/fs/xfs/xfs_attr_list.c
+++ b/fs/xfs/xfs_attr_list.c
@@ -22,8 +22,6 @@
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_bit.h" 24#include "xfs_bit.h"
25#include "xfs_sb.h"
26#include "xfs_ag.h"
27#include "xfs_mount.h" 25#include "xfs_mount.h"
28#include "xfs_da_format.h" 26#include "xfs_da_format.h"
29#include "xfs_da_btree.h" 27#include "xfs_da_btree.h"
@@ -39,7 +37,6 @@
39#include "xfs_trace.h" 37#include "xfs_trace.h"
40#include "xfs_buf_item.h" 38#include "xfs_buf_item.h"
41#include "xfs_cksum.h" 39#include "xfs_cksum.h"
42#include "xfs_dinode.h"
43#include "xfs_dir2.h" 40#include "xfs_dir2.h"
44 41
45STATIC int 42STATIC int
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 281002689d64..22a5dcb70b32 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -23,8 +23,6 @@
23#include "xfs_log_format.h" 23#include "xfs_log_format.h"
24#include "xfs_trans_resv.h" 24#include "xfs_trans_resv.h"
25#include "xfs_bit.h" 25#include "xfs_bit.h"
26#include "xfs_sb.h"
27#include "xfs_ag.h"
28#include "xfs_mount.h" 26#include "xfs_mount.h"
29#include "xfs_da_format.h" 27#include "xfs_da_format.h"
30#include "xfs_inode.h" 28#include "xfs_inode.h"
@@ -42,7 +40,6 @@
42#include "xfs_trace.h" 40#include "xfs_trace.h"
43#include "xfs_icache.h" 41#include "xfs_icache.h"
44#include "xfs_log.h" 42#include "xfs_log.h"
45#include "xfs_dinode.h"
46 43
47/* Kernel only BMAP related definitions and functions */ 44/* Kernel only BMAP related definitions and functions */
48 45
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 24b4ebea0d4d..bb502a391792 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -34,18 +34,16 @@
 #include <linux/backing-dev.h>
 #include <linux/freezer.h>
 
+#include "xfs_format.h"
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
 #include "xfs_sb.h"
-#include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_trace.h"
 #include "xfs_log.h"
 
 static kmem_zone_t *xfs_buf_zone;
 
-static struct workqueue_struct *xfslogd_workqueue;
-
 #ifdef XFS_BUF_LOCK_TRACKING
 # define XB_SET_OWNER(bp)	((bp)->b_last_holder = current->pid)
 # define XB_CLEAR_OWNER(bp)	((bp)->b_last_holder = -1)
@@ -463,7 +461,7 @@ _xfs_buf_find(
 	 * have to check that the buffer falls within the filesystem bounds.
 	 */
 	eofs = XFS_FSB_TO_BB(btp->bt_mount, btp->bt_mount->m_sb.sb_dblocks);
-	if (blkno >= eofs) {
+	if (blkno < 0 || blkno >= eofs) {
 		/*
 		 * XXX (dgc): we should really be returning -EFSCORRUPTED here,
 		 * but none of the higher level infrastructure supports
@@ -1043,7 +1041,7 @@ xfs_buf_ioend_work(
 	struct work_struct	*work)
 {
 	struct xfs_buf		*bp =
-		container_of(work, xfs_buf_t, b_iodone_work);
+		container_of(work, xfs_buf_t, b_ioend_work);
 
 	xfs_buf_ioend(bp);
 }
@@ -1052,8 +1050,8 @@ void
 xfs_buf_ioend_async(
 	struct xfs_buf	*bp)
 {
-	INIT_WORK(&bp->b_iodone_work, xfs_buf_ioend_work);
-	queue_work(xfslogd_workqueue, &bp->b_iodone_work);
+	INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
+	queue_work(bp->b_ioend_wq, &bp->b_ioend_work);
 }
 
 void
@@ -1222,6 +1220,13 @@ _xfs_buf_ioapply(
 	 */
 	bp->b_error = 0;
 
+	/*
+	 * Initialize the I/O completion workqueue if we haven't yet or the
+	 * submitter has not opted to specify a custom one.
+	 */
+	if (!bp->b_ioend_wq)
+		bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue;
+
 	if (bp->b_flags & XBF_WRITE) {
 		if (bp->b_flags & XBF_SYNCIO)
 			rw = WRITE_SYNC;
@@ -1882,15 +1887,8 @@ xfs_buf_init(void)
 	if (!xfs_buf_zone)
 		goto out;
 
-	xfslogd_workqueue = alloc_workqueue("xfslogd",
-				WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 1);
-	if (!xfslogd_workqueue)
-		goto out_free_buf_zone;
-
 	return 0;
 
- out_free_buf_zone:
-	kmem_zone_destroy(xfs_buf_zone);
  out:
 	return -ENOMEM;
 }
@@ -1898,6 +1896,5 @@ xfs_buf_init(void)
 void
 xfs_buf_terminate(void)
 {
-	destroy_workqueue(xfslogd_workqueue);
 	kmem_zone_destroy(xfs_buf_zone);
 }
diff --git a/fs/xfs/xfs_buf.h b/fs/xfs/xfs_buf.h
index 82002c00af90..75ff5d5a7d2e 100644
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -164,7 +164,8 @@ typedef struct xfs_buf {
 	struct xfs_perag	*b_pag;		/* contains rbtree root */
 	xfs_buftarg_t		*b_target;	/* buffer target (device) */
 	void			*b_addr;	/* virtual address of buffer */
-	struct work_struct	b_iodone_work;
+	struct work_struct	b_ioend_work;
+	struct workqueue_struct	*b_ioend_wq;	/* I/O completion wq */
 	xfs_buf_iodone_t	b_iodone;	/* I/O completion function */
 	struct completion	b_iowait;	/* queue for I/O waiters */
 	void			*b_fspriv;
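With b_ioend_wq in place, completion work runs on whichever workqueue the submitter points it at; _xfs_buf_ioapply() (see the xfs_buf.c hunks above) falls back to the mount's m_buf_workqueue when the field is left NULL. A sketch of a submitter opting in to a custom queue ("my_wq" is illustrative):

/* direct this buffer's I/O completion to a caller-chosen workqueue */
bp->b_ioend_wq = my_wq;		/* NULL would mean the mount default */
xfs_buf_submit(bp);		/* or whichever submission path applies */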
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index f15969543326..3f9bd58edec7 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -17,11 +17,11 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_format.h"
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
 #include "xfs_bit.h"
 #include "xfs_sb.h"
-#include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_trans.h"
 #include "xfs_buf_item.h"
diff --git a/fs/xfs/xfs_dir2_readdir.c b/fs/xfs/xfs_dir2_readdir.c
index f1b69edcdf31..098cd78fe708 100644
--- a/fs/xfs/xfs_dir2_readdir.c
+++ b/fs/xfs/xfs_dir2_readdir.c
@@ -22,8 +22,6 @@
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_bit.h" 24#include "xfs_bit.h"
25#include "xfs_sb.h"
26#include "xfs_ag.h"
27#include "xfs_mount.h" 25#include "xfs_mount.h"
28#include "xfs_da_format.h" 26#include "xfs_da_format.h"
29#include "xfs_da_btree.h" 27#include "xfs_da_btree.h"
@@ -34,7 +32,6 @@
34#include "xfs_trace.h" 32#include "xfs_trace.h"
35#include "xfs_bmap.h" 33#include "xfs_bmap.h"
36#include "xfs_trans.h" 34#include "xfs_trans.h"
37#include "xfs_dinode.h"
38 35
39/* 36/*
40 * Directory file type support functions 37 * Directory file type support functions
@@ -44,7 +41,7 @@ static unsigned char xfs_dir3_filetype_table[] = {
44 DT_FIFO, DT_SOCK, DT_LNK, DT_WHT, 41 DT_FIFO, DT_SOCK, DT_LNK, DT_WHT,
45}; 42};
46 43
47unsigned char 44static unsigned char
48xfs_dir3_get_dtype( 45xfs_dir3_get_dtype(
49 struct xfs_mount *mp, 46 struct xfs_mount *mp,
50 __uint8_t filetype) 47 __uint8_t filetype)
@@ -57,22 +54,6 @@ xfs_dir3_get_dtype(
57 54
58 return xfs_dir3_filetype_table[filetype]; 55 return xfs_dir3_filetype_table[filetype];
59} 56}
60/*
61 * @mode, if set, indicates that the type field needs to be set up.
62 * This uses the transformation from file mode to DT_* as defined in linux/fs.h
63 * for file type specification. This will be propagated into the directory
64 * structure if appropriate for the given operation and filesystem config.
65 */
66const unsigned char xfs_mode_to_ftype[S_IFMT >> S_SHIFT] = {
67 [0] = XFS_DIR3_FT_UNKNOWN,
68 [S_IFREG >> S_SHIFT] = XFS_DIR3_FT_REG_FILE,
69 [S_IFDIR >> S_SHIFT] = XFS_DIR3_FT_DIR,
70 [S_IFCHR >> S_SHIFT] = XFS_DIR3_FT_CHRDEV,
71 [S_IFBLK >> S_SHIFT] = XFS_DIR3_FT_BLKDEV,
72 [S_IFIFO >> S_SHIFT] = XFS_DIR3_FT_FIFO,
73 [S_IFSOCK >> S_SHIFT] = XFS_DIR3_FT_SOCK,
74 [S_IFLNK >> S_SHIFT] = XFS_DIR3_FT_SYMLINK,
75};
76 57
77STATIC int 58STATIC int
78xfs_dir2_sf_getdents( 59xfs_dir2_sf_getdents(
diff --git a/fs/xfs/xfs_discard.c b/fs/xfs/xfs_discard.c
index 13d08a1b390e..799e5a2d334d 100644
--- a/fs/xfs/xfs_discard.c
+++ b/fs/xfs/xfs_discard.c
@@ -20,7 +20,6 @@
20#include "xfs_log_format.h" 20#include "xfs_log_format.h"
21#include "xfs_trans_resv.h" 21#include "xfs_trans_resv.h"
22#include "xfs_sb.h" 22#include "xfs_sb.h"
23#include "xfs_ag.h"
24#include "xfs_mount.h" 23#include "xfs_mount.h"
25#include "xfs_quota.h" 24#include "xfs_quota.h"
26#include "xfs_inode.h" 25#include "xfs_inode.h"
diff --git a/fs/xfs/xfs_dquot.c b/fs/xfs/xfs_dquot.c
index 63c2de49f61d..02c01bbbc789 100644
--- a/fs/xfs/xfs_dquot.c
+++ b/fs/xfs/xfs_dquot.c
@@ -22,8 +22,6 @@
22#include "xfs_shared.h" 22#include "xfs_shared.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_bit.h" 24#include "xfs_bit.h"
25#include "xfs_sb.h"
26#include "xfs_ag.h"
27#include "xfs_mount.h" 25#include "xfs_mount.h"
28#include "xfs_inode.h" 26#include "xfs_inode.h"
29#include "xfs_bmap.h" 27#include "xfs_bmap.h"
diff --git a/fs/xfs/xfs_dquot_item.c b/fs/xfs/xfs_dquot_item.c
index f33fbaaa4d8a..814cff94e78f 100644
--- a/fs/xfs/xfs_dquot_item.c
+++ b/fs/xfs/xfs_dquot_item.c
@@ -20,8 +20,6 @@
20#include "xfs_format.h" 20#include "xfs_format.h"
21#include "xfs_log_format.h" 21#include "xfs_log_format.h"
22#include "xfs_trans_resv.h" 22#include "xfs_trans_resv.h"
23#include "xfs_sb.h"
24#include "xfs_ag.h"
25#include "xfs_mount.h" 23#include "xfs_mount.h"
26#include "xfs_inode.h" 24#include "xfs_inode.h"
27#include "xfs_quota.h" 25#include "xfs_quota.h"
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index b92fd7bc49e3..3ee186ac1093 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -20,8 +20,6 @@
20#include "xfs_fs.h" 20#include "xfs_fs.h"
21#include "xfs_log_format.h" 21#include "xfs_log_format.h"
22#include "xfs_trans_resv.h" 22#include "xfs_trans_resv.h"
23#include "xfs_sb.h"
24#include "xfs_ag.h"
25#include "xfs_mount.h" 23#include "xfs_mount.h"
26#include "xfs_error.h" 24#include "xfs_error.h"
27 25
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index 5a6bd5d8779a..5eb4a14e0a0f 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -19,10 +19,9 @@
19#include "xfs_format.h" 19#include "xfs_format.h"
20#include "xfs_log_format.h" 20#include "xfs_log_format.h"
21#include "xfs_trans_resv.h" 21#include "xfs_trans_resv.h"
22#include "xfs_sb.h"
23#include "xfs_ag.h"
24#include "xfs_mount.h" 22#include "xfs_mount.h"
25#include "xfs_da_format.h" 23#include "xfs_da_format.h"
24#include "xfs_da_btree.h"
26#include "xfs_dir2.h" 25#include "xfs_dir2.h"
27#include "xfs_export.h" 26#include "xfs_export.h"
28#include "xfs_inode.h" 27#include "xfs_inode.h"
diff --git a/fs/xfs/xfs_extent_busy.c b/fs/xfs/xfs_extent_busy.c
index fd22f69049d4..c263e079273e 100644
--- a/fs/xfs/xfs_extent_busy.c
+++ b/fs/xfs/xfs_extent_busy.c
@@ -24,7 +24,6 @@
24#include "xfs_shared.h" 24#include "xfs_shared.h"
25#include "xfs_trans_resv.h" 25#include "xfs_trans_resv.h"
26#include "xfs_sb.h" 26#include "xfs_sb.h"
27#include "xfs_ag.h"
28#include "xfs_mount.h" 27#include "xfs_mount.h"
29#include "xfs_alloc.h" 28#include "xfs_alloc.h"
30#include "xfs_extent_busy.h" 29#include "xfs_extent_busy.h"
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index c4327419dc5c..cb7fe64cdbfa 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -17,10 +17,9 @@
  */
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_format.h"
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_trans.h"
 #include "xfs_trans_priv.h"
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index eb596b419942..13e974e6a889 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -21,8 +21,6 @@
21#include "xfs_format.h" 21#include "xfs_format.h"
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_sb.h"
25#include "xfs_ag.h"
26#include "xfs_mount.h" 24#include "xfs_mount.h"
27#include "xfs_da_format.h" 25#include "xfs_da_format.h"
28#include "xfs_da_btree.h" 26#include "xfs_da_btree.h"
@@ -37,7 +35,6 @@
37#include "xfs_ioctl.h" 35#include "xfs_ioctl.h"
38#include "xfs_trace.h" 36#include "xfs_trace.h"
39#include "xfs_log.h" 37#include "xfs_log.h"
40#include "xfs_dinode.h"
41#include "xfs_icache.h" 38#include "xfs_icache.h"
42 39
43#include <linux/aio.h> 40#include <linux/aio.h>
@@ -933,7 +930,6 @@ xfs_file_readdir(
933{ 930{
934 struct inode *inode = file_inode(file); 931 struct inode *inode = file_inode(file);
935 xfs_inode_t *ip = XFS_I(inode); 932 xfs_inode_t *ip = XFS_I(inode);
936 int error;
937 size_t bufsize; 933 size_t bufsize;
938 934
939 /* 935 /*
@@ -950,10 +946,7 @@ xfs_file_readdir(
950 */ 946 */
951 bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size); 947 bufsize = (size_t)min_t(loff_t, 32768, ip->i_d.di_size);
952 948
953 error = xfs_readdir(ip, ctx, bufsize); 949 return xfs_readdir(ip, ctx, bufsize);
954 if (error)
955 return error;
956 return 0;
957} 950}
958 951
959STATIC int 952STATIC int
diff --git a/fs/xfs/xfs_filestream.c b/fs/xfs/xfs_filestream.c
index e92730c1d3ca..a2e86e8a0fea 100644
--- a/fs/xfs/xfs_filestream.c
+++ b/fs/xfs/xfs_filestream.c
@@ -20,16 +20,13 @@
20#include "xfs_format.h" 20#include "xfs_format.h"
21#include "xfs_log_format.h" 21#include "xfs_log_format.h"
22#include "xfs_trans_resv.h" 22#include "xfs_trans_resv.h"
23#include "xfs_ag.h"
24#include "xfs_sb.h" 23#include "xfs_sb.h"
25#include "xfs_mount.h" 24#include "xfs_mount.h"
26#include "xfs_inum.h"
27#include "xfs_inode.h" 25#include "xfs_inode.h"
28#include "xfs_bmap.h" 26#include "xfs_bmap.h"
29#include "xfs_bmap_util.h" 27#include "xfs_bmap_util.h"
30#include "xfs_alloc.h" 28#include "xfs_alloc.h"
31#include "xfs_mru_cache.h" 29#include "xfs_mru_cache.h"
32#include "xfs_dinode.h"
33#include "xfs_filestream.h" 30#include "xfs_filestream.h"
34#include "xfs_trace.h" 31#include "xfs_trace.h"
35 32
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index c05ac8b70fa9..fdc64220fcb0 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -22,7 +22,6 @@
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_sb.h" 24#include "xfs_sb.h"
25#include "xfs_ag.h"
26#include "xfs_mount.h" 25#include "xfs_mount.h"
27#include "xfs_da_format.h" 26#include "xfs_da_format.h"
28#include "xfs_da_btree.h" 27#include "xfs_da_btree.h"
@@ -40,7 +39,6 @@
40#include "xfs_rtalloc.h" 39#include "xfs_rtalloc.h"
41#include "xfs_trace.h" 40#include "xfs_trace.h"
42#include "xfs_log.h" 41#include "xfs_log.h"
43#include "xfs_dinode.h"
44#include "xfs_filestream.h" 42#include "xfs_filestream.h"
45 43
46/* 44/*
diff --git a/fs/xfs/xfs_icache.c b/fs/xfs/xfs_icache.c
index b45f7b27b5df..9771b7ef62ed 100644
--- a/fs/xfs/xfs_icache.c
+++ b/fs/xfs/xfs_icache.c
@@ -20,9 +20,7 @@
20#include "xfs_format.h" 20#include "xfs_format.h"
21#include "xfs_log_format.h" 21#include "xfs_log_format.h"
22#include "xfs_trans_resv.h" 22#include "xfs_trans_resv.h"
23#include "xfs_inum.h"
24#include "xfs_sb.h" 23#include "xfs_sb.h"
25#include "xfs_ag.h"
26#include "xfs_mount.h" 24#include "xfs_mount.h"
27#include "xfs_inode.h" 25#include "xfs_inode.h"
28#include "xfs_error.h" 26#include "xfs_error.h"
@@ -65,6 +63,7 @@ xfs_inode_alloc(
65 return NULL; 63 return NULL;
66 } 64 }
67 65
66 XFS_STATS_INC(vn_active);
68 ASSERT(atomic_read(&ip->i_pincount) == 0); 67 ASSERT(atomic_read(&ip->i_pincount) == 0);
69 ASSERT(!spin_is_locked(&ip->i_flags_lock)); 68 ASSERT(!spin_is_locked(&ip->i_flags_lock));
70 ASSERT(!xfs_isiflocked(ip)); 69 ASSERT(!xfs_isiflocked(ip));
@@ -130,6 +129,7 @@ xfs_inode_free(
130 /* asserts to verify all state is correct here */ 129 /* asserts to verify all state is correct here */
131 ASSERT(atomic_read(&ip->i_pincount) == 0); 130 ASSERT(atomic_read(&ip->i_pincount) == 0);
132 ASSERT(!xfs_isiflocked(ip)); 131 ASSERT(!xfs_isiflocked(ip));
132 XFS_STATS_DEC(vn_active);
133 133
134 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback); 134 call_rcu(&VFS_I(ip)->i_rcu, xfs_inode_free_callback);
135} 135}
diff --git a/fs/xfs/xfs_icache.h b/fs/xfs/xfs_icache.h
index 46748b86b12f..62f1f91c32cb 100644
--- a/fs/xfs/xfs_icache.h
+++ b/fs/xfs/xfs_icache.h
@@ -34,6 +34,14 @@ struct xfs_eofblocks {
 #define SYNC_TRYLOCK	0x0002		/* only try to lock inodes */
 
 /*
+ * tags for inode radix tree
+ */
+#define XFS_ICI_NO_TAG		(-1)	/* special flag for an untagged lookup
+					   in xfs_inode_ag_iterator */
+#define XFS_ICI_RECLAIM_TAG	0	/* inode is to be reclaimed */
+#define XFS_ICI_EOFBLOCKS_TAG	1	/* inode has blocks beyond EOF */
+
+/*
  * Flags for xfs_iget()
  */
 #define XFS_IGET_CREATE		0x1
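These tags are set on the per-AG inode radix tree (pag_ici_root) so the background scans can find reclaimable or EOF-blocks inodes without walking every cached inode. A sketch of tagging an inode for reclaim, assuming the pag locking used elsewhere in xfs_icache.c:

/* sketch: mark an inode so the reclaim scan can find it by tag */
spin_lock(&pag->pag_ici_lock);
radix_tree_tag_set(&pag->pag_ici_root,
		   XFS_INO_TO_AGINO(mp, ip->i_ino),
		   XFS_ICI_RECLAIM_TAG);
spin_unlock(&pag->pag_ici_lock);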
diff --git a/fs/xfs/xfs_icreate_item.c b/fs/xfs/xfs_icreate_item.c
index 7e4549233251..d45ca72af6fb 100644
--- a/fs/xfs/xfs_icreate_item.c
+++ b/fs/xfs/xfs_icreate_item.c
@@ -18,11 +18,10 @@
18#include "xfs.h" 18#include "xfs.h"
19#include "xfs_fs.h" 19#include "xfs_fs.h"
20#include "xfs_shared.h" 20#include "xfs_shared.h"
21#include "xfs_format.h"
21#include "xfs_log_format.h" 22#include "xfs_log_format.h"
22#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
23#include "xfs_bit.h" 24#include "xfs_bit.h"
24#include "xfs_sb.h"
25#include "xfs_ag.h"
26#include "xfs_mount.h" 25#include "xfs_mount.h"
27#include "xfs_trans.h" 26#include "xfs_trans.h"
28#include "xfs_trans_priv.h" 27#include "xfs_trans_priv.h"
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 8ed049d1e332..41f804e740d7 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -23,9 +23,7 @@
23#include "xfs_format.h" 23#include "xfs_format.h"
24#include "xfs_log_format.h" 24#include "xfs_log_format.h"
25#include "xfs_trans_resv.h" 25#include "xfs_trans_resv.h"
26#include "xfs_inum.h"
27#include "xfs_sb.h" 26#include "xfs_sb.h"
28#include "xfs_ag.h"
29#include "xfs_mount.h" 27#include "xfs_mount.h"
30#include "xfs_inode.h" 28#include "xfs_inode.h"
31#include "xfs_da_format.h" 29#include "xfs_da_format.h"
@@ -1082,7 +1080,7 @@ xfs_create(
1082 struct xfs_dquot *udqp = NULL; 1080 struct xfs_dquot *udqp = NULL;
1083 struct xfs_dquot *gdqp = NULL; 1081 struct xfs_dquot *gdqp = NULL;
1084 struct xfs_dquot *pdqp = NULL; 1082 struct xfs_dquot *pdqp = NULL;
1085 struct xfs_trans_res tres; 1083 struct xfs_trans_res *tres;
1086 uint resblks; 1084 uint resblks;
1087 1085
1088 trace_xfs_create(dp, name); 1086 trace_xfs_create(dp, name);
@@ -1105,13 +1103,11 @@ xfs_create(
1105 if (is_dir) { 1103 if (is_dir) {
1106 rdev = 0; 1104 rdev = 0;
1107 resblks = XFS_MKDIR_SPACE_RES(mp, name->len); 1105 resblks = XFS_MKDIR_SPACE_RES(mp, name->len);
1108 tres.tr_logres = M_RES(mp)->tr_mkdir.tr_logres; 1106 tres = &M_RES(mp)->tr_mkdir;
1109 tres.tr_logcount = XFS_MKDIR_LOG_COUNT;
1110 tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR); 1107 tp = xfs_trans_alloc(mp, XFS_TRANS_MKDIR);
1111 } else { 1108 } else {
1112 resblks = XFS_CREATE_SPACE_RES(mp, name->len); 1109 resblks = XFS_CREATE_SPACE_RES(mp, name->len);
1113 tres.tr_logres = M_RES(mp)->tr_create.tr_logres; 1110 tres = &M_RES(mp)->tr_create;
1114 tres.tr_logcount = XFS_CREATE_LOG_COUNT;
1115 tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE); 1111 tp = xfs_trans_alloc(mp, XFS_TRANS_CREATE);
1116 } 1112 }
1117 1113
@@ -1123,17 +1119,16 @@ xfs_create(
1123 * the case we'll drop the one we have and get a more 1119 * the case we'll drop the one we have and get a more
1124 * appropriate transaction later. 1120 * appropriate transaction later.
1125 */ 1121 */
1126 tres.tr_logflags = XFS_TRANS_PERM_LOG_RES; 1122 error = xfs_trans_reserve(tp, tres, resblks, 0);
1127 error = xfs_trans_reserve(tp, &tres, resblks, 0);
1128 if (error == -ENOSPC) { 1123 if (error == -ENOSPC) {
1129 /* flush outstanding delalloc blocks and retry */ 1124 /* flush outstanding delalloc blocks and retry */
1130 xfs_flush_inodes(mp); 1125 xfs_flush_inodes(mp);
1131 error = xfs_trans_reserve(tp, &tres, resblks, 0); 1126 error = xfs_trans_reserve(tp, tres, resblks, 0);
1132 } 1127 }
1133 if (error == -ENOSPC) { 1128 if (error == -ENOSPC) {
1134 /* No space at all so try a "no-allocation" reservation */ 1129 /* No space at all so try a "no-allocation" reservation */
1135 resblks = 0; 1130 resblks = 0;
1136 error = xfs_trans_reserve(tp, &tres, 0, 0); 1131 error = xfs_trans_reserve(tp, tres, 0, 0);
1137 } 1132 }
1138 if (error) { 1133 if (error) {
1139 cancel_flags = 0; 1134 cancel_flags = 0;
@@ -2488,9 +2483,7 @@ xfs_remove(
2488 xfs_fsblock_t first_block; 2483 xfs_fsblock_t first_block;
2489 int cancel_flags; 2484 int cancel_flags;
2490 int committed; 2485 int committed;
2491 int link_zero;
2492 uint resblks; 2486 uint resblks;
2493 uint log_count;
2494 2487
2495 trace_xfs_remove(dp, name); 2488 trace_xfs_remove(dp, name);
2496 2489
@@ -2505,13 +2498,10 @@ xfs_remove(
2505 if (error) 2498 if (error)
2506 goto std_return; 2499 goto std_return;
2507 2500
2508 if (is_dir) { 2501 if (is_dir)
2509 tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR); 2502 tp = xfs_trans_alloc(mp, XFS_TRANS_RMDIR);
2510 log_count = XFS_DEFAULT_LOG_COUNT; 2503 else
2511 } else {
2512 tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE); 2504 tp = xfs_trans_alloc(mp, XFS_TRANS_REMOVE);
2513 log_count = XFS_REMOVE_LOG_COUNT;
2514 }
2515 cancel_flags = XFS_TRANS_RELEASE_LOG_RES; 2505 cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
2516 2506
2517 /* 2507 /*
@@ -2579,9 +2569,6 @@ xfs_remove(
2579 if (error) 2569 if (error)
2580 goto out_trans_cancel; 2570 goto out_trans_cancel;
2581 2571
2582 /* Determine if this is the last link while the inode is locked */
2583 link_zero = (ip->i_d.di_nlink == 0);
2584
2585 xfs_bmap_init(&free_list, &first_block); 2572 xfs_bmap_init(&free_list, &first_block);
2586 error = xfs_dir_removename(tp, dp, name, ip->i_ino, 2573 error = xfs_dir_removename(tp, dp, name, ip->i_ino,
2587 &first_block, &free_list, resblks); 2574 &first_block, &free_list, resblks);
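The xfs_create() simplification relies on M_RES(mp) already carrying fully formed reservations (tr_logres, tr_logcount and tr_logflags), so callers can pass a pointer instead of assembling a local struct xfs_trans_res by hand. The resulting pattern:

/* point at the canned reservation; no field-by-field copying */
struct xfs_trans_res	*tres;

tres = is_dir ? &M_RES(mp)->tr_mkdir : &M_RES(mp)->tr_create;
error = xfs_trans_reserve(tp, tres, resblks, 0);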
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 9af2882e1f4c..4ed2ba9342dc 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -20,7 +20,6 @@
 
 #include "xfs_inode_buf.h"
 #include "xfs_inode_fork.h"
-#include "xfs_dinode.h"
 
 /*
  * Kernel only inode definitions
@@ -324,7 +323,6 @@ static inline int xfs_isiflocked(struct xfs_inode *ip)
 	 (((pip)->i_mount->m_flags & XFS_MOUNT_GRPID) || \
 	  ((pip)->i_d.di_mode & S_ISGID))
 
-
 int		xfs_release(struct xfs_inode *ip);
 void		xfs_inactive(struct xfs_inode *ip);
 int		xfs_lookup(struct xfs_inode *dp, struct xfs_name *name,
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 63de0b0acc32..bf13a5a7e2f4 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -20,8 +20,6 @@
20#include "xfs_format.h" 20#include "xfs_format.h"
21#include "xfs_log_format.h" 21#include "xfs_log_format.h"
22#include "xfs_trans_resv.h" 22#include "xfs_trans_resv.h"
23#include "xfs_sb.h"
24#include "xfs_ag.h"
25#include "xfs_mount.h" 23#include "xfs_mount.h"
26#include "xfs_inode.h" 24#include "xfs_inode.h"
27#include "xfs_trans.h" 25#include "xfs_trans.h"
@@ -29,7 +27,6 @@
29#include "xfs_error.h" 27#include "xfs_error.h"
30#include "xfs_trace.h" 28#include "xfs_trace.h"
31#include "xfs_trans_priv.h" 29#include "xfs_trans_priv.h"
32#include "xfs_dinode.h"
33#include "xfs_log.h" 30#include "xfs_log.h"
34 31
35 32
diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
index 24c926b6fe85..a1831980a68e 100644
--- a/fs/xfs/xfs_ioctl.c
+++ b/fs/xfs/xfs_ioctl.c
@@ -21,8 +21,6 @@
21#include "xfs_format.h" 21#include "xfs_format.h"
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_sb.h"
25#include "xfs_ag.h"
26#include "xfs_mount.h" 24#include "xfs_mount.h"
27#include "xfs_inode.h" 25#include "xfs_inode.h"
28#include "xfs_ioctl.h" 26#include "xfs_ioctl.h"
@@ -40,7 +38,6 @@
40#include "xfs_trace.h" 38#include "xfs_trace.h"
41#include "xfs_icache.h" 39#include "xfs_icache.h"
42#include "xfs_symlink.h" 40#include "xfs_symlink.h"
43#include "xfs_dinode.h"
44#include "xfs_trans.h" 41#include "xfs_trans.h"
45 42
46#include <linux/capability.h> 43#include <linux/capability.h>
diff --git a/fs/xfs/xfs_ioctl32.c b/fs/xfs/xfs_ioctl32.c
index 94ce027e28e3..ec6772866f3d 100644
--- a/fs/xfs/xfs_ioctl32.c
+++ b/fs/xfs/xfs_ioctl32.c
@@ -25,8 +25,6 @@
25#include "xfs_format.h" 25#include "xfs_format.h"
26#include "xfs_log_format.h" 26#include "xfs_log_format.h"
27#include "xfs_trans_resv.h" 27#include "xfs_trans_resv.h"
28#include "xfs_sb.h"
29#include "xfs_ag.h"
30#include "xfs_mount.h" 28#include "xfs_mount.h"
31#include "xfs_inode.h" 29#include "xfs_inode.h"
32#include "xfs_itable.h" 30#include "xfs_itable.h"
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index afcf3c926565..c980e2a5086b 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -21,8 +21,6 @@
21#include "xfs_format.h" 21#include "xfs_format.h"
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_sb.h"
25#include "xfs_ag.h"
26#include "xfs_mount.h" 24#include "xfs_mount.h"
27#include "xfs_inode.h" 25#include "xfs_inode.h"
28#include "xfs_btree.h" 26#include "xfs_btree.h"
@@ -38,7 +36,6 @@
38#include "xfs_quota.h" 36#include "xfs_quota.h"
39#include "xfs_dquot_item.h" 37#include "xfs_dquot_item.h"
40#include "xfs_dquot.h" 38#include "xfs_dquot.h"
41#include "xfs_dinode.h"
42 39
43 40
44#define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \ 41#define XFS_WRITEIO_ALIGN(mp,off) (((off) >> mp->m_writeio_log) \
@@ -52,7 +49,6 @@ xfs_iomap_eof_align_last_fsb(
52 xfs_extlen_t extsize, 49 xfs_extlen_t extsize,
53 xfs_fileoff_t *last_fsb) 50 xfs_fileoff_t *last_fsb)
54{ 51{
55 xfs_fileoff_t new_last_fsb = 0;
56 xfs_extlen_t align = 0; 52 xfs_extlen_t align = 0;
57 int eof, error; 53 int eof, error;
58 54
@@ -70,8 +66,8 @@ xfs_iomap_eof_align_last_fsb(
70 else if (mp->m_dalign) 66 else if (mp->m_dalign)
71 align = mp->m_dalign; 67 align = mp->m_dalign;
72 68
73 if (align && XFS_ISIZE(ip) >= XFS_FSB_TO_B(mp, align)) 69 if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
74 new_last_fsb = roundup_64(*last_fsb, align); 70 align = 0;
75 } 71 }
76 72
77 /* 73 /*
@@ -79,14 +75,14 @@ xfs_iomap_eof_align_last_fsb(
79 * (when file on a real-time subvolume or has di_extsize hint). 75 * (when file on a real-time subvolume or has di_extsize hint).
80 */ 76 */
81 if (extsize) { 77 if (extsize) {
82 if (new_last_fsb) 78 if (align)
83 align = roundup_64(new_last_fsb, extsize); 79 align = roundup_64(align, extsize);
84 else 80 else
85 align = extsize; 81 align = extsize;
86 new_last_fsb = roundup_64(*last_fsb, align);
87 } 82 }
88 83
89 if (new_last_fsb) { 84 if (align) {
85 xfs_fileoff_t new_last_fsb = roundup_64(*last_fsb, align);
90 error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof); 86 error = xfs_bmap_eof(ip, new_last_fsb, XFS_DATA_FORK, &eof);
91 if (error) 87 if (error)
92 return error; 88 return error;
@@ -264,7 +260,6 @@ xfs_iomap_eof_want_preallocate(
264{ 260{
265 xfs_fileoff_t start_fsb; 261 xfs_fileoff_t start_fsb;
266 xfs_filblks_t count_fsb; 262 xfs_filblks_t count_fsb;
267 xfs_fsblock_t firstblock;
268 int n, error, imaps; 263 int n, error, imaps;
269 int found_delalloc = 0; 264 int found_delalloc = 0;
270 265
@@ -289,7 +284,6 @@ xfs_iomap_eof_want_preallocate(
289 count_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes); 284 count_fsb = XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes);
290 while (count_fsb > 0) { 285 while (count_fsb > 0) {
291 imaps = nimaps; 286 imaps = nimaps;
292 firstblock = NULLFSBLOCK;
293 error = xfs_bmapi_read(ip, start_fsb, count_fsb, imap, &imaps, 287 error = xfs_bmapi_read(ip, start_fsb, count_fsb, imap, &imaps,
294 0); 288 0);
295 if (error) 289 if (error)
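A worked example of the reworked alignment flow, for illustration only:

/*
 * Example: align = 8 (stripe), extsize = 16 (hint)
 *   align        = roundup_64(8, 16)   == 16
 *   new_last_fsb = roundup_64(103, 16) == 112  (for *last_fsb == 103)
 * With align == 0 and extsize == 0, *last_fsb is left untouched.
 */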
diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
index ec6dcdc181ee..c50311cae1b1 100644
--- a/fs/xfs/xfs_iops.c
+++ b/fs/xfs/xfs_iops.c
@@ -21,8 +21,6 @@
21#include "xfs_format.h" 21#include "xfs_format.h"
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_sb.h"
25#include "xfs_ag.h"
26#include "xfs_mount.h" 24#include "xfs_mount.h"
27#include "xfs_da_format.h" 25#include "xfs_da_format.h"
28#include "xfs_inode.h" 26#include "xfs_inode.h"
@@ -37,8 +35,7 @@
37#include "xfs_icache.h" 35#include "xfs_icache.h"
38#include "xfs_symlink.h" 36#include "xfs_symlink.h"
39#include "xfs_da_btree.h" 37#include "xfs_da_btree.h"
40#include "xfs_dir2_priv.h" 38#include "xfs_dir2.h"
41#include "xfs_dinode.h"
42#include "xfs_trans_space.h" 39#include "xfs_trans_space.h"
43 40
44#include <linux/capability.h> 41#include <linux/capability.h>
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 894924a5129b..82e314258f73 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -21,9 +21,6 @@
21#include "xfs_format.h" 21#include "xfs_format.h"
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_inum.h"
25#include "xfs_sb.h"
26#include "xfs_ag.h"
27#include "xfs_mount.h" 24#include "xfs_mount.h"
28#include "xfs_inode.h" 25#include "xfs_inode.h"
29#include "xfs_btree.h" 26#include "xfs_btree.h"
@@ -33,7 +30,6 @@
33#include "xfs_error.h" 30#include "xfs_error.h"
34#include "xfs_trace.h" 31#include "xfs_trace.h"
35#include "xfs_icache.h" 32#include "xfs_icache.h"
36#include "xfs_dinode.h"
37 33
38STATIC int 34STATIC int
39xfs_internal_inum( 35xfs_internal_inum(
@@ -352,7 +348,6 @@ xfs_bulkstat(
352 int *done) /* 1 if there are more stats to get */ 348 int *done) /* 1 if there are more stats to get */
353{ 349{
354 xfs_buf_t *agbp; /* agi header buffer */ 350 xfs_buf_t *agbp; /* agi header buffer */
355 xfs_agi_t *agi; /* agi header data */
356 xfs_agino_t agino; /* inode # in allocation group */ 351 xfs_agino_t agino; /* inode # in allocation group */
357 xfs_agnumber_t agno; /* allocation group number */ 352 xfs_agnumber_t agno; /* allocation group number */
358 xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */ 353 xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */
@@ -403,7 +398,6 @@ xfs_bulkstat(
403 error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp); 398 error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
404 if (error) 399 if (error)
405 break; 400 break;
406 agi = XFS_BUF_TO_AGI(agbp);
407 /* 401 /*
408 * Allocate and initialize a btree cursor for ialloc btree. 402 * Allocate and initialize a btree cursor for ialloc btree.
409 */ 403 */
diff --git a/fs/xfs/xfs_linux.h b/fs/xfs/xfs_linux.h
index 6a51619d8690..c31d2c2eadc4 100644
--- a/fs/xfs/xfs_linux.h
+++ b/fs/xfs/xfs_linux.h
@@ -384,4 +384,10 @@ static inline __uint64_t howmany_64(__uint64_t x, __uint32_t y)
 #endif /* XFS_WARN */
 #endif /* DEBUG */
 
+#ifdef CONFIG_XFS_RT
+#define XFS_IS_REALTIME_INODE(ip) ((ip)->i_d.di_flags & XFS_DIFLAG_REALTIME)
+#else
+#define XFS_IS_REALTIME_INODE(ip) (0)
+#endif
+
 #endif /* __XFS_LINUX__ */
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index fe88ef67f93a..e408bf5a3ff7 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -21,8 +21,6 @@
21#include "xfs_format.h" 21#include "xfs_format.h"
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_sb.h"
25#include "xfs_ag.h"
26#include "xfs_mount.h" 24#include "xfs_mount.h"
27#include "xfs_error.h" 25#include "xfs_error.h"
28#include "xfs_trans.h" 26#include "xfs_trans.h"
@@ -1031,7 +1029,7 @@ xfs_log_need_covered(xfs_mount_t *mp)
1031 struct xlog *log = mp->m_log; 1029 struct xlog *log = mp->m_log;
1032 int needed = 0; 1030 int needed = 0;
1033 1031
1034 if (!xfs_fs_writable(mp)) 1032 if (!xfs_fs_writable(mp, SB_FREEZE_WRITE))
1035 return 0; 1033 return 0;
1036 1034
1037 if (!xlog_cil_empty(log)) 1035 if (!xlog_cil_empty(log))
@@ -1808,6 +1806,8 @@ xlog_sync(
1808 XFS_BUF_ZEROFLAGS(bp); 1806 XFS_BUF_ZEROFLAGS(bp);
1809 XFS_BUF_ASYNC(bp); 1807 XFS_BUF_ASYNC(bp);
1810 bp->b_flags |= XBF_SYNCIO; 1808 bp->b_flags |= XBF_SYNCIO;
1809 /* use high priority completion wq */
1810 bp->b_ioend_wq = log->l_mp->m_log_workqueue;
1811 1811
1812 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) { 1812 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) {
1813 bp->b_flags |= XBF_FUA; 1813 bp->b_flags |= XBF_FUA;
@@ -1856,6 +1856,8 @@ xlog_sync(
1856 bp->b_flags |= XBF_SYNCIO; 1856 bp->b_flags |= XBF_SYNCIO;
1857 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER) 1857 if (log->l_mp->m_flags & XFS_MOUNT_BARRIER)
1858 bp->b_flags |= XBF_FUA; 1858 bp->b_flags |= XBF_FUA;
1859 /* use high priority completion wq */
1860 bp->b_ioend_wq = log->l_mp->m_log_workqueue;
1859 1861
1860 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1); 1862 ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
1861 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize); 1863 ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index f506c457011e..45cc0ce18adf 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -17,11 +17,10 @@
 
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_format.h"
 #include "xfs_log_format.h"
 #include "xfs_shared.h"
 #include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
 #include "xfs_mount.h"
 #include "xfs_error.h"
 #include "xfs_alloc.h"
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 00cd7f3a8f59..a5a945fc3bdc 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -22,11 +22,10 @@
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_bit.h" 24#include "xfs_bit.h"
25#include "xfs_inum.h"
26#include "xfs_sb.h" 25#include "xfs_sb.h"
27#include "xfs_ag.h"
28#include "xfs_mount.h" 26#include "xfs_mount.h"
29#include "xfs_da_format.h" 27#include "xfs_da_format.h"
28#include "xfs_da_btree.h"
30#include "xfs_inode.h" 29#include "xfs_inode.h"
31#include "xfs_trans.h" 30#include "xfs_trans.h"
32#include "xfs_log.h" 31#include "xfs_log.h"
@@ -42,7 +41,6 @@
42#include "xfs_trace.h" 41#include "xfs_trace.h"
43#include "xfs_icache.h" 42#include "xfs_icache.h"
44#include "xfs_bmap_btree.h" 43#include "xfs_bmap_btree.h"
45#include "xfs_dinode.h"
46#include "xfs_error.h" 44#include "xfs_error.h"
47#include "xfs_dir2.h" 45#include "xfs_dir2.h"
48 46
diff --git a/fs/xfs/xfs_message.c b/fs/xfs/xfs_message.c
index 63ca2f0420b1..d8b67547ab34 100644
--- a/fs/xfs/xfs_message.c
+++ b/fs/xfs/xfs_message.c
@@ -17,10 +17,9 @@
 
 #include "xfs.h"
 #include "xfs_fs.h"
+#include "xfs_format.h"
 #include "xfs_log_format.h"
 #include "xfs_trans_resv.h"
-#include "xfs_sb.h"
-#include "xfs_ag.h"
 #include "xfs_mount.h"
 
 /*
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 51435dbce9c4..d3d38836f87f 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -22,11 +22,10 @@
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_bit.h" 24#include "xfs_bit.h"
25#include "xfs_inum.h"
26#include "xfs_sb.h" 25#include "xfs_sb.h"
27#include "xfs_ag.h"
28#include "xfs_mount.h" 26#include "xfs_mount.h"
29#include "xfs_da_format.h" 27#include "xfs_da_format.h"
28#include "xfs_da_btree.h"
30#include "xfs_inode.h" 29#include "xfs_inode.h"
31#include "xfs_dir2.h" 30#include "xfs_dir2.h"
32#include "xfs_ialloc.h" 31#include "xfs_ialloc.h"
@@ -41,7 +40,6 @@
41#include "xfs_fsops.h" 40#include "xfs_fsops.h"
42#include "xfs_trace.h" 41#include "xfs_trace.h"
43#include "xfs_icache.h" 42#include "xfs_icache.h"
44#include "xfs_dinode.h"
45#include "xfs_sysfs.h" 43#include "xfs_sysfs.h"
46 44
47 45
@@ -1074,11 +1072,23 @@ xfs_unmountfs(
1074 xfs_sysfs_del(&mp->m_kobj); 1072 xfs_sysfs_del(&mp->m_kobj);
1075} 1073}
1076 1074
1077int 1075/*
1078xfs_fs_writable(xfs_mount_t *mp) 1076 * Determine whether modifications can proceed. The caller specifies the minimum
1077 * freeze level for which modifications should not be allowed. This allows
1078 * certain operations to proceed while the freeze sequence is in progress, if
1079 * necessary.
1080 */
1081bool
1082xfs_fs_writable(
1083 struct xfs_mount *mp,
1084 int level)
1079{ 1085{
1080 return !(mp->m_super->s_writers.frozen || XFS_FORCED_SHUTDOWN(mp) || 1086 ASSERT(level > SB_UNFROZEN);
1081 (mp->m_flags & XFS_MOUNT_RDONLY)); 1087 if ((mp->m_super->s_writers.frozen >= level) ||
1088 XFS_FORCED_SHUTDOWN(mp) || (mp->m_flags & XFS_MOUNT_RDONLY))
1089 return false;
1090
1091 return true;
1082} 1092}
1083 1093
1084/* 1094/*
@@ -1086,9 +1096,9 @@ xfs_fs_writable(xfs_mount_t *mp)
1086 * 1096 *
1087 * Sync the superblock counters to disk. 1097 * Sync the superblock counters to disk.
1088 * 1098 *
1089 * Note this code can be called during the process of freezing, so 1099 * Note this code can be called during the process of freezing, so we use the
1090 * we may need to use the transaction allocator which does not 1100 * transaction allocator that does not block when the transaction subsystem is
1091 * block when the transaction subsystem is in its frozen state. 1101 * in its frozen state.
1092 */ 1102 */
1093int 1103int
1094xfs_log_sbcount(xfs_mount_t *mp) 1104xfs_log_sbcount(xfs_mount_t *mp)
@@ -1096,7 +1106,8 @@ xfs_log_sbcount(xfs_mount_t *mp)
1096 xfs_trans_t *tp; 1106 xfs_trans_t *tp;
1097 int error; 1107 int error;
1098 1108
1099 if (!xfs_fs_writable(mp)) 1109 /* allow this to proceed during the freeze sequence... */
1110 if (!xfs_fs_writable(mp, SB_FREEZE_COMPLETE))
1100 return 0; 1111 return 0;
1101 1112
1102 xfs_icsb_sync_counters(mp, 0); 1113 xfs_icsb_sync_counters(mp, 0);
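
The reworked xfs_fs_writable() gates modifications on a caller-supplied freeze level instead of refusing all writes the moment a freeze begins, which is what lets xfs_log_sbcount() keep running mid-freeze. A minimal standalone sketch of the same check, assuming the usual SB_FREEZE_* ordering from include/linux/fs.h (the enum values here are illustrative stand-ins, not the kernel's definitions):

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative stand-ins for the kernel's freeze levels. */
	enum { SB_UNFROZEN = 0, SB_FREEZE_WRITE, SB_FREEZE_PAGEFAULT,
	       SB_FREEZE_FS, SB_FREEZE_COMPLETE };

	/* Modifications may proceed only while frozen below 'level'. */
	static bool fs_writable(int frozen, int level)
	{
		return frozen < level;
	}

	int main(void)
	{
		/* xfs_log_sbcount() passes SB_FREEZE_COMPLETE, so it is
		 * still allowed while the freeze sequence is underway. */
		printf("sbcount during freeze: %d\n",
		       fs_writable(SB_FREEZE_FS, SB_FREEZE_COMPLETE)); /* 1 */
		/* An ordinary write passes SB_FREEZE_WRITE and is refused. */
		printf("write during freeze:   %d\n",
		       fs_writable(SB_FREEZE_FS, SB_FREEZE_WRITE));    /* 0 */
		return 0;
	}
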
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index b0447c86e7e2..22ccf69d4d3c 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -168,6 +168,7 @@ typedef struct xfs_mount {
168 /* low free space thresholds */ 168 /* low free space thresholds */
169 struct xfs_kobj m_kobj; 169 struct xfs_kobj m_kobj;
170 170
171 struct workqueue_struct *m_buf_workqueue;
171 struct workqueue_struct *m_data_workqueue; 172 struct workqueue_struct *m_data_workqueue;
172 struct workqueue_struct *m_unwritten_workqueue; 173 struct workqueue_struct *m_unwritten_workqueue;
173 struct workqueue_struct *m_cil_workqueue; 174 struct workqueue_struct *m_cil_workqueue;
@@ -320,10 +321,7 @@ typedef struct xfs_mod_sb {
320 321
321/* 322/*
322 * Per-ag incore structure, copies of information in agf and agi, to improve the 323 * Per-ag incore structure, copies of information in agf and agi, to improve the
323 * performance of allocation group selection. This is defined for the kernel 324 * performance of allocation group selection.
324 * only, and hence is defined here instead of in xfs_ag.h. You need the struct
325 * xfs_mount to be defined to look up a xfs_perag anyway (via mp->m_perag_tree),
326 * so this doesn't introduce any strange header file dependencies.
327 */ 325 */
328typedef struct xfs_perag { 326typedef struct xfs_perag {
329 struct xfs_mount *pag_mount; /* owner filesystem */ 327 struct xfs_mount *pag_mount; /* owner filesystem */
@@ -384,7 +382,7 @@ extern int xfs_mount_log_sb(xfs_mount_t *, __int64_t);
384extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int); 382extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int);
385extern int xfs_readsb(xfs_mount_t *, int); 383extern int xfs_readsb(xfs_mount_t *, int);
386extern void xfs_freesb(xfs_mount_t *); 384extern void xfs_freesb(xfs_mount_t *);
387extern int xfs_fs_writable(xfs_mount_t *); 385extern bool xfs_fs_writable(struct xfs_mount *mp, int level);
388extern int xfs_sb_validate_fsb_count(struct xfs_sb *, __uint64_t); 386extern int xfs_sb_validate_fsb_count(struct xfs_sb *, __uint64_t);
389 387
390extern int xfs_dev_is_read_only(struct xfs_mount *, char *); 388extern int xfs_dev_is_read_only(struct xfs_mount *, char *);
diff --git a/fs/xfs/xfs_qm.c b/fs/xfs/xfs_qm.c
index d68f23021af3..79fb19dd9c83 100644
--- a/fs/xfs/xfs_qm.c
+++ b/fs/xfs/xfs_qm.c
@@ -23,7 +23,6 @@
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_bit.h" 24#include "xfs_bit.h"
25#include "xfs_sb.h" 25#include "xfs_sb.h"
26#include "xfs_ag.h"
27#include "xfs_mount.h" 26#include "xfs_mount.h"
28#include "xfs_inode.h" 27#include "xfs_inode.h"
29#include "xfs_ialloc.h" 28#include "xfs_ialloc.h"
@@ -38,7 +37,6 @@
38#include "xfs_trace.h" 37#include "xfs_trace.h"
39#include "xfs_icache.h" 38#include "xfs_icache.h"
40#include "xfs_cksum.h" 39#include "xfs_cksum.h"
41#include "xfs_dinode.h"
42 40
43/* 41/*
44 * The global quota manager. There is only one of these for the entire 42 * The global quota manager. There is only one of these for the entire
@@ -1749,23 +1747,21 @@ xfs_qm_vop_dqalloc(
1749 xfs_iunlock(ip, lockflags); 1747 xfs_iunlock(ip, lockflags);
1750 if (O_udqpp) 1748 if (O_udqpp)
1751 *O_udqpp = uq; 1749 *O_udqpp = uq;
1752 else if (uq) 1750 else
1753 xfs_qm_dqrele(uq); 1751 xfs_qm_dqrele(uq);
1754 if (O_gdqpp) 1752 if (O_gdqpp)
1755 *O_gdqpp = gq; 1753 *O_gdqpp = gq;
1756 else if (gq) 1754 else
1757 xfs_qm_dqrele(gq); 1755 xfs_qm_dqrele(gq);
1758 if (O_pdqpp) 1756 if (O_pdqpp)
1759 *O_pdqpp = pq; 1757 *O_pdqpp = pq;
1760 else if (pq) 1758 else
1761 xfs_qm_dqrele(pq); 1759 xfs_qm_dqrele(pq);
1762 return 0; 1760 return 0;
1763 1761
1764error_rele: 1762error_rele:
1765 if (gq) 1763 xfs_qm_dqrele(gq);
1766 xfs_qm_dqrele(gq); 1764 xfs_qm_dqrele(uq);
1767 if (uq)
1768 xfs_qm_dqrele(uq);
1769 return error; 1765 return error;
1770} 1766}
1771 1767
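
The dqalloc cleanup above drops the "else if (uq)" style guards, which only works because xfs_qm_dqrele() tolerates a NULL dquot the way kfree() tolerates a NULL pointer; that is an assumption the hunk relies on rather than shows. A standalone sketch of the NULL-tolerant release idiom:

	#include <stdio.h>
	#include <stdlib.h>

	struct dquot { int refcount; };

	/* NULL-tolerant release, in the style of kfree()/xfs_qm_dqrele(). */
	static void dqrele(struct dquot *dq)
	{
		if (!dq)
			return;		/* callers need no 'if (dq)' guard */
		if (--dq->refcount == 0)
			free(dq);
	}

	int main(void)
	{
		struct dquot *uq = calloc(1, sizeof(*uq)), *gq = NULL;

		uq->refcount = 1;
		/* error path: release whatever was acquired, unconditionally */
		dqrele(gq);
		dqrele(uq);
		puts("released without per-pointer guards");
		return 0;
	}
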
diff --git a/fs/xfs/xfs_qm_bhv.c b/fs/xfs/xfs_qm_bhv.c
index 2c61e61b0205..3e52d5de7ae1 100644
--- a/fs/xfs/xfs_qm_bhv.c
+++ b/fs/xfs/xfs_qm_bhv.c
@@ -20,8 +20,6 @@
20#include "xfs_format.h" 20#include "xfs_format.h"
21#include "xfs_log_format.h" 21#include "xfs_log_format.h"
22#include "xfs_trans_resv.h" 22#include "xfs_trans_resv.h"
23#include "xfs_sb.h"
24#include "xfs_ag.h"
25#include "xfs_quota.h" 23#include "xfs_quota.h"
26#include "xfs_mount.h" 24#include "xfs_mount.h"
27#include "xfs_inode.h" 25#include "xfs_inode.h"
diff --git a/fs/xfs/xfs_qm_syscalls.c b/fs/xfs/xfs_qm_syscalls.c
index 80f2d77d929a..74fca68e43b6 100644
--- a/fs/xfs/xfs_qm_syscalls.c
+++ b/fs/xfs/xfs_qm_syscalls.c
@@ -26,7 +26,6 @@
26#include "xfs_trans_resv.h" 26#include "xfs_trans_resv.h"
27#include "xfs_bit.h" 27#include "xfs_bit.h"
28#include "xfs_sb.h" 28#include "xfs_sb.h"
29#include "xfs_ag.h"
30#include "xfs_mount.h" 29#include "xfs_mount.h"
31#include "xfs_inode.h" 30#include "xfs_inode.h"
32#include "xfs_trans.h" 31#include "xfs_trans.h"
@@ -784,19 +783,21 @@ xfs_qm_log_quotaoff(
784{ 783{
785 xfs_trans_t *tp; 784 xfs_trans_t *tp;
786 int error; 785 int error;
787 xfs_qoff_logitem_t *qoffi=NULL; 786 xfs_qoff_logitem_t *qoffi;
788 uint oldsbqflag=0; 787
788 *qoffstartp = NULL;
789 789
790 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF); 790 tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF);
791 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_quotaoff, 0, 0); 791 error = xfs_trans_reserve(tp, &M_RES(mp)->tr_qm_quotaoff, 0, 0);
792 if (error) 792 if (error) {
793 goto error0; 793 xfs_trans_cancel(tp, 0);
794 goto out;
795 }
794 796
795 qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT); 797 qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
796 xfs_trans_log_quotaoff_item(tp, qoffi); 798 xfs_trans_log_quotaoff_item(tp, qoffi);
797 799
798 spin_lock(&mp->m_sb_lock); 800 spin_lock(&mp->m_sb_lock);
799 oldsbqflag = mp->m_sb.sb_qflags;
800 mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL; 801 mp->m_sb.sb_qflags = (mp->m_qflags & ~(flags)) & XFS_MOUNT_QUOTA_ALL;
801 spin_unlock(&mp->m_sb_lock); 802 spin_unlock(&mp->m_sb_lock);
802 803
@@ -809,19 +810,11 @@ xfs_qm_log_quotaoff(
809 */ 810 */
810 xfs_trans_set_sync(tp); 811 xfs_trans_set_sync(tp);
811 error = xfs_trans_commit(tp, 0); 812 error = xfs_trans_commit(tp, 0);
813 if (error)
814 goto out;
812 815
813error0:
814 if (error) {
815 xfs_trans_cancel(tp, 0);
816 /*
817 * No one else is modifying sb_qflags, so this is OK.
818 * We still hold the quotaofflock.
819 */
820 spin_lock(&mp->m_sb_lock);
821 mp->m_sb.sb_qflags = oldsbqflag;
822 spin_unlock(&mp->m_sb_lock);
823 }
824 *qoffstartp = qoffi; 816 *qoffstartp = qoffi;
817out:
825 return error; 818 return error;
826} 819}
827 820
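
The quotaoff rewrite above replaces the old rollback of sb_qflags with a simpler rule: cancel the transaction at the point of failure, initialise the output pointer up front, and fall through a single "out" exit. A small standalone sketch of that single-exit shape, with stand-in names:

	#include <stdio.h>

	struct item { int logged; };

	static int reserve(void) { return 0; }	/* 0 on success */

	/* Single-exit error handling: cancel at the failure site, one 'out'. */
	static int log_quotaoff(struct item **startp)
	{
		static struct item it;
		struct item *qoffi;
		int error;

		*startp = NULL;			/* defined even on failure */

		error = reserve();
		if (error)
			goto out;		/* nothing to roll back */

		qoffi = &it;
		qoffi->logged = 1;
		*startp = qoffi;
	out:
		return error;
	}

	int main(void)
	{
		struct item *qoffi;
		return log_quotaoff(&qoffi);
	}
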
diff --git a/fs/xfs/xfs_quotaops.c b/fs/xfs/xfs_quotaops.c
index b238027df987..7542bbeca6a1 100644
--- a/fs/xfs/xfs_quotaops.c
+++ b/fs/xfs/xfs_quotaops.c
@@ -19,8 +19,6 @@
19#include "xfs_format.h" 19#include "xfs_format.h"
20#include "xfs_log_format.h" 20#include "xfs_log_format.h"
21#include "xfs_trans_resv.h" 21#include "xfs_trans_resv.h"
22#include "xfs_sb.h"
23#include "xfs_ag.h"
24#include "xfs_mount.h" 22#include "xfs_mount.h"
25#include "xfs_inode.h" 23#include "xfs_inode.h"
26#include "xfs_quota.h" 24#include "xfs_quota.h"
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index e1175ea9b551..f2079b6911cc 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -22,8 +22,6 @@
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_bit.h" 24#include "xfs_bit.h"
25#include "xfs_sb.h"
26#include "xfs_ag.h"
27#include "xfs_mount.h" 25#include "xfs_mount.h"
28#include "xfs_inode.h" 26#include "xfs_inode.h"
29#include "xfs_bmap.h" 27#include "xfs_bmap.h"
@@ -36,7 +34,6 @@
36#include "xfs_trace.h" 34#include "xfs_trace.h"
37#include "xfs_buf.h" 35#include "xfs_buf.h"
38#include "xfs_icache.h" 36#include "xfs_icache.h"
39#include "xfs_dinode.h"
40#include "xfs_rtalloc.h" 37#include "xfs_rtalloc.h"
41 38
42 39
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 206b97fd1d8a..19cbda196369 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -21,9 +21,7 @@
21#include "xfs_format.h" 21#include "xfs_format.h"
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_inum.h"
25#include "xfs_sb.h" 24#include "xfs_sb.h"
26#include "xfs_ag.h"
27#include "xfs_mount.h" 25#include "xfs_mount.h"
28#include "xfs_da_format.h" 26#include "xfs_da_format.h"
29#include "xfs_inode.h" 27#include "xfs_inode.h"
@@ -44,7 +42,6 @@
44#include "xfs_icache.h" 42#include "xfs_icache.h"
45#include "xfs_trace.h" 43#include "xfs_trace.h"
46#include "xfs_icreate_item.h" 44#include "xfs_icreate_item.h"
47#include "xfs_dinode.h"
48#include "xfs_filestream.h" 45#include "xfs_filestream.h"
49#include "xfs_quota.h" 46#include "xfs_quota.h"
50#include "xfs_sysfs.h" 47#include "xfs_sysfs.h"
@@ -796,8 +793,7 @@ xfs_open_devices(
796 out_free_ddev_targ: 793 out_free_ddev_targ:
797 xfs_free_buftarg(mp, mp->m_ddev_targp); 794 xfs_free_buftarg(mp, mp->m_ddev_targp);
798 out_close_rtdev: 795 out_close_rtdev:
799 if (rtdev) 796 xfs_blkdev_put(rtdev);
800 xfs_blkdev_put(rtdev);
801 out_close_logdev: 797 out_close_logdev:
802 if (logdev && logdev != ddev) 798 if (logdev && logdev != ddev)
803 xfs_blkdev_put(logdev); 799 xfs_blkdev_put(logdev);
@@ -842,10 +838,15 @@ STATIC int
842xfs_init_mount_workqueues( 838xfs_init_mount_workqueues(
843 struct xfs_mount *mp) 839 struct xfs_mount *mp)
844{ 840{
841 mp->m_buf_workqueue = alloc_workqueue("xfs-buf/%s",
842 WQ_MEM_RECLAIM|WQ_FREEZABLE, 1, mp->m_fsname);
843 if (!mp->m_buf_workqueue)
844 goto out;
845
845 mp->m_data_workqueue = alloc_workqueue("xfs-data/%s", 846 mp->m_data_workqueue = alloc_workqueue("xfs-data/%s",
846 WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname); 847 WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
847 if (!mp->m_data_workqueue) 848 if (!mp->m_data_workqueue)
848 goto out; 849 goto out_destroy_buf;
849 850
850 mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s", 851 mp->m_unwritten_workqueue = alloc_workqueue("xfs-conv/%s",
851 WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname); 852 WQ_MEM_RECLAIM|WQ_FREEZABLE, 0, mp->m_fsname);
@@ -863,7 +864,7 @@ xfs_init_mount_workqueues(
863 goto out_destroy_cil; 864 goto out_destroy_cil;
864 865
865 mp->m_log_workqueue = alloc_workqueue("xfs-log/%s", 866 mp->m_log_workqueue = alloc_workqueue("xfs-log/%s",
866 WQ_FREEZABLE, 0, mp->m_fsname); 867 WQ_FREEZABLE|WQ_HIGHPRI, 0, mp->m_fsname);
867 if (!mp->m_log_workqueue) 868 if (!mp->m_log_workqueue)
868 goto out_destroy_reclaim; 869 goto out_destroy_reclaim;
869 870
@@ -884,6 +885,8 @@ out_destroy_unwritten:
884 destroy_workqueue(mp->m_unwritten_workqueue); 885 destroy_workqueue(mp->m_unwritten_workqueue);
885out_destroy_data_iodone_queue: 886out_destroy_data_iodone_queue:
886 destroy_workqueue(mp->m_data_workqueue); 887 destroy_workqueue(mp->m_data_workqueue);
888out_destroy_buf:
889 destroy_workqueue(mp->m_buf_workqueue);
887out: 890out:
888 return -ENOMEM; 891 return -ENOMEM;
889} 892}
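
Adding xfs-buf as the first workqueue shifts every later failure target: the previous first allocation now unwinds through a new out_destroy_buf label, keeping teardown in strict reverse order of construction. A runnable sketch of the unwind-ladder idiom, with malloc standing in for alloc_workqueue:

	#include <stdlib.h>

	/* Reverse-order unwind ladder, as in xfs_init_mount_workqueues(). */
	static int init_resources(void)
	{
		void *buf_wq, *data_wq, *conv_wq;

		buf_wq = malloc(16);
		if (!buf_wq)
			goto out;
		data_wq = malloc(16);
		if (!data_wq)
			goto out_destroy_buf;	/* new label for the new step */
		conv_wq = malloc(16);
		if (!conv_wq)
			goto out_destroy_data;

		/* success: demo only, so release everything and return */
		free(conv_wq);
		free(data_wq);
		free(buf_wq);
		return 0;

	out_destroy_data:
		free(data_wq);
	out_destroy_buf:
		free(buf_wq);
	out:
		return -1;	/* stands in for -ENOMEM */
	}

	int main(void)
	{
		return init_resources() ? 1 : 0;
	}
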
@@ -898,6 +901,7 @@ xfs_destroy_mount_workqueues(
898 destroy_workqueue(mp->m_cil_workqueue); 901 destroy_workqueue(mp->m_cil_workqueue);
899 destroy_workqueue(mp->m_data_workqueue); 902 destroy_workqueue(mp->m_data_workqueue);
900 destroy_workqueue(mp->m_unwritten_workqueue); 903 destroy_workqueue(mp->m_unwritten_workqueue);
904 destroy_workqueue(mp->m_buf_workqueue);
901} 905}
902 906
903/* 907/*
@@ -1000,7 +1004,6 @@ xfs_fs_evict_inode(
1000 clear_inode(inode); 1004 clear_inode(inode);
1001 XFS_STATS_INC(vn_rele); 1005 XFS_STATS_INC(vn_rele);
1002 XFS_STATS_INC(vn_remove); 1006 XFS_STATS_INC(vn_remove);
1003 XFS_STATS_DEC(vn_active);
1004 1007
1005 xfs_inactive(ip); 1008 xfs_inactive(ip);
1006} 1009}
diff --git a/fs/xfs/xfs_symlink.c b/fs/xfs/xfs_symlink.c
index 02ae62a998e0..25791df6f638 100644
--- a/fs/xfs/xfs_symlink.c
+++ b/fs/xfs/xfs_symlink.c
@@ -23,8 +23,6 @@
23#include "xfs_log_format.h" 23#include "xfs_log_format.h"
24#include "xfs_trans_resv.h" 24#include "xfs_trans_resv.h"
25#include "xfs_bit.h" 25#include "xfs_bit.h"
26#include "xfs_sb.h"
27#include "xfs_ag.h"
28#include "xfs_mount.h" 26#include "xfs_mount.h"
29#include "xfs_da_format.h" 27#include "xfs_da_format.h"
30#include "xfs_da_btree.h" 28#include "xfs_da_btree.h"
@@ -42,7 +40,6 @@
42#include "xfs_symlink.h" 40#include "xfs_symlink.h"
43#include "xfs_trans.h" 41#include "xfs_trans.h"
44#include "xfs_log.h" 42#include "xfs_log.h"
45#include "xfs_dinode.h"
46 43
47/* ----- Kernel only functions below ----- */ 44/* ----- Kernel only functions below ----- */
48STATIC int 45STATIC int
diff --git a/fs/xfs/xfs_trace.c b/fs/xfs/xfs_trace.c
index 1e85bcd0e418..13a029806805 100644
--- a/fs/xfs/xfs_trace.c
+++ b/fs/xfs/xfs_trace.c
@@ -21,8 +21,6 @@
21#include "xfs_format.h" 21#include "xfs_format.h"
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_sb.h"
25#include "xfs_ag.h"
26#include "xfs_mount.h" 24#include "xfs_mount.h"
27#include "xfs_da_format.h" 25#include "xfs_da_format.h"
28#include "xfs_inode.h" 26#include "xfs_inode.h"
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 30e8e3410955..fa3135b9bf04 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -22,8 +22,6 @@
22#include "xfs_format.h" 22#include "xfs_format.h"
23#include "xfs_log_format.h" 23#include "xfs_log_format.h"
24#include "xfs_trans_resv.h" 24#include "xfs_trans_resv.h"
25#include "xfs_sb.h"
26#include "xfs_ag.h"
27#include "xfs_mount.h" 25#include "xfs_mount.h"
28#include "xfs_inode.h" 26#include "xfs_inode.h"
29#include "xfs_extent_busy.h" 27#include "xfs_extent_busy.h"
diff --git a/fs/xfs/xfs_trans_ail.c b/fs/xfs/xfs_trans_ail.c
index 859482f53b5a..573aefb5a573 100644
--- a/fs/xfs/xfs_trans_ail.c
+++ b/fs/xfs/xfs_trans_ail.c
@@ -18,10 +18,9 @@
18 */ 18 */
19#include "xfs.h" 19#include "xfs.h"
20#include "xfs_fs.h" 20#include "xfs_fs.h"
21#include "xfs_format.h"
21#include "xfs_log_format.h" 22#include "xfs_log_format.h"
22#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
23#include "xfs_sb.h"
24#include "xfs_ag.h"
25#include "xfs_mount.h" 24#include "xfs_mount.h"
26#include "xfs_trans.h" 25#include "xfs_trans.h"
27#include "xfs_trans_priv.h" 26#include "xfs_trans_priv.h"
diff --git a/fs/xfs/xfs_trans_buf.c b/fs/xfs/xfs_trans_buf.c
index e2b2216b1635..0a4d4ab6d9a9 100644
--- a/fs/xfs/xfs_trans_buf.c
+++ b/fs/xfs/xfs_trans_buf.c
@@ -21,8 +21,6 @@
21#include "xfs_format.h" 21#include "xfs_format.h"
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_sb.h"
25#include "xfs_ag.h"
26#include "xfs_mount.h" 24#include "xfs_mount.h"
27#include "xfs_inode.h" 25#include "xfs_inode.h"
28#include "xfs_trans.h" 26#include "xfs_trans.h"
@@ -229,13 +227,6 @@ xfs_trans_getsb(xfs_trans_t *tp,
229 return bp; 227 return bp;
230} 228}
231 229
232#ifdef DEBUG
233xfs_buftarg_t *xfs_error_target;
234int xfs_do_error;
235int xfs_req_num;
236int xfs_error_mod = 33;
237#endif
238
239/* 230/*
240 * Get and lock the buffer for the caller if it is not already 231 * Get and lock the buffer for the caller if it is not already
241 * locked within the given transaction. If it has not yet been 232 * locked within the given transaction. If it has not yet been
@@ -257,46 +248,11 @@ xfs_trans_read_buf_map(
257 struct xfs_buf **bpp, 248 struct xfs_buf **bpp,
258 const struct xfs_buf_ops *ops) 249 const struct xfs_buf_ops *ops)
259{ 250{
260 xfs_buf_t *bp; 251 struct xfs_buf *bp = NULL;
261 xfs_buf_log_item_t *bip; 252 struct xfs_buf_log_item *bip;
262 int error; 253 int error;
263 254
264 *bpp = NULL; 255 *bpp = NULL;
265 if (!tp) {
266 bp = xfs_buf_read_map(target, map, nmaps, flags, ops);
267 if (!bp)
268 return (flags & XBF_TRYLOCK) ?
269 -EAGAIN : -ENOMEM;
270
271 if (bp->b_error) {
272 error = bp->b_error;
273 xfs_buf_ioerror_alert(bp, __func__);
274 XFS_BUF_UNDONE(bp);
275 xfs_buf_stale(bp);
276 xfs_buf_relse(bp);
277
278 /* bad CRC means corrupted metadata */
279 if (error == -EFSBADCRC)
280 error = -EFSCORRUPTED;
281 return error;
282 }
283#ifdef DEBUG
284 if (xfs_do_error) {
285 if (xfs_error_target == target) {
286 if (((xfs_req_num++) % xfs_error_mod) == 0) {
287 xfs_buf_relse(bp);
288 xfs_debug(mp, "Returning error!");
289 return -EIO;
290 }
291 }
292 }
293#endif
294 if (XFS_FORCED_SHUTDOWN(mp))
295 goto shutdown_abort;
296 *bpp = bp;
297 return 0;
298 }
299
300 /* 256 /*
301 * If we find the buffer in the cache with this transaction 257 * If we find the buffer in the cache with this transaction
302 * pointer in its b_fsprivate2 field, then we know we already 258 * pointer in its b_fsprivate2 field, then we know we already
@@ -305,49 +261,24 @@ xfs_trans_read_buf_map(
305 * If the buffer is not yet read in, then we read it in, increment 261 * If the buffer is not yet read in, then we read it in, increment
306 * the lock recursion count, and return it to the caller. 262 * the lock recursion count, and return it to the caller.
307 */ 263 */
308 bp = xfs_trans_buf_item_match(tp, target, map, nmaps); 264 if (tp)
309 if (bp != NULL) { 265 bp = xfs_trans_buf_item_match(tp, target, map, nmaps);
266 if (bp) {
310 ASSERT(xfs_buf_islocked(bp)); 267 ASSERT(xfs_buf_islocked(bp));
311 ASSERT(bp->b_transp == tp); 268 ASSERT(bp->b_transp == tp);
312 ASSERT(bp->b_fspriv != NULL); 269 ASSERT(bp->b_fspriv != NULL);
313 ASSERT(!bp->b_error); 270 ASSERT(!bp->b_error);
314 if (!(XFS_BUF_ISDONE(bp))) { 271 ASSERT(bp->b_flags & XBF_DONE);
315 trace_xfs_trans_read_buf_io(bp, _RET_IP_); 272
316 ASSERT(!XFS_BUF_ISASYNC(bp));
317 ASSERT(bp->b_iodone == NULL);
318 XFS_BUF_READ(bp);
319 bp->b_ops = ops;
320
321 error = xfs_buf_submit_wait(bp);
322 if (error) {
323 if (!XFS_FORCED_SHUTDOWN(mp))
324 xfs_buf_ioerror_alert(bp, __func__);
325 xfs_buf_relse(bp);
326 /*
327 * We can gracefully recover from most read
328 * errors. Ones we can't are those that happen
329 * after the transaction's already dirty.
330 */
331 if (tp->t_flags & XFS_TRANS_DIRTY)
332 xfs_force_shutdown(tp->t_mountp,
333 SHUTDOWN_META_IO_ERROR);
334 /* bad CRC means corrupted metadata */
335 if (error == -EFSBADCRC)
336 error = -EFSCORRUPTED;
337 return error;
338 }
339 }
340 /* 273 /*
341 * We never locked this buf ourselves, so we shouldn't 274 * We never locked this buf ourselves, so we shouldn't
342 * brelse it either. Just get out. 275 * brelse it either. Just get out.
343 */ 276 */
344 if (XFS_FORCED_SHUTDOWN(mp)) { 277 if (XFS_FORCED_SHUTDOWN(mp)) {
345 trace_xfs_trans_read_buf_shut(bp, _RET_IP_); 278 trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
346 *bpp = NULL;
347 return -EIO; 279 return -EIO;
348 } 280 }
349 281
350
351 bip = bp->b_fspriv; 282 bip = bp->b_fspriv;
352 bip->bli_recur++; 283 bip->bli_recur++;
353 284
@@ -358,17 +289,29 @@ xfs_trans_read_buf_map(
358 } 289 }
359 290
360 bp = xfs_buf_read_map(target, map, nmaps, flags, ops); 291 bp = xfs_buf_read_map(target, map, nmaps, flags, ops);
361 if (bp == NULL) { 292 if (!bp) {
362 *bpp = NULL; 293 if (!(flags & XBF_TRYLOCK))
363 return (flags & XBF_TRYLOCK) ? 294 return -ENOMEM;
364 0 : -ENOMEM; 295 return tp ? 0 : -EAGAIN;
365 } 296 }
297
298 /*
299 * If we've had a read error, then the contents of the buffer are
 300 * invalid and should not be used. To ensure that a follow-up read tries
 301 * to pull the buffer from disk again, we clear the XBF_DONE flag and
 302 * mark the buffer stale. This ensures that anyone who has a current
 303 * reference to the buffer will interpret its contents correctly and

304 * future cache lookups will also treat it as an empty, uninitialised
305 * buffer.
306 */
366 if (bp->b_error) { 307 if (bp->b_error) {
367 error = bp->b_error; 308 error = bp->b_error;
309 if (!XFS_FORCED_SHUTDOWN(mp))
310 xfs_buf_ioerror_alert(bp, __func__);
311 bp->b_flags &= ~XBF_DONE;
368 xfs_buf_stale(bp); 312 xfs_buf_stale(bp);
369 XFS_BUF_DONE(bp); 313
370 xfs_buf_ioerror_alert(bp, __func__); 314 if (tp && (tp->t_flags & XFS_TRANS_DIRTY))
371 if (tp->t_flags & XFS_TRANS_DIRTY)
372 xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR); 315 xfs_force_shutdown(tp->t_mountp, SHUTDOWN_META_IO_ERROR);
373 xfs_buf_relse(bp); 316 xfs_buf_relse(bp);
374 317
@@ -377,33 +320,19 @@ xfs_trans_read_buf_map(
377 error = -EFSCORRUPTED; 320 error = -EFSCORRUPTED;
378 return error; 321 return error;
379 } 322 }
380#ifdef DEBUG 323
381 if (xfs_do_error && !(tp->t_flags & XFS_TRANS_DIRTY)) { 324 if (XFS_FORCED_SHUTDOWN(mp)) {
382 if (xfs_error_target == target) { 325 xfs_buf_relse(bp);
383 if (((xfs_req_num++) % xfs_error_mod) == 0) { 326 trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
384 xfs_force_shutdown(tp->t_mountp, 327 return -EIO;
385 SHUTDOWN_META_IO_ERROR);
386 xfs_buf_relse(bp);
387 xfs_debug(mp, "Returning trans error!");
388 return -EIO;
389 }
390 }
391 } 328 }
392#endif
393 if (XFS_FORCED_SHUTDOWN(mp))
394 goto shutdown_abort;
395 329
396 _xfs_trans_bjoin(tp, bp, 1); 330 if (tp)
331 _xfs_trans_bjoin(tp, bp, 1);
397 trace_xfs_trans_read_buf(bp->b_fspriv); 332 trace_xfs_trans_read_buf(bp->b_fspriv);
398
399 *bpp = bp; 333 *bpp = bp;
400 return 0; 334 return 0;
401 335
402shutdown_abort:
403 trace_xfs_trans_read_buf_shut(bp, _RET_IP_);
404 xfs_buf_relse(bp);
405 *bpp = NULL;
406 return -EIO;
407} 336}
408 337
409/* 338/*
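
The rework above folds the old standalone tp == NULL branch into the main body, so the cache-hit check, read-error handling, and shutdown check each exist exactly once and the optional transaction is consulted only where behaviour differs. A standalone sketch of that merged-path shape, with stand-in types:

	#include <stdbool.h>
	#include <stddef.h>
	#include <stdio.h>

	struct ctx { bool dirty; };
	struct buf { int error; };

	static struct buf *cache_match(struct ctx *tp) { (void)tp; return NULL; }
	static struct buf *read_map(void) { static struct buf b; return &b; }

	/* One path serves both transactional and bare reads, as in the
	 * reworked xfs_trans_read_buf_map(). */
	static int read_buf(struct ctx *tp, struct buf **bpp)
	{
		struct buf *bp = NULL;

		*bpp = NULL;
		if (tp)
			bp = cache_match(tp);	/* recursion needs a tp */
		if (!bp)
			bp = read_map();
		if (bp->error) {
			if (tp && tp->dirty)
				puts("shut down: dirty trans hit I/O error");
			return bp->error;
		}
		if (tp)
			puts("joining buffer to transaction");
		*bpp = bp;
		return 0;
	}

	int main(void)
	{
		struct buf *bp;
		return read_buf(NULL, &bp);	/* bare read, no transaction */
	}
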
diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
index 846e061c2e98..76a16df55ef7 100644
--- a/fs/xfs/xfs_trans_dquot.c
+++ b/fs/xfs/xfs_trans_dquot.c
@@ -21,8 +21,6 @@
21#include "xfs_format.h" 21#include "xfs_format.h"
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_sb.h"
25#include "xfs_ag.h"
26#include "xfs_mount.h" 24#include "xfs_mount.h"
27#include "xfs_inode.h" 25#include "xfs_inode.h"
28#include "xfs_error.h" 26#include "xfs_error.h"
diff --git a/fs/xfs/xfs_trans_extfree.c b/fs/xfs/xfs_trans_extfree.c
index 47978ba89dae..284397dd7990 100644
--- a/fs/xfs/xfs_trans_extfree.c
+++ b/fs/xfs/xfs_trans_extfree.c
@@ -18,10 +18,9 @@
18#include "xfs.h" 18#include "xfs.h"
19#include "xfs_fs.h" 19#include "xfs_fs.h"
20#include "xfs_shared.h" 20#include "xfs_shared.h"
21#include "xfs_format.h"
21#include "xfs_log_format.h" 22#include "xfs_log_format.h"
22#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
23#include "xfs_sb.h"
24#include "xfs_ag.h"
25#include "xfs_mount.h" 24#include "xfs_mount.h"
26#include "xfs_trans.h" 25#include "xfs_trans.h"
27#include "xfs_trans_priv.h" 26#include "xfs_trans_priv.h"
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index cdb4d86520e1..17280cd71934 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -21,8 +21,6 @@
21#include "xfs_format.h" 21#include "xfs_format.h"
22#include "xfs_log_format.h" 22#include "xfs_log_format.h"
23#include "xfs_trans_resv.h" 23#include "xfs_trans_resv.h"
24#include "xfs_sb.h"
25#include "xfs_ag.h"
26#include "xfs_mount.h" 24#include "xfs_mount.h"
27#include "xfs_inode.h" 25#include "xfs_inode.h"
28#include "xfs_trans.h" 26#include "xfs_trans.h"
diff --git a/fs/xfs/xfs_xattr.c b/fs/xfs/xfs_xattr.c
index 93455b998041..69f6e475de97 100644
--- a/fs/xfs/xfs_xattr.c
+++ b/fs/xfs/xfs_xattr.c
@@ -20,8 +20,6 @@
20#include "xfs_format.h" 20#include "xfs_format.h"
21#include "xfs_log_format.h" 21#include "xfs_log_format.h"
22#include "xfs_trans_resv.h" 22#include "xfs_trans_resv.h"
23#include "xfs_sb.h"
24#include "xfs_ag.h"
25#include "xfs_mount.h" 23#include "xfs_mount.h"
26#include "xfs_da_format.h" 24#include "xfs_da_format.h"
27#include "xfs_inode.h" 25#include "xfs_inode.h"
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 752e30d63904..177d5973b132 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -103,6 +103,17 @@ static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
103#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 103#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
104#endif 104#endif
105 105
106#ifndef __HAVE_ARCH_PMDP_GET_AND_CLEAR_FULL
107#ifdef CONFIG_TRANSPARENT_HUGEPAGE
108static inline pmd_t pmdp_get_and_clear_full(struct mm_struct *mm,
109 unsigned long address, pmd_t *pmdp,
110 int full)
111{
112 return pmdp_get_and_clear(mm, address, pmdp);
113}
114#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
115#endif
116
106#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL 117#ifndef __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
107static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, 118static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
108 unsigned long address, pte_t *ptep, 119 unsigned long address, pte_t *ptep,
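
The new pmdp_get_and_clear_full() fallback follows the standard asm-generic pattern: the generic header supplies a default unless the architecture defines the __HAVE_ARCH_* guard and ships its own variant. A standalone sketch of the override mechanism, with both "headers" collapsed into one file and hypothetical names:

	#include <stdio.h>

	/* --- arch header: opts out of the generic fallback --- */
	#define __HAVE_ARCH_GET_AND_CLEAR_FULL
	static int arch_get_and_clear_full(int *p, int full)
	{
		int old = *p;
		(void)full;		/* arch may exploit the hint */
		*p = 0;
		puts("arch-optimised variant");
		return old;
	}
	#define get_and_clear_full arch_get_and_clear_full

	/* --- generic header: compiled only if the arch stayed silent --- */
	#ifndef __HAVE_ARCH_GET_AND_CLEAR_FULL
	static int get_and_clear_full(int *p, int full)
	{
		int old = *p;
		(void)full;		/* generic code ignores the hint */
		*p = 0;
		return old;
	}
	#endif

	int main(void)
	{
		int pmd = 42;
		return get_and_clear_full(&pmd, 1) == 42 ? 0 : 1;
	}
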
diff --git a/include/dt-bindings/dma/at91.h b/include/dt-bindings/dma/at91.h
index e835037a77b4..ab6cbba45401 100644
--- a/include/dt-bindings/dma/at91.h
+++ b/include/dt-bindings/dma/at91.h
@@ -9,6 +9,8 @@
9#ifndef __DT_BINDINGS_AT91_DMA_H__ 9#ifndef __DT_BINDINGS_AT91_DMA_H__
10#define __DT_BINDINGS_AT91_DMA_H__ 10#define __DT_BINDINGS_AT91_DMA_H__
11 11
12/* ---------- HDMAC ---------- */
13
12/* 14/*
13 * Source and/or destination peripheral ID 15 * Source and/or destination peripheral ID
14 */ 16 */
@@ -24,4 +26,27 @@
24#define AT91_DMA_CFG_FIFOCFG_ALAP (0x1 << AT91_DMA_CFG_FIFOCFG_OFFSET) /* largest defined AHB burst */ 26#define AT91_DMA_CFG_FIFOCFG_ALAP (0x1 << AT91_DMA_CFG_FIFOCFG_OFFSET) /* largest defined AHB burst */
25#define AT91_DMA_CFG_FIFOCFG_ASAP (0x2 << AT91_DMA_CFG_FIFOCFG_OFFSET) /* single AHB access */ 27#define AT91_DMA_CFG_FIFOCFG_ASAP (0x2 << AT91_DMA_CFG_FIFOCFG_OFFSET) /* single AHB access */
26 28
29
30/* ---------- XDMAC ---------- */
31#define AT91_XDMAC_DT_MEM_IF_MASK (0x1)
32#define AT91_XDMAC_DT_MEM_IF_OFFSET (13)
33#define AT91_XDMAC_DT_MEM_IF(mem_if) (((mem_if) & AT91_XDMAC_DT_MEM_IF_MASK) \
34 << AT91_XDMAC_DT_MEM_IF_OFFSET)
35#define AT91_XDMAC_DT_GET_MEM_IF(cfg) (((cfg) >> AT91_XDMAC_DT_MEM_IF_OFFSET) \
36 & AT91_XDMAC_DT_MEM_IF_MASK)
37
38#define AT91_XDMAC_DT_PER_IF_MASK (0x1)
39#define AT91_XDMAC_DT_PER_IF_OFFSET (14)
40#define AT91_XDMAC_DT_PER_IF(per_if) (((per_if) & AT91_XDMAC_DT_PER_IF_MASK) \
41 << AT91_XDMAC_DT_PER_IF_OFFSET)
42#define AT91_XDMAC_DT_GET_PER_IF(cfg) (((cfg) >> AT91_XDMAC_DT_PER_IF_OFFSET) \
43 & AT91_XDMAC_DT_PER_IF_MASK)
44
45#define AT91_XDMAC_DT_PERID_MASK (0x7f)
46#define AT91_XDMAC_DT_PERID_OFFSET (24)
47#define AT91_XDMAC_DT_PERID(perid) (((perid) & AT91_XDMAC_DT_PERID_MASK) \
48 << AT91_XDMAC_DT_PERID_OFFSET)
49#define AT91_XDMAC_DT_GET_PERID(cfg) (((cfg) >> AT91_XDMAC_DT_PERID_OFFSET) \
50 & AT91_XDMAC_DT_PERID_MASK)
51
27#endif /* __DT_BINDINGS_AT91_DMA_H__ */ 52#endif /* __DT_BINDINGS_AT91_DMA_H__ */
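
The XDMAC macros pack a channel description into one cell of a DT "dmas" property: memory interface at bit 13, peripheral interface at bit 14, 7-bit peripheral ID at bit 24. A standalone encode/decode check (the three macro groups are copied from the header above so the demo compiles on its own):

	#include <stdio.h>

	#define AT91_XDMAC_DT_MEM_IF_MASK	(0x1)
	#define AT91_XDMAC_DT_MEM_IF_OFFSET	(13)
	#define AT91_XDMAC_DT_MEM_IF(m)	(((m) & AT91_XDMAC_DT_MEM_IF_MASK) \
					 << AT91_XDMAC_DT_MEM_IF_OFFSET)
	#define AT91_XDMAC_DT_PER_IF_MASK	(0x1)
	#define AT91_XDMAC_DT_PER_IF_OFFSET	(14)
	#define AT91_XDMAC_DT_PER_IF(p)	(((p) & AT91_XDMAC_DT_PER_IF_MASK) \
					 << AT91_XDMAC_DT_PER_IF_OFFSET)
	#define AT91_XDMAC_DT_PERID_MASK	(0x7f)
	#define AT91_XDMAC_DT_PERID_OFFSET	(24)
	#define AT91_XDMAC_DT_PERID(id)	(((id) & AT91_XDMAC_DT_PERID_MASK) \
					 << AT91_XDMAC_DT_PERID_OFFSET)
	#define AT91_XDMAC_DT_GET_PERID(cfg)	(((cfg) >> AT91_XDMAC_DT_PERID_OFFSET) \
					 & AT91_XDMAC_DT_PERID_MASK)

	int main(void)
	{
		/* peripheral ID 25, peripheral on IF 1, memory on IF 0 */
		unsigned int cfg = AT91_XDMAC_DT_PERID(25) |
				   AT91_XDMAC_DT_PER_IF(1) |
				   AT91_XDMAC_DT_MEM_IF(0);

		printf("cfg = 0x%08x, perid back = %u\n",
		       cfg, AT91_XDMAC_DT_GET_PERID(cfg)); /* 0x19004000, 25 */
		return 0;
	}
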
diff --git a/include/dt-bindings/interrupt-controller/mips-gic.h b/include/dt-bindings/interrupt-controller/mips-gic.h
new file mode 100644
index 000000000000..cf35a577e371
--- /dev/null
+++ b/include/dt-bindings/interrupt-controller/mips-gic.h
@@ -0,0 +1,9 @@
1#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_MIPS_GIC_H
2#define _DT_BINDINGS_INTERRUPT_CONTROLLER_MIPS_GIC_H
3
4#include <dt-bindings/interrupt-controller/irq.h>
5
6#define GIC_SHARED 0
7#define GIC_LOCAL 1
8
9#endif
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index c324f5700d1a..ac02f9bd63dc 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -97,6 +97,16 @@ void amba_release_regions(struct amba_device *);
97#define amba_pclk_disable(d) \ 97#define amba_pclk_disable(d) \
98 do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0) 98 do { if (!IS_ERR((d)->pclk)) clk_disable((d)->pclk); } while (0)
99 99
100static inline int amba_pclk_prepare(struct amba_device *dev)
101{
102 return clk_prepare(dev->pclk);
103}
104
105static inline void amba_pclk_unprepare(struct amba_device *dev)
106{
107 clk_unprepare(dev->pclk);
108}
109
100/* Some drivers don't use the struct amba_device */ 110/* Some drivers don't use the struct amba_device */
101#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff) 111#define AMBA_CONFIG_BITS(a) (((a) >> 24) & 0xff)
102#define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f) 112#define AMBA_REV_BITS(a) (((a) >> 20) & 0x0f)
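
amba_pclk_prepare()/amba_pclk_unprepare() complete the clk API pairing for the bus clock alongside the existing enable/disable helpers. A hypothetical suspend/resume fragment showing the intended pairing; the foo_* names are illustrative and error handling is trimmed:

	/* Hypothetical driver fragment, names illustrative. */
	static int foo_suspend(struct amba_device *adev)
	{
		amba_pclk_disable(adev);	/* stop the clock... */
		amba_pclk_unprepare(adev);	/* ...then drop the prepare count */
		return 0;
	}

	static int foo_resume(struct amba_device *adev)
	{
		int ret = amba_pclk_prepare(adev);

		if (ret)
			return ret;
		amba_pclk_enable(adev);
		return 0;
	}
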
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 641e56494a92..da0dae0600e6 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -638,8 +638,10 @@ struct cgroup_subsys {
638 struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css); 638 struct cgroup_subsys_state *(*css_alloc)(struct cgroup_subsys_state *parent_css);
639 int (*css_online)(struct cgroup_subsys_state *css); 639 int (*css_online)(struct cgroup_subsys_state *css);
640 void (*css_offline)(struct cgroup_subsys_state *css); 640 void (*css_offline)(struct cgroup_subsys_state *css);
641 void (*css_released)(struct cgroup_subsys_state *css);
641 void (*css_free)(struct cgroup_subsys_state *css); 642 void (*css_free)(struct cgroup_subsys_state *css);
642 void (*css_reset)(struct cgroup_subsys_state *css); 643 void (*css_reset)(struct cgroup_subsys_state *css);
644 void (*css_e_css_changed)(struct cgroup_subsys_state *css);
643 645
644 int (*can_attach)(struct cgroup_subsys_state *css, 646 int (*can_attach)(struct cgroup_subsys_state *css,
645 struct cgroup_taskset *tset); 647 struct cgroup_taskset *tset);
@@ -934,6 +936,8 @@ void css_task_iter_end(struct css_task_iter *it);
934int cgroup_attach_task_all(struct task_struct *from, struct task_struct *); 936int cgroup_attach_task_all(struct task_struct *from, struct task_struct *);
935int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from); 937int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from);
936 938
939struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgroup,
940 struct cgroup_subsys *ss);
937struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry, 941struct cgroup_subsys_state *css_tryget_online_from_dir(struct dentry *dentry,
938 struct cgroup_subsys *ss); 942 struct cgroup_subsys *ss);
939 943
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 2f073db7392e..1b357997cac5 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -48,29 +48,16 @@ extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
48void cpuset_init_current_mems_allowed(void); 48void cpuset_init_current_mems_allowed(void);
49int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask); 49int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);
50 50
51extern int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask); 51extern int __cpuset_node_allowed(int node, gfp_t gfp_mask);
52extern int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask);
53 52
54static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) 53static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
55{ 54{
56 return nr_cpusets() <= 1 || 55 return nr_cpusets() <= 1 || __cpuset_node_allowed(node, gfp_mask);
57 __cpuset_node_allowed_softwall(node, gfp_mask);
58} 56}
59 57
60static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask) 58static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
61{ 59{
62 return nr_cpusets() <= 1 || 60 return cpuset_node_allowed(zone_to_nid(z), gfp_mask);
63 __cpuset_node_allowed_hardwall(node, gfp_mask);
64}
65
66static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
67{
68 return cpuset_node_allowed_softwall(zone_to_nid(z), gfp_mask);
69}
70
71static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask)
72{
73 return cpuset_node_allowed_hardwall(zone_to_nid(z), gfp_mask);
74} 61}
75 62
76extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1, 63extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
@@ -179,22 +166,12 @@ static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
179 return 1; 166 return 1;
180} 167}
181 168
182static inline int cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) 169static inline int cpuset_node_allowed(int node, gfp_t gfp_mask)
183{
184 return 1;
185}
186
187static inline int cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
188{
189 return 1;
190}
191
192static inline int cpuset_zone_allowed_softwall(struct zone *z, gfp_t gfp_mask)
193{ 170{
194 return 1; 171 return 1;
195} 172}
196 173
197static inline int cpuset_zone_allowed_hardwall(struct zone *z, gfp_t gfp_mask) 174static inline int cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
198{ 175{
199 return 1; 176 return 1;
200} 177}
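
With the hardwall/softwall split collapsed, call sites pass the same gfp mask they allocate with and let the one predicate decide how strict to be. A hypothetical allocator-loop fragment (zone, z, zonelist, high_zoneidx and gfp_mask are assumed from the surrounding function):

	/* Hypothetical zonelist scan; one predicate replaces two. */
	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/* ... try to allocate from this zone ... */
	}
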
diff --git a/include/linux/device.h b/include/linux/device.h
index ce1f21608b16..41d6a7555c6b 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -911,6 +911,11 @@ static inline void device_unlock(struct device *dev)
911 mutex_unlock(&dev->mutex); 911 mutex_unlock(&dev->mutex);
912} 912}
913 913
914static inline void device_lock_assert(struct device *dev)
915{
916 lockdep_assert_held(&dev->mutex);
917}
918
914void driver_init(void); 919void driver_init(void);
915 920
916/* 921/*
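
device_lock_assert() turns an implicit "caller must hold the device lock" rule into a lockdep-enforced one. A hypothetical helper documenting and enforcing its locking contract; the function name is illustrative:

	/* Hypothetical helper; must be entered with the device lock held. */
	static void foo_reconfigure(struct device *dev)
	{
		device_lock_assert(dev);  /* lockdep splat if rule is broken */
		/* ... touch state protected by dev->mutex ... */
	}
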
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 653a1fd07ae8..40cd75e21ea2 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -447,7 +447,8 @@ struct dmaengine_unmap_data {
447 * communicate status 447 * communicate status
448 * @phys: physical address of the descriptor 448 * @phys: physical address of the descriptor
449 * @chan: target channel for this operation 449 * @chan: target channel for this operation
450 * @tx_submit: set the prepared descriptor(s) to be executed by the engine 450 * @tx_submit: accept the descriptor, assign ordered cookie and mark the
451 * descriptor pending. To be pushed on .issue_pending() call
451 * @callback: routine to call after this operation is complete 452 * @callback: routine to call after this operation is complete
452 * @callback_param: general parameter to pass to the callback routine 453 * @callback_param: general parameter to pass to the callback routine
453 * ---async_tx api specific fields--- 454 * ---async_tx api specific fields---
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index 593fff99e6bf..30624954dec5 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -30,6 +30,12 @@
30 30
31struct acpi_dmar_header; 31struct acpi_dmar_header;
32 32
33#ifdef CONFIG_X86
34# define DMAR_UNITS_SUPPORTED MAX_IO_APICS
35#else
36# define DMAR_UNITS_SUPPORTED 64
37#endif
38
33/* DMAR Flags */ 39/* DMAR Flags */
34#define DMAR_INTR_REMAP 0x1 40#define DMAR_INTR_REMAP 0x1
35#define DMAR_X2APIC_OPT_OUT 0x2 41#define DMAR_X2APIC_OPT_OUT 0x2
@@ -120,28 +126,60 @@ extern int dmar_remove_dev_scope(struct dmar_pci_notify_info *info,
120/* Intel IOMMU detection */ 126/* Intel IOMMU detection */
121extern int detect_intel_iommu(void); 127extern int detect_intel_iommu(void);
122extern int enable_drhd_fault_handling(void); 128extern int enable_drhd_fault_handling(void);
129extern int dmar_device_add(acpi_handle handle);
130extern int dmar_device_remove(acpi_handle handle);
131
132static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
133{
134 return 0;
135}
123 136
124#ifdef CONFIG_INTEL_IOMMU 137#ifdef CONFIG_INTEL_IOMMU
125extern int iommu_detected, no_iommu; 138extern int iommu_detected, no_iommu;
126extern int intel_iommu_init(void); 139extern int intel_iommu_init(void);
127extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header); 140extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
128extern int dmar_parse_one_atsr(struct acpi_dmar_header *header); 141extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
142extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
143extern int dmar_release_one_atsr(struct acpi_dmar_header *hdr, void *arg);
144extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
129extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info); 145extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
130#else /* !CONFIG_INTEL_IOMMU: */ 146#else /* !CONFIG_INTEL_IOMMU: */
131static inline int intel_iommu_init(void) { return -ENODEV; } 147static inline int intel_iommu_init(void) { return -ENODEV; }
132static inline int dmar_parse_one_rmrr(struct acpi_dmar_header *header) 148
149#define dmar_parse_one_rmrr dmar_res_noop
150#define dmar_parse_one_atsr dmar_res_noop
151#define dmar_check_one_atsr dmar_res_noop
152#define dmar_release_one_atsr dmar_res_noop
153
154static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
133{ 155{
134 return 0; 156 return 0;
135} 157}
136static inline int dmar_parse_one_atsr(struct acpi_dmar_header *header) 158
159static inline int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
137{ 160{
138 return 0; 161 return 0;
139} 162}
140static inline int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info) 163#endif /* CONFIG_INTEL_IOMMU */
164
165#ifdef CONFIG_IRQ_REMAP
166extern int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
167#else /* CONFIG_IRQ_REMAP */
168static inline int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
169{ return 0; }
170#endif /* CONFIG_IRQ_REMAP */
171
172#else /* CONFIG_DMAR_TABLE */
173
174static inline int dmar_device_add(void *handle)
175{
176 return 0;
177}
178
179static inline int dmar_device_remove(void *handle)
141{ 180{
142 return 0; 181 return 0;
143} 182}
144#endif /* CONFIG_INTEL_IOMMU */
145 183
146#endif /* CONFIG_DMAR_TABLE */ 184#endif /* CONFIG_DMAR_TABLE */
147 185
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 67a5fa7830c4..20fa8d8ae313 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -15,6 +15,11 @@
15 set_personality(PER_LINUX | (current->personality & (~PER_MASK))) 15 set_personality(PER_LINUX | (current->personality & (~PER_MASK)))
16#endif 16#endif
17 17
18#ifndef SET_PERSONALITY2
19#define SET_PERSONALITY2(ex, state) \
20 SET_PERSONALITY(ex)
21#endif
22
18#if ELF_CLASS == ELFCLASS32 23#if ELF_CLASS == ELFCLASS32
19 24
20extern Elf32_Dyn _DYNAMIC []; 25extern Elf32_Dyn _DYNAMIC [];
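
SET_PERSONALITY2() hands architecture code the loader state along with the ELF header, while the default above keeps every other architecture on the old one-argument form. A hypothetical arch override; the flag test is invented purely to show the shape:

	/* Hypothetical arch header override; the flag bit is illustrative. */
	#define SET_PERSONALITY2(ex, state)				\
	do {								\
		if ((state)->flags & 1 /* hypothetical loader flag */)	\
			set_personality(PER_LINUX_FDPIC);		\
		else							\
			SET_PERSONALITY(ex);				\
	} while (0)
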
diff --git a/include/linux/fence.h b/include/linux/fence.h
index d174585b874b..39efee130d2b 100644
--- a/include/linux/fence.h
+++ b/include/linux/fence.h
@@ -128,8 +128,8 @@ struct fence_cb {
128 * from irq context, so normal spinlocks can be used. 128 * from irq context, so normal spinlocks can be used.
129 * 129 *
130 * A return value of false indicates the fence already passed, 130 * A return value of false indicates the fence already passed,
131 * or some failure occured that made it impossible to enable 131 * or some failure occurred that made it impossible to enable
132 * signaling. True indicates succesful enabling. 132 * signaling. True indicates successful enabling.
133 * 133 *
134 * fence->status may be set in enable_signaling, but only when false is 134 * fence->status may be set in enable_signaling, but only when false is
135 * returned. 135 * returned.
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 78ea9bf941cd..06c4607744f6 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -234,6 +234,33 @@ struct hid_item {
234#define HID_DG_BARRELSWITCH 0x000d0044 234#define HID_DG_BARRELSWITCH 0x000d0044
235#define HID_DG_ERASER 0x000d0045 235#define HID_DG_ERASER 0x000d0045
236#define HID_DG_TABLETPICK 0x000d0046 236#define HID_DG_TABLETPICK 0x000d0046
237
238#define HID_CP_CONSUMERCONTROL 0x000c0001
239#define HID_CP_NUMERICKEYPAD 0x000c0002
240#define HID_CP_PROGRAMMABLEBUTTONS 0x000c0003
241#define HID_CP_MICROPHONE 0x000c0004
242#define HID_CP_HEADPHONE 0x000c0005
243#define HID_CP_GRAPHICEQUALIZER 0x000c0006
244#define HID_CP_FUNCTIONBUTTONS 0x000c0036
245#define HID_CP_SELECTION 0x000c0080
246#define HID_CP_MEDIASELECTION 0x000c0087
247#define HID_CP_SELECTDISC 0x000c00ba
248#define HID_CP_PLAYBACKSPEED 0x000c00f1
249#define HID_CP_PROXIMITY 0x000c0109
250#define HID_CP_SPEAKERSYSTEM 0x000c0160
251#define HID_CP_CHANNELLEFT 0x000c0161
252#define HID_CP_CHANNELRIGHT 0x000c0162
253#define HID_CP_CHANNELCENTER 0x000c0163
254#define HID_CP_CHANNELFRONT 0x000c0164
255#define HID_CP_CHANNELCENTERFRONT 0x000c0165
256#define HID_CP_CHANNELSIDE 0x000c0166
257#define HID_CP_CHANNELSURROUND 0x000c0167
258#define HID_CP_CHANNELLOWFREQUENCYENHANCEMENT 0x000c0168
259#define HID_CP_CHANNELTOP 0x000c0169
260#define HID_CP_CHANNELUNKNOWN 0x000c016a
261#define HID_CP_APPLICATIONLAUNCHBUTTONS 0x000c0180
262#define HID_CP_GENERICGUIAPPLICATIONCONTROLS 0x000c0200
263
237#define HID_DG_CONFIDENCE 0x000d0047 264#define HID_DG_CONFIDENCE 0x000d0047
238#define HID_DG_WIDTH 0x000d0048 265#define HID_DG_WIDTH 0x000d0048
239#define HID_DG_HEIGHT 0x000d0049 266#define HID_DG_HEIGHT 0x000d0049
@@ -312,11 +339,8 @@ struct hid_item {
312 * Vendor specific HID device groups 339 * Vendor specific HID device groups
313 */ 340 */
314#define HID_GROUP_RMI 0x0100 341#define HID_GROUP_RMI 0x0100
315
316/*
317 * Vendor specific HID device groups
318 */
319#define HID_GROUP_WACOM 0x0101 342#define HID_GROUP_WACOM 0x0101
343#define HID_GROUP_LOGITECH_DJ_DEVICE 0x0102
320 344
321/* 345/*
322 * This is the global environment of the parser. This information is 346 * This is the global environment of the parser. This information is
@@ -1063,6 +1087,17 @@ static inline void hid_hw_wait(struct hid_device *hdev)
1063 hdev->ll_driver->wait(hdev); 1087 hdev->ll_driver->wait(hdev);
1064} 1088}
1065 1089
1090/**
1091 * hid_report_len - calculate the report length
1092 *
 1093 * @report: the report whose length we want to know

1094 */
1095static inline int hid_report_len(struct hid_report *report)
1096{
1097 /* equivalent to DIV_ROUND_UP(report->size, 8) + !!(report->id > 0) */
1098 return ((report->size - 1) >> 3) + 1 + (report->id > 0);
1099}
1100
1066int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size, 1101int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, int size,
1067 int interrupt); 1102 int interrupt);
1068 1103
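
hid_report_len() rounds the report's bit size up to whole bytes and adds one byte when a report ID prefixes the payload. A standalone check of the arithmetic, mirroring only the two fields the formula reads:

	#include <stdio.h>

	struct hid_report { unsigned int size; unsigned int id; }; /* bits */

	static int hid_report_len(const struct hid_report *report)
	{
		/* round bits up to bytes, plus one byte for a non-zero ID */
		return ((report->size - 1) >> 3) + 1 + (report->id > 0);
	}

	int main(void)
	{
		struct hid_report a = { .size = 12, .id = 3 }; /* 2 + ID */
		struct hid_report b = { .size = 64, .id = 0 }; /* 8 bytes */

		printf("%d %d\n", hid_report_len(&a), hid_report_len(&b));
		return 0;	/* prints: 3 8 */
	}
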
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index cdd149ca5cc0..431b7fc605c9 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -175,6 +175,52 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
175} 175}
176 176
177#endif /* !CONFIG_HUGETLB_PAGE */ 177#endif /* !CONFIG_HUGETLB_PAGE */
178/*
179 * hugepages at page global directory. If arch support
180 * hugepages at pgd level, they need to define this.
181 */
182#ifndef pgd_huge
183#define pgd_huge(x) 0
184#endif
185
186#ifndef pgd_write
187static inline int pgd_write(pgd_t pgd)
188{
189 BUG();
190 return 0;
191}
192#endif
193
194#ifndef pud_write
195static inline int pud_write(pud_t pud)
196{
197 BUG();
198 return 0;
199}
200#endif
201
202#ifndef is_hugepd
203/*
204 * Some architectures requires a hugepage directory format that is
205 * required to support multiple hugepage sizes. For example
206 * a4fe3ce76 "powerpc/mm: Allow more flexible layouts for hugepage pagetables"
207 * introduced the same on powerpc. This allows for a more flexible hugepage
208 * pagetable layout.
209 */
210typedef struct { unsigned long pd; } hugepd_t;
211#define is_hugepd(hugepd) (0)
212#define __hugepd(x) ((hugepd_t) { (x) })
213static inline int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
214 unsigned pdshift, unsigned long end,
215 int write, struct page **pages, int *nr)
216{
217 return 0;
218}
219#else
220extern int gup_huge_pd(hugepd_t hugepd, unsigned long addr,
221 unsigned pdshift, unsigned long end,
222 int write, struct page **pages, int *nr);
223#endif
178 224
179#define HUGETLB_ANON_FILE "anon_hugepage" 225#define HUGETLB_ANON_FILE "anon_hugepage"
180 226
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index b556e0ab946f..70ee0d3a2be3 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -359,7 +359,7 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
359 * to name two of the most common. 359 * to name two of the most common.
360 * 360 *
361 * The return codes from the @master_xfer field should indicate the type of 361 * The return codes from the @master_xfer field should indicate the type of
362 * error code that occured during the transfer, as documented in the kernel 362 * error code that occurred during the transfer, as documented in the kernel
363 * Documentation file Documentation/i2c/fault-codes. 363 * Documentation file Documentation/i2c/fault-codes.
364 */ 364 */
365struct i2c_algorithm { 365struct i2c_algorithm {
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index b29a5982e1c3..7a7bd15e54f1 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -28,7 +28,7 @@
28#define IOMMU_READ (1 << 0) 28#define IOMMU_READ (1 << 0)
29#define IOMMU_WRITE (1 << 1) 29#define IOMMU_WRITE (1 << 1)
30#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ 30#define IOMMU_CACHE (1 << 2) /* DMA cache coherency */
31#define IOMMU_EXEC (1 << 3) 31#define IOMMU_NOEXEC (1 << 3)
32 32
33struct iommu_ops; 33struct iommu_ops;
34struct iommu_group; 34struct iommu_group;
@@ -62,6 +62,7 @@ enum iommu_cap {
62 IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA 62 IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA
63 transactions */ 63 transactions */
64 IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */ 64 IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */
65 IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */
65}; 66};
66 67
67/* 68/*
diff --git a/include/linux/ipmi.h b/include/linux/ipmi.h
index 76d2acbfa7c6..838dbfa3c331 100644
--- a/include/linux/ipmi.h
+++ b/include/linux/ipmi.h
@@ -37,6 +37,7 @@
37 37
38#include <linux/list.h> 38#include <linux/list.h>
39#include <linux/proc_fs.h> 39#include <linux/proc_fs.h>
40#include <linux/acpi.h> /* For acpi_handle */
40 41
41struct module; 42struct module;
42struct device; 43struct device;
@@ -278,15 +279,18 @@ enum ipmi_addr_src {
278 SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS, 279 SI_INVALID = 0, SI_HOTMOD, SI_HARDCODED, SI_SPMI, SI_ACPI, SI_SMBIOS,
279 SI_PCI, SI_DEVICETREE, SI_DEFAULT 280 SI_PCI, SI_DEVICETREE, SI_DEFAULT
280}; 281};
282const char *ipmi_addr_src_to_str(enum ipmi_addr_src src);
281 283
282union ipmi_smi_info_union { 284union ipmi_smi_info_union {
285#ifdef CONFIG_ACPI
283 /* 286 /*
284 * the acpi_info element is defined for the SI_ACPI 287 * the acpi_info element is defined for the SI_ACPI
285 * address type 288 * address type
286 */ 289 */
287 struct { 290 struct {
288 void *acpi_handle; 291 acpi_handle acpi_handle;
289 } acpi_info; 292 } acpi_info;
293#endif
290}; 294};
291 295
292struct ipmi_smi_info { 296struct ipmi_smi_info {
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index bd349240d50e..0b1e569f5ff5 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -98,12 +98,11 @@ struct ipmi_smi_handlers {
98 operation is not allowed to fail. If an error occurs, it 98 operation is not allowed to fail. If an error occurs, it
99 should report back the error in a received message. It may 99 should report back the error in a received message. It may
100 do this in the current call context, since no write locks 100 do this in the current call context, since no write locks
 101 are held when this is run. If the priority is > 0, the 101 are held when this is run. Messages are delivered one at
 102 message will go into a high-priority queue and be sent 102 a time by the message handler; a new message will not be
 103 first. Otherwise, it goes into a normal-priority queue. */ 103 delivered until the previous message is returned. */
104 void (*sender)(void *send_info, 104 void (*sender)(void *send_info,
105 struct ipmi_smi_msg *msg, 105 struct ipmi_smi_msg *msg);
106 int priority);
107 106
108 /* Called by the upper layer to request that we try to get 107 /* Called by the upper layer to request that we try to get
109 events from the BMC we are attached to. */ 108 events from the BMC we are attached to. */
@@ -212,7 +211,6 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
212 void *send_info, 211 void *send_info,
213 struct ipmi_device_id *device_id, 212 struct ipmi_device_id *device_id,
214 struct device *dev, 213 struct device *dev,
215 const char *sysfs_name,
216 unsigned char slave_addr); 214 unsigned char slave_addr);
217 215
218/* 216/*
diff --git a/arch/mips/include/asm/gic.h b/include/linux/irqchip/mips-gic.h
index d7699cf7e135..420f77b34d02 100644
--- a/arch/mips/include/asm/gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -4,57 +4,26 @@
4 * for more details. 4 * for more details.
5 * 5 *
6 * Copyright (C) 2000, 07 MIPS Technologies, Inc. 6 * Copyright (C) 2000, 07 MIPS Technologies, Inc.
7 *
8 * GIC Register Definitions
9 *
10 */ 7 */
11#ifndef _ASM_GICREGS_H 8#ifndef __LINUX_IRQCHIP_MIPS_GIC_H
12#define _ASM_GICREGS_H 9#define __LINUX_IRQCHIP_MIPS_GIC_H
13
14#include <linux/bitmap.h>
15#include <linux/threads.h>
16 10
17#include <irq.h> 11#include <linux/clocksource.h>
18 12
19#undef GICISBYTELITTLEENDIAN 13#define GIC_MAX_INTRS 256
20 14
21/* Constants */ 15/* Constants */
22#define GIC_POL_POS 1 16#define GIC_POL_POS 1
23#define GIC_POL_NEG 0 17#define GIC_POL_NEG 0
24#define GIC_TRIG_EDGE 1 18#define GIC_TRIG_EDGE 1
25#define GIC_TRIG_LEVEL 0 19#define GIC_TRIG_LEVEL 0
20#define GIC_TRIG_DUAL_ENABLE 1
21#define GIC_TRIG_DUAL_DISABLE 0
26 22
27#define MSK(n) ((1 << (n)) - 1) 23#define MSK(n) ((1 << (n)) - 1)
28#define REG32(addr) (*(volatile unsigned int *) (addr))
29#define REG(base, offs) REG32((unsigned long)(base) + offs##_##OFS)
30#define REGP(base, phys) REG32((unsigned long)(base) + (phys))
31 24
32/* Accessors */ 25/* Accessors */
33#define GIC_REG(segment, offset) \ 26#define GIC_REG(segment, offset) (segment##_##SECTION_OFS + offset##_##OFS)
34 REG32(_gic_base + segment##_##SECTION_OFS + offset##_##OFS)
35#define GIC_REG_ADDR(segment, offset) \
36 REG32(_gic_base + segment##_##SECTION_OFS + offset)
37
38#define GIC_ABS_REG(segment, offset) \
39 (_gic_base + segment##_##SECTION_OFS + offset##_##OFS)
40#define GIC_REG_ABS_ADDR(segment, offset) \
41 (_gic_base + segment##_##SECTION_OFS + offset)
42
43#ifdef GICISBYTELITTLEENDIAN
44#define GICREAD(reg, data) ((data) = (reg), (data) = le32_to_cpu(data))
45#define GICWRITE(reg, data) ((reg) = cpu_to_le32(data))
46#else
47#define GICREAD(reg, data) ((data) = (reg))
48#define GICWRITE(reg, data) ((reg) = (data))
49#endif
50#define GICBIS(reg, mask, bits) \
51 do { u32 data; \
52 GICREAD(reg, data); \
53 data &= ~(mask); \
54 data |= ((bits) & (mask)); \
55 GICWRITE((reg), data); \
56 } while (0)
57
58 27
59/* GIC Address Space */ 28/* GIC Address Space */
60#define SHARED_SECTION_OFS 0x0000 29#define SHARED_SECTION_OFS 0x0000
@@ -75,120 +44,42 @@
75#define GIC_SH_COUNTER_63_32_OFS 0x0014 44#define GIC_SH_COUNTER_63_32_OFS 0x0014
76#define GIC_SH_REVISIONID_OFS 0x0020 45#define GIC_SH_REVISIONID_OFS 0x0020
77 46
78/* Interrupt Polarity */ 47/* Convert an interrupt number to a byte offset/bit for multi-word registers */
79#define GIC_SH_POL_31_0_OFS 0x0100 48#define GIC_INTR_OFS(intr) (((intr) / 32) * 4)
80#define GIC_SH_POL_63_32_OFS 0x0104 49#define GIC_INTR_BIT(intr) ((intr) % 32)
81#define GIC_SH_POL_95_64_OFS 0x0108 50
82#define GIC_SH_POL_127_96_OFS 0x010c 51/* Polarity : Reset Value is always 0 */
83#define GIC_SH_POL_159_128_OFS 0x0110 52#define GIC_SH_SET_POLARITY_OFS 0x0100
84#define GIC_SH_POL_191_160_OFS 0x0114 53
85#define GIC_SH_POL_223_192_OFS 0x0118 54/* Triggering : Reset Value is always 0 */
-#define GIC_SH_POL_255_224_OFS	0x011c
+#define GIC_SH_SET_TRIGGER_OFS	0x0180
 
-/* Edge/Level Triggering */
-#define GIC_SH_TRIG_31_0_OFS	0x0180
-#define GIC_SH_TRIG_63_32_OFS	0x0184
-#define GIC_SH_TRIG_95_64_OFS	0x0188
-#define GIC_SH_TRIG_127_96_OFS	0x018c
-#define GIC_SH_TRIG_159_128_OFS	0x0190
-#define GIC_SH_TRIG_191_160_OFS	0x0194
-#define GIC_SH_TRIG_223_192_OFS	0x0198
-#define GIC_SH_TRIG_255_224_OFS	0x019c
-
-/* Dual Edge Triggering */
-#define GIC_SH_DUAL_31_0_OFS	0x0200
-#define GIC_SH_DUAL_63_32_OFS	0x0204
-#define GIC_SH_DUAL_95_64_OFS	0x0208
-#define GIC_SH_DUAL_127_96_OFS	0x020c
-#define GIC_SH_DUAL_159_128_OFS	0x0210
-#define GIC_SH_DUAL_191_160_OFS	0x0214
-#define GIC_SH_DUAL_223_192_OFS	0x0218
-#define GIC_SH_DUAL_255_224_OFS	0x021c
+/* Dual edge triggering : Reset Value is always 0 */
+#define GIC_SH_SET_DUAL_OFS	0x0200
 
 /* Set/Clear corresponding bit in Edge Detect Register */
 #define GIC_SH_WEDGE_OFS	0x0280
 
-/* Reset Mask - Disables Interrupt */
-#define GIC_SH_RMASK_31_0_OFS	0x0300
-#define GIC_SH_RMASK_63_32_OFS	0x0304
-#define GIC_SH_RMASK_95_64_OFS	0x0308
-#define GIC_SH_RMASK_127_96_OFS	0x030c
-#define GIC_SH_RMASK_159_128_OFS	0x0310
-#define GIC_SH_RMASK_191_160_OFS	0x0314
-#define GIC_SH_RMASK_223_192_OFS	0x0318
-#define GIC_SH_RMASK_255_224_OFS	0x031c
-
-/* Set Mask (WO) - Enables Interrupt */
-#define GIC_SH_SMASK_31_0_OFS	0x0380
-#define GIC_SH_SMASK_63_32_OFS	0x0384
-#define GIC_SH_SMASK_95_64_OFS	0x0388
-#define GIC_SH_SMASK_127_96_OFS	0x038c
-#define GIC_SH_SMASK_159_128_OFS	0x0390
-#define GIC_SH_SMASK_191_160_OFS	0x0394
-#define GIC_SH_SMASK_223_192_OFS	0x0398
-#define GIC_SH_SMASK_255_224_OFS	0x039c
+/* Mask manipulation */
+#define GIC_SH_RMASK_OFS	0x0300
+#define GIC_SH_SMASK_OFS	0x0380
 
 /* Global Interrupt Mask Register (RO) - Bit Set == Interrupt enabled */
-#define GIC_SH_MASK_31_0_OFS	0x0400
-#define GIC_SH_MASK_63_32_OFS	0x0404
-#define GIC_SH_MASK_95_64_OFS	0x0408
-#define GIC_SH_MASK_127_96_OFS	0x040c
-#define GIC_SH_MASK_159_128_OFS	0x0410
-#define GIC_SH_MASK_191_160_OFS	0x0414
-#define GIC_SH_MASK_223_192_OFS	0x0418
-#define GIC_SH_MASK_255_224_OFS	0x041c
+#define GIC_SH_MASK_OFS	0x0400
 
 /* Pending Global Interrupts (RO) */
-#define GIC_SH_PEND_31_0_OFS	0x0480
-#define GIC_SH_PEND_63_32_OFS	0x0484
-#define GIC_SH_PEND_95_64_OFS	0x0488
-#define GIC_SH_PEND_127_96_OFS	0x048c
-#define GIC_SH_PEND_159_128_OFS	0x0490
-#define GIC_SH_PEND_191_160_OFS	0x0494
-#define GIC_SH_PEND_223_192_OFS	0x0498
-#define GIC_SH_PEND_255_224_OFS	0x049c
-
-#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS	0x0500
+#define GIC_SH_PEND_OFS	0x0480
 
 /* Maps Interrupt X to a Pin */
-#define GIC_SH_MAP_TO_PIN(intr) \
-	(GIC_SH_INTR_MAP_TO_PIN_BASE_OFS + (4 * intr))
-
-#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS	0x2000
+#define GIC_SH_INTR_MAP_TO_PIN_BASE_OFS	0x0500
+#define GIC_SH_MAP_TO_PIN(intr)	(4 * (intr))
 
 /* Maps Interrupt X to a VPE */
+#define GIC_SH_INTR_MAP_TO_VPE_BASE_OFS	0x2000
 #define GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe) \
-	(GIC_SH_INTR_MAP_TO_VPE_BASE_OFS + (32 * (intr)) + (((vpe) / 32) * 4))
+	((32 * (intr)) + (((vpe) / 32) * 4))
 #define GIC_SH_MAP_TO_VPE_REG_BIT(vpe)	(1 << ((vpe) % 32))
 
-/* Convert an interrupt number to a byte offset/bit for multi-word registers */
-#define GIC_INTR_OFS(intr) (((intr) / 32)*4)
-#define GIC_INTR_BIT(intr) ((intr) % 32)
-
-/* Polarity : Reset Value is always 0 */
-#define GIC_SH_SET_POLARITY_OFS	0x0100
-#define GIC_SET_POLARITY(intr, pol) \
-	GICBIS(GIC_REG_ADDR(SHARED, GIC_SH_SET_POLARITY_OFS + \
-		GIC_INTR_OFS(intr)), (1 << GIC_INTR_BIT(intr)), \
-		(pol) << GIC_INTR_BIT(intr))
-
-/* Triggering : Reset Value is always 0 */
-#define GIC_SH_SET_TRIGGER_OFS	0x0180
-#define GIC_SET_TRIGGER(intr, trig) \
-	GICBIS(GIC_REG_ADDR(SHARED, GIC_SH_SET_TRIGGER_OFS + \
-		GIC_INTR_OFS(intr)), (1 << GIC_INTR_BIT(intr)), \
-		(trig) << GIC_INTR_BIT(intr))
-
-/* Mask manipulation */
-#define GIC_SH_SMASK_OFS	0x0380
-#define GIC_SET_INTR_MASK(intr) \
-	GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_SMASK_OFS + \
-		GIC_INTR_OFS(intr)), 1 << GIC_INTR_BIT(intr))
-#define GIC_SH_RMASK_OFS	0x0300
-#define GIC_CLR_INTR_MASK(intr) \
-	GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_RMASK_OFS + \
-		GIC_INTR_OFS(intr)), 1 << GIC_INTR_BIT(intr))
-
 /* Register Map for Local Section */
 #define GIC_VPE_CTL_OFS	0x0000
 #define GIC_VPE_PEND_OFS	0x0004
@@ -198,6 +89,7 @@
 #define GIC_VPE_WD_MAP_OFS	0x0040
 #define GIC_VPE_COMPARE_MAP_OFS	0x0044
 #define GIC_VPE_TIMER_MAP_OFS	0x0048
+#define GIC_VPE_FDC_MAP_OFS	0x004c
 #define GIC_VPE_PERFCTR_MAP_OFS	0x0050
 #define GIC_VPE_SWINT0_MAP_OFS	0x0054
 #define GIC_VPE_SWINT1_MAP_OFS	0x0058
@@ -208,13 +100,11 @@
 #define GIC_VPE_COMPARE_LO_OFS	0x00a0
 #define GIC_VPE_COMPARE_HI_OFS	0x00a4
 
-#define GIC_VPE_EIC_SHADOW_SET_BASE	0x0100
-#define GIC_VPE_EIC_SS(intr) \
-	(GIC_VPE_EIC_SHADOW_SET_BASE + (4 * intr))
+#define GIC_VPE_EIC_SHADOW_SET_BASE_OFS	0x0100
+#define GIC_VPE_EIC_SS(intr)	(4 * (intr))
 
-#define GIC_VPE_EIC_VEC_BASE	0x0800
-#define GIC_VPE_EIC_VEC(intr) \
-	(GIC_VPE_EIC_VEC_BASE + (4 * intr))
+#define GIC_VPE_EIC_VEC_BASE_OFS	0x0800
+#define GIC_VPE_EIC_VEC(intr)	(4 * (intr))
 
 #define GIC_VPE_TENABLE_NMI_OFS	0x1000
 #define GIC_VPE_TENABLE_YQ_OFS	0x1004
@@ -238,8 +128,8 @@
 #define GIC_SH_CONFIG_NUMVPES_SHF	0
 #define GIC_SH_CONFIG_NUMVPES_MSK	(MSK(8) << GIC_SH_CONFIG_NUMVPES_SHF)
 
-#define GIC_SH_WEDGE_SET(intr)	(intr | (0x1 << 31))
-#define GIC_SH_WEDGE_CLR(intr)	(intr & ~(0x1 << 31))
+#define GIC_SH_WEDGE_SET(intr)	((intr) | (0x1 << 31))
+#define GIC_SH_WEDGE_CLR(intr)	((intr) & ~(0x1 << 31))
 
 #define GIC_MAP_TO_PIN_SHF	31
 #define GIC_MAP_TO_PIN_MSK	(MSK(1) << GIC_MAP_TO_PIN_SHF)
@@ -251,6 +141,10 @@
 #define GIC_MAP_MSK	(MSK(6) << GIC_MAP_SHF)
 
 /* GIC_VPE_CTL Masks */
+#define GIC_VPE_CTL_FDC_RTBL_SHF	4
+#define GIC_VPE_CTL_FDC_RTBL_MSK	(MSK(1) << GIC_VPE_CTL_FDC_RTBL_SHF)
+#define GIC_VPE_CTL_SWINT_RTBL_SHF	3
+#define GIC_VPE_CTL_SWINT_RTBL_MSK	(MSK(1) << GIC_VPE_CTL_SWINT_RTBL_SHF)
 #define GIC_VPE_CTL_PERFCNT_RTBL_SHF	2
 #define GIC_VPE_CTL_PERFCNT_RTBL_MSK	(MSK(1) << GIC_VPE_CTL_PERFCNT_RTBL_SHF)
 #define GIC_VPE_CTL_TIMER_RTBL_SHF	1
@@ -300,38 +194,6 @@
 #define GIC_VPE_SMASK_SWINT1_SHF	5
 #define GIC_VPE_SMASK_SWINT1_MSK	(MSK(1) << GIC_VPE_SMASK_SWINT1_SHF)
 
-/*
- * Set the Mapping of Interrupt X to a VPE.
- */
-#define GIC_SH_MAP_TO_VPE_SMASK(intr, vpe) \
-	GICWRITE(GIC_REG_ADDR(SHARED, GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe)), \
-		 GIC_SH_MAP_TO_VPE_REG_BIT(vpe))
-
-/*
- * Interrupt Meta-data specification. The ipiflag helps
- * in building ipi_map.
- */
-struct gic_intr_map {
-	unsigned int cpunum;	/* Directed to this CPU */
-#define GIC_UNUSED	0xdead	/* Dummy data */
-	unsigned int pin;	/* Directed to this Pin */
-	unsigned int polarity;	/* Polarity : +/- */
-	unsigned int trigtype;	/* Trigger : Edge/Levl */
-	unsigned int flags;	/* Misc flags */
-#define GIC_FLAG_TRANSPARENT	0x01
-};
-
-/*
- * This is only used in EIC mode. This helps to figure out which
- * shared interrupts we need to process when we get a vector interrupt.
- */
-#define GIC_MAX_SHARED_INTR	0x5
-struct gic_shared_intr_map {
-	unsigned int num_shared_intr;
-	unsigned int intr_list[GIC_MAX_SHARED_INTR];
-	unsigned int local_intr_mask;
-};
-
 /* GIC nomenclature for Core Interrupt Pins. */
 #define GIC_CPU_INT0	0 /* Core Interrupt 2 */
 #define GIC_CPU_INT1	1 /* . */
@@ -340,45 +202,48 @@ struct gic_shared_intr_map {
 #define GIC_CPU_INT4	4 /* . */
 #define GIC_CPU_INT5	5 /* Core Interrupt 7 */
 
-/* Local GIC interrupts. */
-#define GIC_INT_TMR	(GIC_CPU_INT5)
-#define GIC_INT_PERFCTR	(GIC_CPU_INT5)
+/* Add 2 to convert GIC CPU pin to core interrupt */
+#define GIC_CPU_PIN_OFFSET	2
 
 /* Add 2 to convert non-EIC hardware interrupt to EIC vector number. */
-#define GIC_CPU_TO_VEC_OFFSET	(2)
+#define GIC_CPU_TO_VEC_OFFSET	2
 
 /* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
-#define GIC_PIN_TO_VEC_OFFSET	(1)
+#define GIC_PIN_TO_VEC_OFFSET	1
 
-#include <linux/clocksource.h>
-#include <linux/irq.h>
+/* Local GIC interrupts. */
+#define GIC_LOCAL_INT_WD	0 /* GIC watchdog */
+#define GIC_LOCAL_INT_COMPARE	1 /* GIC count and compare timer */
+#define GIC_LOCAL_INT_TIMER	2 /* CPU timer interrupt */
+#define GIC_LOCAL_INT_PERFCTR	3 /* CPU performance counter */
+#define GIC_LOCAL_INT_SWINT0	4 /* CPU software interrupt 0 */
+#define GIC_LOCAL_INT_SWINT1	5 /* CPU software interrupt 1 */
+#define GIC_LOCAL_INT_FDC	6 /* CPU fast debug channel */
+#define GIC_NUM_LOCAL_INTRS	7
+
+/* Convert between local/shared IRQ number and GIC HW IRQ number. */
+#define GIC_LOCAL_HWIRQ_BASE	0
+#define GIC_LOCAL_TO_HWIRQ(x)	(GIC_LOCAL_HWIRQ_BASE + (x))
+#define GIC_HWIRQ_TO_LOCAL(x)	((x) - GIC_LOCAL_HWIRQ_BASE)
+#define GIC_SHARED_HWIRQ_BASE	GIC_NUM_LOCAL_INTRS
+#define GIC_SHARED_TO_HWIRQ(x)	(GIC_SHARED_HWIRQ_BASE + (x))
+#define GIC_HWIRQ_TO_SHARED(x)	((x) - GIC_SHARED_HWIRQ_BASE)
 
 extern unsigned int gic_present;
-extern unsigned int gic_frequency;
-extern unsigned long _gic_base;
-extern unsigned int gic_irq_base;
-extern unsigned int gic_irq_flags[];
-extern struct gic_shared_intr_map gic_shared_intr_map[];
 
 extern void gic_init(unsigned long gic_base_addr,
-	unsigned long gic_addrspace_size, struct gic_intr_map *intrmap,
-	unsigned int intrmap_size, unsigned int irqbase);
+	unsigned long gic_addrspace_size, unsigned int cpu_vec,
+	unsigned int irqbase);
 extern void gic_clocksource_init(unsigned int);
-extern unsigned int gic_compare_int (void);
 extern cycle_t gic_read_count(void);
+extern unsigned int gic_get_count_width(void);
 extern cycle_t gic_read_compare(void);
 extern void gic_write_compare(cycle_t cnt);
 extern void gic_write_cpu_compare(cycle_t cnt, int cpu);
 extern void gic_send_ipi(unsigned int intr);
 extern unsigned int plat_ipi_call_int_xlate(unsigned int);
 extern unsigned int plat_ipi_resched_int_xlate(unsigned int);
-extern void gic_bind_eic_interrupt(int irq, int set);
 extern unsigned int gic_get_timer_pending(void);
-extern void gic_get_int_mask(unsigned long *dst, const unsigned long *src);
-extern unsigned int gic_get_int(void);
-extern void gic_enable_interrupt(int irq_vec);
-extern void gic_disable_interrupt(int irq_vec);
-extern void gic_irq_ack(struct irq_data *d);
-extern void gic_finish_irq(struct irq_data *d);
-extern void gic_platform_init(int irqs, struct irq_chip *irq_controller);
-#endif /* _ASM_GICREGS_H */
+extern int gic_get_c0_compare_int(void);
+extern int gic_get_c0_perfcount_int(void);
+#endif /* __LINUX_IRQCHIP_MIPS_GIC_H */
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index f7296e57d614..5297f9fa0ef2 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -335,6 +335,7 @@ extern void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
 extern int arch_prepare_kprobe_ftrace(struct kprobe *p);
 #endif
 
+int arch_check_ftrace_location(struct kprobe *p);
 
 /* Get the kprobe at this addr (if any) - called with preemption disabled */
 struct kprobe *get_kprobe(void *addr);
diff --git a/include/linux/libata.h b/include/linux/libata.h
index bfbc817c34ee..2d182413b1db 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -191,7 +191,8 @@ enum {
 	ATA_DEV_PMP_UNSUP	= 6,	/* SATA port multiplier (unsupported) */
 	ATA_DEV_SEMB		= 7,	/* SEMB */
 	ATA_DEV_SEMB_UNSUP	= 8,	/* SEMB (unsupported) */
-	ATA_DEV_NONE		= 9,	/* no device */
+	ATA_DEV_ZAC		= 9,	/* ZAC device */
+	ATA_DEV_NONE		= 10,	/* no device */
 
 	/* struct ata_link flags */
 	ATA_LFLAG_NO_HRST	= (1 << 1), /* avoid hardreset */
@@ -1491,7 +1492,8 @@ static inline unsigned int ata_tag_internal(unsigned int tag)
 static inline unsigned int ata_class_enabled(unsigned int class)
 {
 	return class == ATA_DEV_ATA || class == ATA_DEV_ATAPI ||
-		class == ATA_DEV_PMP || class == ATA_DEV_SEMB;
+		class == ATA_DEV_PMP || class == ATA_DEV_SEMB ||
+		class == ATA_DEV_ZAC;
 }
 
 static inline unsigned int ata_class_disabled(unsigned int class)
diff --git a/include/linux/list.h b/include/linux/list.h
index f33f831eb3c8..feb773c76ee0 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -346,7 +346,7 @@ static inline void list_splice_tail_init(struct list_head *list,
  * list_entry - get the struct for this entry
  * @ptr: the &struct list_head pointer.
  * @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
  */
 #define list_entry(ptr, type, member) \
 	container_of(ptr, type, member)
@@ -355,7 +355,7 @@ static inline void list_splice_tail_init(struct list_head *list,
  * list_first_entry - get the first element from a list
  * @ptr: the list head to take the element from.
  * @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
  *
  * Note, that list is expected to be not empty.
  */
@@ -366,7 +366,7 @@ static inline void list_splice_tail_init(struct list_head *list,
  * list_last_entry - get the last element from a list
  * @ptr: the list head to take the element from.
  * @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
  *
  * Note, that list is expected to be not empty.
  */
@@ -377,7 +377,7 @@ static inline void list_splice_tail_init(struct list_head *list,
  * list_first_entry_or_null - get the first element from a list
  * @ptr: the list head to take the element from.
  * @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
  *
  * Note that if the list is empty, it returns NULL.
  */
@@ -387,7 +387,7 @@ static inline void list_splice_tail_init(struct list_head *list,
 /**
  * list_next_entry - get the next element in list
  * @pos: the type * to cursor
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
  */
 #define list_next_entry(pos, member) \
 	list_entry((pos)->member.next, typeof(*(pos)), member)
@@ -395,7 +395,7 @@ static inline void list_splice_tail_init(struct list_head *list,
 /**
  * list_prev_entry - get the prev element in list
  * @pos: the type * to cursor
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
  */
 #define list_prev_entry(pos, member) \
 	list_entry((pos)->member.prev, typeof(*(pos)), member)
@@ -441,7 +441,7 @@ static inline void list_splice_tail_init(struct list_head *list,
  * list_for_each_entry - iterate over list of given type
  * @pos: the type * to use as a loop cursor.
  * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
  */
 #define list_for_each_entry(pos, head, member) \
 	for (pos = list_first_entry(head, typeof(*pos), member); \
@@ -452,7 +452,7 @@ static inline void list_splice_tail_init(struct list_head *list,
  * list_for_each_entry_reverse - iterate backwards over list of given type.
  * @pos: the type * to use as a loop cursor.
  * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
  */
 #define list_for_each_entry_reverse(pos, head, member) \
 	for (pos = list_last_entry(head, typeof(*pos), member); \
@@ -463,7 +463,7 @@ static inline void list_splice_tail_init(struct list_head *list,
  * list_prepare_entry - prepare a pos entry for use in list_for_each_entry_continue()
  * @pos: the type * to use as a start point
  * @head: the head of the list
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
  *
  * Prepares a pos entry for use as a start point in list_for_each_entry_continue().
  */
@@ -474,7 +474,7 @@ static inline void list_splice_tail_init(struct list_head *list,
  * list_for_each_entry_continue - continue iteration over list of given type
  * @pos: the type * to use as a loop cursor.
  * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
  *
  * Continue to iterate over list of given type, continuing after
 * the current position.
@@ -488,7 +488,7 @@ static inline void list_splice_tail_init(struct list_head *list,
  * list_for_each_entry_continue_reverse - iterate backwards from the given point
  * @pos: the type * to use as a loop cursor.
  * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
  *
  * Start to iterate over list of given type backwards, continuing after
 * the current position.
@@ -502,7 +502,7 @@ static inline void list_splice_tail_init(struct list_head *list,
  * list_for_each_entry_from - iterate over list of given type from the current point
  * @pos: the type * to use as a loop cursor.
  * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
  *
  * Iterate over list of given type, continuing from current position.
 */
@@ -515,7 +515,7 @@ static inline void list_splice_tail_init(struct list_head *list,
  * @pos: the type * to use as a loop cursor.
  * @n: another type * to use as temporary storage
  * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
  */
 #define list_for_each_entry_safe(pos, n, head, member) \
 	for (pos = list_first_entry(head, typeof(*pos), member), \
@@ -528,7 +528,7 @@ static inline void list_splice_tail_init(struct list_head *list,
  * @pos: the type * to use as a loop cursor.
  * @n: another type * to use as temporary storage
  * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
  *
  * Iterate over list of given type, continuing after current point,
 * safe against removal of list entry.
@@ -544,7 +544,7 @@ static inline void list_splice_tail_init(struct list_head *list,
  * @pos: the type * to use as a loop cursor.
  * @n: another type * to use as temporary storage
  * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
  *
  * Iterate over list of given type from current point, safe against
 * removal of list entry.
@@ -559,7 +559,7 @@ static inline void list_splice_tail_init(struct list_head *list,
  * @pos: the type * to use as a loop cursor.
  * @n: another type * to use as temporary storage
  * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
  *
  * Iterate backwards over list of given type, safe against removal
 * of list entry.
@@ -574,7 +574,7 @@ static inline void list_splice_tail_init(struct list_head *list,
  * list_safe_reset_next - reset a stale list_for_each_entry_safe loop
  * @pos: the loop cursor used in the list_for_each_entry_safe loop
  * @n: temporary storage used in list_for_each_entry_safe
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
  *
  * list_safe_reset_next is not safe to use in general if the list may be
 * modified concurrently (eg. the lock is dropped in the loop body). An
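These kernel-doc fixes are worth making because @member really does name a struct list_head field: list_entry() is just container_of(), recovering the enclosing object from its embedded node. A minimal illustration (struct item, item_list and use_item() are hypothetical names introduced here):

	struct item {
		int value;
		struct list_head node;	/* the list_head member the docs refer to */
	};

	struct item *it;
	list_for_each_entry(it, &item_list, node)
		use_item(it->value);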
diff --git a/include/linux/mm.h b/include/linux/mm.h
index f7606d3a0915..3b337efbe533 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -56,6 +56,17 @@ extern int sysctl_legacy_va_layout;
 #define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
 #endif
 
+/*
+ * To prevent common memory management code establishing
+ * a zero page mapping on a read fault.
+ * This macro should be defined within <asm/pgtable.h>.
+ * s390 does this to prevent multiplexing of hardware bits
+ * related to the physical page in case of virtualization.
+ */
+#ifndef mm_forbids_zeropage
+#define mm_forbids_zeropage(X)	(0)
+#endif
+
 extern unsigned long sysctl_user_reserve_kbytes;
 extern unsigned long sysctl_admin_reserve_kbytes;
 
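The fallback keeps zero-page mappings enabled everywhere; an architecture opts out by defining the macro before this #ifndef is seen. A purely hypothetical override in <asm/pgtable.h> might look like the following (mm_uses_hw_keys() is an invented predicate for illustration; s390's real condition differs):

	/* hypothetical arch override, in <asm/pgtable.h> */
	#define mm_forbids_zeropage(mm)	mm_uses_hw_keys(mm)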
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 44eeef0da186..745def862580 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -69,7 +69,7 @@ struct ieee1394_device_id {
  * @bDeviceClass: Class of device; numbers are assigned
  *	by the USB forum. Products may choose to implement classes,
  *	or be vendor-specific. Device classes specify behavior of all
- *	the interfaces on a devices.
+ *	the interfaces on a device.
  * @bDeviceSubClass: Subclass of device; associated with bDeviceClass.
  * @bDeviceProtocol: Protocol of device; associated with bDeviceClass.
  * @bInterfaceClass: Class of interface; numbers are assigned
diff --git a/include/linux/pci.h b/include/linux/pci.h
index a523cee3abb5..44a27696ab6c 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -1004,6 +1004,8 @@ void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
 int pci_save_state(struct pci_dev *dev);
 void pci_restore_state(struct pci_dev *dev);
 struct pci_saved_state *pci_store_saved_state(struct pci_dev *dev);
+int pci_load_saved_state(struct pci_dev *dev,
+			 struct pci_saved_state *state);
 int pci_load_and_free_saved_state(struct pci_dev *dev,
 				  struct pci_saved_state **state);
 struct pci_cap_saved_state *pci_find_saved_cap(struct pci_dev *dev, char cap);
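Unlike pci_load_and_free_saved_state(), the newly exported pci_load_saved_state() leaves the saved-state buffer intact, so the same snapshot can be replayed across repeated resets. A hedged usage sketch (pdev is a caller-held struct pci_dev *; the ownership comment reflects how existing callers treat the buffer):

	struct pci_saved_state *state;

	pci_save_state(pdev);
	state = pci_store_saved_state(pdev);
	/* ... device reset, possibly more than once ... */
	if (state && !pci_load_saved_state(pdev, state))
		pci_restore_state(pdev);
	kfree(state);	/* the caller still owns the buffer */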
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 530b249f7ea4..b4337646388b 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -128,10 +128,8 @@ static inline void percpu_ref_kill(struct percpu_ref *ref)
 static inline bool __ref_is_percpu(struct percpu_ref *ref,
 					unsigned long __percpu **percpu_countp)
 {
-	unsigned long percpu_ptr = ACCESS_ONCE(ref->percpu_count_ptr);
-
 	/* paired with smp_store_release() in percpu_ref_reinit() */
-	smp_read_barrier_depends();
+	unsigned long percpu_ptr = lockless_dereference(ref->percpu_count_ptr);
 
 	/*
 	 * Theoretically, the following could test just ATOMIC; however,
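lockless_dereference() bundles the two steps it replaces here, so the dependency barrier can no longer be accidentally separated from the load it orders. At the time it was defined roughly as follows (a paraphrase; see rcupdate.h for the canonical version):

	#define lockless_dereference(p) \
	({ \
		typeof(p) _p1 = ACCESS_ONCE(p); \
		smp_read_barrier_depends(); /* dependency barrier, e.g. for Alpha */ \
		(_p1); \
	})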
diff --git a/include/linux/platform_data/dma-imx.h b/include/linux/platform_data/dma-imx.h
index 6a1357d31871..7d964e787299 100644
--- a/include/linux/platform_data/dma-imx.h
+++ b/include/linux/platform_data/dma-imx.h
@@ -41,6 +41,7 @@ enum sdma_peripheral_type {
 	IMX_DMATYPE_ESAI,	/* ESAI */
 	IMX_DMATYPE_SSI_DUAL,	/* SSI Dual FIFO */
 	IMX_DMATYPE_ASRC_SP,	/* Shared ASRC */
+	IMX_DMATYPE_SAI,	/* SAI */
 };
 
 enum imx_dma_prio {
diff --git a/include/linux/plist.h b/include/linux/plist.h
index 8b6c970cff6c..97883604a3c5 100644
--- a/include/linux/plist.h
+++ b/include/linux/plist.h
@@ -176,7 +176,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head);
  * plist_for_each_entry - iterate over list of given type
  * @pos: the type * to use as a loop counter
  * @head: the head for your list
- * @mem: the name of the list_struct within the struct
+ * @mem: the name of the list_head within the struct
  */
 #define plist_for_each_entry(pos, head, mem) \
 	list_for_each_entry(pos, &(head)->node_list, mem.node_list)
@@ -185,7 +185,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head);
  * plist_for_each_entry_continue - continue iteration over list of given type
  * @pos: the type * to use as a loop cursor
  * @head: the head for your list
- * @m: the name of the list_struct within the struct
+ * @m: the name of the list_head within the struct
  *
  * Continue to iterate over list of given type, continuing after
 * the current position.
@@ -198,7 +198,7 @@ extern void plist_requeue(struct plist_node *node, struct plist_head *head);
  * @pos: the type * to use as a loop counter
  * @n: another type * to use as temporary storage
  * @head: the head for your list
- * @m: the name of the list_struct within the struct
+ * @m: the name of the list_head within the struct
  *
  * Iterate over list of given type, safe against removal of list entry.
 */
@@ -229,7 +229,7 @@ static inline int plist_node_empty(const struct plist_node *node)
  * plist_first_entry - get the struct for the first entry
  * @head: the &struct plist_head pointer
  * @type: the type of the struct this is embedded in
- * @member: the name of the list_struct within the struct
+ * @member: the name of the list_head within the struct
  */
 #ifdef CONFIG_DEBUG_PI_LIST
 # define plist_first_entry(head, type, member) \
@@ -246,7 +246,7 @@ static inline int plist_node_empty(const struct plist_node *node)
  * plist_last_entry - get the struct for the last entry
  * @head: the &struct plist_head pointer
  * @type: the type of the struct this is embedded in
- * @member: the name of the list_struct within the struct
+ * @member: the name of the list_head within the struct
  */
 #ifdef CONFIG_DEBUG_PI_LIST
 # define plist_last_entry(head, type, member) \
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index eda4feede048..30e84d48bfea 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -118,6 +118,11 @@ static inline void pm_runtime_mark_last_busy(struct device *dev)
 	ACCESS_ONCE(dev->power.last_busy) = jiffies;
 }
 
+static inline bool pm_runtime_is_irq_safe(struct device *dev)
+{
+	return dev->power.irq_safe;
+}
+
 #else /* !CONFIG_PM */
 
 static inline bool queue_pm_work(struct work_struct *work) { return false; }
@@ -164,6 +169,7 @@ static inline bool pm_runtime_enabled(struct device *dev) { return false; }
 
 static inline void pm_runtime_no_callbacks(struct device *dev) {}
 static inline void pm_runtime_irq_safe(struct device *dev) {}
+static inline bool pm_runtime_is_irq_safe(struct device *dev) { return false; }
 
 static inline bool pm_runtime_callbacks_present(struct device *dev) { return false; }
 static inline void pm_runtime_mark_last_busy(struct device *dev) {}
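The new predicate lets code that may run in atomic context check whether a device's runtime-PM callbacks were declared IRQ-safe before invoking them synchronously. A hedged sketch (the handler and its registration are hypothetical):

	static irqreturn_t my_handler(int irq, void *data)
	{
		struct device *dev = data;

		/* pm_runtime_get_sync() is only legal here if irq_safe was set */
		if (pm_runtime_is_irq_safe(dev))
			pm_runtime_get_sync(dev);
		/* ... */
		return IRQ_HANDLED;
	}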
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 9974975d40db..4af3fdc85b01 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -53,7 +53,8 @@ struct persistent_ram_zone {
 };
 
 struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
-			u32 sig, struct persistent_ram_ecc_info *ecc_info);
+			u32 sig, struct persistent_ram_ecc_info *ecc_info,
+			unsigned int memtype);
 void persistent_ram_free(struct persistent_ram_zone *prz);
 void persistent_ram_zap(struct persistent_ram_zone *prz);
 
@@ -76,6 +77,7 @@ ssize_t persistent_ram_ecc_string(struct persistent_ram_zone *prz,
 struct ramoops_platform_data {
 	unsigned long	mem_size;
 	unsigned long	mem_address;
+	unsigned int	mem_type;
 	unsigned long	record_size;
 	unsigned long	console_size;
 	unsigned long	ftrace_size;
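Callers of persistent_ram_new() now pass the mapping type through. A hedged sketch of the updated call (paddr, sz, sig and ecc_info are caller-supplied; passing 0 for memtype is assumed here to keep the historical uncached mapping, with a non-zero value requesting a different mapping type per the new ramoops mem_type option):

	struct persistent_ram_zone *prz;

	prz = persistent_ram_new(paddr, sz, sig, &ecc_info, /* memtype */ 0);
	if (IS_ERR(prz))
		return PTR_ERR(prz);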
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index aa79b3c24f66..529bc946f450 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -241,7 +241,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
  * list_entry_rcu - get the struct for this entry
  * @ptr: the &struct list_head pointer.
  * @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
  *
  * This primitive may safely run concurrently with the _rcu list-mutation
  * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock().
@@ -278,7 +278,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
  * list_first_or_null_rcu - get the first element from a list
  * @ptr: the list head to take the element from.
  * @type: the type of the struct this is embedded in.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
  *
  * Note that if the list is empty, it returns NULL.
  *
@@ -296,7 +296,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
  * list_for_each_entry_rcu - iterate over rcu list of given type
  * @pos: the type * to use as a loop cursor.
  * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
  *
  * This list-traversal primitive may safely run concurrently with
 * the _rcu list-mutation primitives such as list_add_rcu()
@@ -311,7 +311,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
  * list_for_each_entry_continue_rcu - continue iteration over list of given type
  * @pos: the type * to use as a loop cursor.
  * @head: the head for your list.
- * @member: the name of the list_struct within the struct.
+ * @member: the name of the list_head within the struct.
  *
  * Continue to iterate over list of given type, continuing after
 * the current position.
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 832dcc9f86ec..9d87a37aecad 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -161,17 +161,12 @@ struct expander_device {
 };
 
 /* ---------- SATA device ---------- */
-enum ata_command_set {
-	ATA_COMMAND_SET = 0,
-	ATAPI_COMMAND_SET = 1,
-};
-
 #define ATA_RESP_FIS_SIZE 24
 
 struct sata_device {
-	enum ata_command_set command_set;
+	unsigned int class;
 	struct smp_resp rps_resp; /* report_phy_sata_resp */
 	u8 port_no;	/* port number, if this is a PM (Port) */
 
 	struct ata_port *ap;
 	struct ata_host ata_host;
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index c8a462ef9a4e..e939d2b3757a 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -554,7 +554,7 @@ struct Scsi_Host {
 	 * __devices is protected by the host_lock, but you should
 	 * usually use scsi_device_lookup / shost_for_each_device
 	 * to access it and don't care about locking yourself.
-	 * In the rare case of beeing in irq context you can use
+	 * In the rare case of being in irq context you can use
 	 * their __ prefixed variants with the lock held. NEVER
 	 * access this list directly from a driver.
 	 */
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index ff4bd1b35246..6cfb841fea7c 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -43,15 +43,13 @@ struct extent_status;
 	{ EXT4_GET_BLOCKS_METADATA_NOFAIL,	"METADATA_NOFAIL" },	\
 	{ EXT4_GET_BLOCKS_NO_NORMALIZE,		"NO_NORMALIZE" },	\
 	{ EXT4_GET_BLOCKS_KEEP_SIZE,		"KEEP_SIZE" },		\
-	{ EXT4_GET_BLOCKS_NO_LOCK,		"NO_LOCK" },		\
-	{ EXT4_GET_BLOCKS_NO_PUT_HOLE,		"NO_PUT_HOLE" })
+	{ EXT4_GET_BLOCKS_NO_LOCK,		"NO_LOCK" })
 
 #define show_mflags(flags) __print_flags(flags, "",	\
 	{ EXT4_MAP_NEW,		"N" },			\
 	{ EXT4_MAP_MAPPED,	"M" },			\
 	{ EXT4_MAP_UNWRITTEN,	"U" },			\
-	{ EXT4_MAP_BOUNDARY,	"B" },			\
-	{ EXT4_MAP_FROM_CLUSTER, "C" })
+	{ EXT4_MAP_BOUNDARY,	"B" })
 
 #define show_free_flags(flags) __print_flags(flags, "|",	\
 	{ EXT4_FREE_BLOCKS_METADATA,	"METADATA" },	\
@@ -2452,15 +2450,14 @@ TRACE_EVENT(ext4_collapse_range,
 
 TRACE_EVENT(ext4_es_shrink,
 	TP_PROTO(struct super_block *sb, int nr_shrunk, u64 scan_time,
-		 int skip_precached, int nr_skipped, int retried),
+		 int nr_skipped, int retried),
 
-	TP_ARGS(sb, nr_shrunk, scan_time, skip_precached, nr_skipped, retried),
+	TP_ARGS(sb, nr_shrunk, scan_time, nr_skipped, retried),
 
 	TP_STRUCT__entry(
 		__field(	dev_t,		dev		)
 		__field(	int,		nr_shrunk	)
 		__field(	unsigned long long, scan_time	)
-		__field(	int,		skip_precached	)
 		__field(	int,		nr_skipped	)
 		__field(	int,		retried		)
 	),
@@ -2469,16 +2466,14 @@ TRACE_EVENT(ext4_es_shrink,
 		__entry->dev		= sb->s_dev;
 		__entry->nr_shrunk	= nr_shrunk;
 		__entry->scan_time	= div_u64(scan_time, 1000);
-		__entry->skip_precached = skip_precached;
 		__entry->nr_skipped	= nr_skipped;
 		__entry->retried	= retried;
 	),
 
-	TP_printk("dev %d,%d nr_shrunk %d, scan_time %llu skip_precached %d "
-		  "nr_skipped %d retried %d",
+	TP_printk("dev %d,%d nr_shrunk %d, scan_time %llu "
+		  "nr_skipped %d retried %d",
 		  MAJOR(__entry->dev), MINOR(__entry->dev), __entry->nr_shrunk,
-		  __entry->scan_time, __entry->skip_precached,
-		  __entry->nr_skipped, __entry->retried)
+		  __entry->scan_time, __entry->nr_skipped, __entry->retried)
 );
 
 #endif /* _TRACE_EXT4_H */
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 2f47824e7a36..611e1c5893b4 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -157,6 +157,7 @@ struct btrfs_ioctl_dev_replace_status_params {
 #define BTRFS_IOCTL_DEV_REPLACE_RESULT_NO_ERROR		0
 #define BTRFS_IOCTL_DEV_REPLACE_RESULT_NOT_STARTED	1
 #define BTRFS_IOCTL_DEV_REPLACE_RESULT_ALREADY_STARTED	2
+#define BTRFS_IOCTL_DEV_REPLACE_RESULT_SCRUB_INPROGRESS	3
 struct btrfs_ioctl_dev_replace_args {
 	__u64 cmd;	/* in */
 	__u64 result;	/* out */
diff --git a/include/xen/interface/features.h b/include/xen/interface/features.h
index 14334d0161d5..131a6ccdba25 100644
--- a/include/xen/interface/features.h
+++ b/include/xen/interface/features.h
@@ -53,9 +53,6 @@
 /* operation as Dom0 is supported */
 #define XENFEAT_dom0                      11
 
-/* Xen also maps grant references at pfn = mfn */
-#define XENFEAT_grant_map_identity        12
-
 #define XENFEAT_NR_SUBMAPS 1
 
 #endif /* __XEN_PUBLIC_FEATURES_H__ */
diff --git a/include/xen/interface/grant_table.h b/include/xen/interface/grant_table.h
index e40fae9bf11a..bcce56439d64 100644
--- a/include/xen/interface/grant_table.h
+++ b/include/xen/interface/grant_table.h
@@ -479,6 +479,25 @@ struct gnttab_get_version {
 DEFINE_GUEST_HANDLE_STRUCT(gnttab_get_version);
 
 /*
+ * Issue one or more cache maintenance operations on a portion of a
+ * page granted to the calling domain by a foreign domain.
+ */
+#define GNTTABOP_cache_flush	12
+struct gnttab_cache_flush {
+	union {
+		uint64_t dev_bus_addr;
+		grant_ref_t ref;
+	} a;
+	uint16_t offset;	/* offset from start of grant */
+	uint16_t length;	/* size within the grant */
+#define GNTTAB_CACHE_CLEAN	(1<<0)
+#define GNTTAB_CACHE_INVAL	(1<<1)
+#define GNTTAB_CACHE_SOURCE_GREF (1<<31)
+	uint32_t op;
+};
+DEFINE_GUEST_HANDLE_STRUCT(gnttab_cache_flush);
+
+/*
  * Bitfield values for update_pin_status.flags.
  */
  /* Map the grant entry for access by I/O devices. */
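A hedged sketch of driving the new operation: name the granted region by grant reference (GNTTAB_CACHE_SOURCE_GREF selects the a.ref arm of the union) and clean plus invalidate part of it (gref is a grant_ref_t obtained from the foreign domain; error handling omitted):

	struct gnttab_cache_flush cflush;

	cflush.a.ref = gref;
	cflush.offset = 0;
	cflush.length = 64;
	cflush.op = GNTTAB_CACHE_CLEAN | GNTTAB_CACHE_INVAL |
		    GNTTAB_CACHE_SOURCE_GREF;
	HYPERVISOR_grant_table_op(GNTTABOP_cache_flush, &cflush, 1);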
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 136eceadeed1..bb263d0caab3 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -277,6 +277,10 @@ static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
 	if (!(cgrp->root->subsys_mask & (1 << ss->id)))
 		return NULL;
 
+	/*
+	 * This function is used while updating css associations and thus
+	 * can't test the csses directly.  Use ->child_subsys_mask.
+	 */
 	while (cgroup_parent(cgrp) &&
 	       !(cgroup_parent(cgrp)->child_subsys_mask & (1 << ss->id)))
 		cgrp = cgroup_parent(cgrp);
@@ -284,6 +288,39 @@ static struct cgroup_subsys_state *cgroup_e_css(struct cgroup *cgrp,
 	return cgroup_css(cgrp, ss);
 }
 
+/**
+ * cgroup_get_e_css - get a cgroup's effective css for the specified subsystem
+ * @cgrp: the cgroup of interest
+ * @ss: the subsystem of interest
+ *
+ * Find and get the effective css of @cgrp for @ss.  The effective css is
+ * defined as the matching css of the nearest ancestor including self which
+ * has @ss enabled.  If @ss is not mounted on the hierarchy @cgrp is on,
+ * the root css is returned, so this function always returns a valid css.
+ * The returned css must be put using css_put().
+ */
+struct cgroup_subsys_state *cgroup_get_e_css(struct cgroup *cgrp,
+					     struct cgroup_subsys *ss)
+{
+	struct cgroup_subsys_state *css;
+
+	rcu_read_lock();
+
+	do {
+		css = cgroup_css(cgrp, ss);
+
+		if (css && css_tryget_online(css))
+			goto out_unlock;
+		cgrp = cgroup_parent(cgrp);
+	} while (cgrp);
+
+	css = init_css_set.subsys[ss->id];
+	css_get(css);
+out_unlock:
+	rcu_read_unlock();
+	return css;
+}
+
 /* convenient tests for these bits */
 static inline bool cgroup_is_dead(const struct cgroup *cgrp)
 {
@@ -1019,31 +1056,30 @@ static void cgroup_put(struct cgroup *cgrp)
 }
 
 /**
- * cgroup_refresh_child_subsys_mask - update child_subsys_mask
+ * cgroup_calc_child_subsys_mask - calculate child_subsys_mask
  * @cgrp: the target cgroup
+ * @subtree_control: the new subtree_control mask to consider
  *
  * On the default hierarchy, a subsystem may request other subsystems to be
  * enabled together through its ->depends_on mask.  In such cases, more
  * subsystems than specified in "cgroup.subtree_control" may be enabled.
  *
- * This function determines which subsystems need to be enabled given the
- * current @cgrp->subtree_control and records it in
- * @cgrp->child_subsys_mask.  The resulting mask is always a superset of
- * @cgrp->subtree_control and follows the usual hierarchy rules.
+ * This function calculates which subsystems need to be enabled if
+ * @subtree_control is to be applied to @cgrp.  The returned mask is always
+ * a superset of @subtree_control and follows the usual hierarchy rules.
  */
-static void cgroup_refresh_child_subsys_mask(struct cgroup *cgrp)
+static unsigned int cgroup_calc_child_subsys_mask(struct cgroup *cgrp,
+						  unsigned int subtree_control)
 {
 	struct cgroup *parent = cgroup_parent(cgrp);
-	unsigned int cur_ss_mask = cgrp->subtree_control;
+	unsigned int cur_ss_mask = subtree_control;
 	struct cgroup_subsys *ss;
 	int ssid;
 
 	lockdep_assert_held(&cgroup_mutex);
 
-	if (!cgroup_on_dfl(cgrp)) {
-		cgrp->child_subsys_mask = cur_ss_mask;
-		return;
-	}
+	if (!cgroup_on_dfl(cgrp))
+		return cur_ss_mask;
 
 	while (true) {
 		unsigned int new_ss_mask = cur_ss_mask;
@@ -1067,7 +1103,20 @@ static void cgroup_refresh_child_subsys_mask(struct cgroup *cgrp)
 		cur_ss_mask = new_ss_mask;
 	}
 
-	cgrp->child_subsys_mask = cur_ss_mask;
+	return cur_ss_mask;
+}
+
+/**
+ * cgroup_refresh_child_subsys_mask - update child_subsys_mask
+ * @cgrp: the target cgroup
+ *
+ * Update @cgrp->child_subsys_mask according to the current
+ * @cgrp->subtree_control using cgroup_calc_child_subsys_mask().
+ */
+static void cgroup_refresh_child_subsys_mask(struct cgroup *cgrp)
+{
+	cgrp->child_subsys_mask =
+		cgroup_calc_child_subsys_mask(cgrp, cgrp->subtree_control);
 }
 
 /**
@@ -2641,7 +2690,7 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
 				    loff_t off)
 {
 	unsigned int enable = 0, disable = 0;
-	unsigned int css_enable, css_disable, old_ctrl, new_ctrl;
+	unsigned int css_enable, css_disable, old_sc, new_sc, old_ss, new_ss;
 	struct cgroup *cgrp, *child;
 	struct cgroup_subsys *ss;
 	char *tok;
@@ -2693,36 +2742,6 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
 				ret = -ENOENT;
 				goto out_unlock;
 			}
-
-			/*
-			 * @ss is already enabled through dependency and
-			 * we'll just make it visible.  Skip draining.
-			 */
-			if (cgrp->child_subsys_mask & (1 << ssid))
-				continue;
-
-			/*
-			 * Because css offlining is asynchronous, userland
-			 * might try to re-enable the same controller while
-			 * the previous instance is still around.  In such
-			 * cases, wait till it's gone using offline_waitq.
-			 */
-			cgroup_for_each_live_child(child, cgrp) {
-				DEFINE_WAIT(wait);
-
-				if (!cgroup_css(child, ss))
-					continue;
-
-				cgroup_get(child);
-				prepare_to_wait(&child->offline_waitq, &wait,
-						TASK_UNINTERRUPTIBLE);
-				cgroup_kn_unlock(of->kn);
-				schedule();
-				finish_wait(&child->offline_waitq, &wait);
-				cgroup_put(child);
-
-				return restart_syscall();
-			}
 		} else if (disable & (1 << ssid)) {
 			if (!(cgrp->subtree_control & (1 << ssid))) {
 				disable &= ~(1 << ssid);
@@ -2758,19 +2777,48 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
 	 * subsystems than specified may need to be enabled or disabled
 	 * depending on subsystem dependencies.
 	 */
-	cgrp->subtree_control |= enable;
-	cgrp->subtree_control &= ~disable;
+	old_sc = cgrp->subtree_control;
+	old_ss = cgrp->child_subsys_mask;
+	new_sc = (old_sc | enable) & ~disable;
+	new_ss = cgroup_calc_child_subsys_mask(cgrp, new_sc);
 
-	old_ctrl = cgrp->child_subsys_mask;
-	cgroup_refresh_child_subsys_mask(cgrp);
-	new_ctrl = cgrp->child_subsys_mask;
-
-	css_enable = ~old_ctrl & new_ctrl;
-	css_disable = old_ctrl & ~new_ctrl;
+	css_enable = ~old_ss & new_ss;
+	css_disable = old_ss & ~new_ss;
 	enable |= css_enable;
 	disable |= css_disable;
 
 	/*
+	 * Because css offlining is asynchronous, userland might try to
+	 * re-enable the same controller while the previous instance is
+	 * still around.  In such cases, wait till it's gone using
+	 * offline_waitq.
+	 */
+	for_each_subsys(ss, ssid) {
+		if (!(css_enable & (1 << ssid)))
+			continue;
+
+		cgroup_for_each_live_child(child, cgrp) {
+			DEFINE_WAIT(wait);
+
+			if (!cgroup_css(child, ss))
+				continue;
+
+			cgroup_get(child);
+			prepare_to_wait(&child->offline_waitq, &wait,
+					TASK_UNINTERRUPTIBLE);
+			cgroup_kn_unlock(of->kn);
+			schedule();
+			finish_wait(&child->offline_waitq, &wait);
+			cgroup_put(child);
+
+			return restart_syscall();
+		}
+	}
+
+	cgrp->subtree_control = new_sc;
+	cgrp->child_subsys_mask = new_ss;
+
+	/*
 	 * Create new csses or make the existing ones visible.  A css is
 	 * created invisible if it's being implicitly enabled through
 	 * dependency.  An invisible css is made visible when the userland
@@ -2825,6 +2873,24 @@ static ssize_t cgroup_subtree_control_write(struct kernfs_open_file *of,
 		}
 	}
 
+	/*
+	 * The effective csses of all the descendants (excluding @cgrp) may
+	 * have changed.  Subsystems can optionally subscribe to this event
+	 * by implementing ->css_e_css_changed() which is invoked if any of
+	 * the effective csses seen from the css's cgroup may have changed.
+	 */
+	for_each_subsys(ss, ssid) {
+		struct cgroup_subsys_state *this_css = cgroup_css(cgrp, ss);
+		struct cgroup_subsys_state *css;
+
+		if (!ss->css_e_css_changed || !this_css)
+			continue;
+
+		css_for_each_descendant_pre(css, this_css)
+			if (css != this_css)
+				ss->css_e_css_changed(css);
+	}
+
 	kernfs_activate(cgrp->kn);
 	ret = 0;
 out_unlock:
@@ -2832,9 +2898,8 @@ out_unlock:
 	return ret ?: nbytes;
 
 err_undo_css:
-	cgrp->subtree_control &= ~enable;
-	cgrp->subtree_control |= disable;
-	cgroup_refresh_child_subsys_mask(cgrp);
+	cgrp->subtree_control = old_sc;
+	cgrp->child_subsys_mask = old_ss;
 
 	for_each_subsys(ss, ssid) {
 		if (!(enable & (1 << ssid)))
@@ -4370,6 +4435,8 @@ static void css_release_work_fn(struct work_struct *work)
4370 if (ss) { 4435 if (ss) {
4371 /* css release path */ 4436 /* css release path */
4372 cgroup_idr_remove(&ss->css_idr, css->id); 4437 cgroup_idr_remove(&ss->css_idr, css->id);
4438 if (ss->css_released)
4439 ss->css_released(css);
4373 } else { 4440 } else {
4374 /* cgroup release path */ 4441 /* cgroup release path */
4375 cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id); 4442 cgroup_idr_remove(&cgrp->root->cgroup_idr, cgrp->id);
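
The subtree_control rework above uses a standard sleep-and-restart idiom: when a previous css instance is still offlining, the writer pins the child, sleeps on offline_waitq with every lock dropped, and restarts the syscall so the whole operation revalidates from scratch. A minimal sketch of that shape follows; my_obj, my_get, my_put and my_unlock are hypothetical stand-ins, not the real cgroup helpers.

    /* sleep-and-restart sketch; hypothetical names, not buildable as-is */
    static int wait_then_restart(struct kernfs_open_file *of,
                                 struct my_obj *obj)
    {
            DEFINE_WAIT(wait);

            my_get(obj);                    /* pin obj across the sleep */
            prepare_to_wait(&obj->offline_waitq, &wait,
                            TASK_UNINTERRUPTIBLE);
            my_unlock(of->kn);              /* never sleep holding the locks */
            schedule();                     /* woken by the offline path */
            finish_wait(&obj->offline_waitq, &wait);
            my_put(obj);

            return restart_syscall();       /* re-enter, retake locks, recheck */
    }
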
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 723cfc9d0ad7..64b257f6bca2 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -248,34 +248,34 @@ static struct cpuset top_cpuset = {
248 if (is_cpuset_online(((des_cs) = css_cs((pos_css))))) 248 if (is_cpuset_online(((des_cs) = css_cs((pos_css)))))
249 249
250/* 250/*
251 * There are two global mutexes guarding cpuset structures - cpuset_mutex 251 * There are two global locks guarding cpuset structures - cpuset_mutex and
252 * and callback_mutex. The latter may nest inside the former. We also 252 * callback_lock. We also require taking task_lock() when dereferencing a
253 * require taking task_lock() when dereferencing a task's cpuset pointer. 253 * task's cpuset pointer. See "The task_lock() exception", at the end of this
254 * See "The task_lock() exception", at the end of this comment. 254 * comment.
255 * 255 *
256 * A task must hold both mutexes to modify cpusets. If a task holds 256 * A task must hold both locks to modify cpusets. If a task holds
257 * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it 257 * cpuset_mutex, then it blocks others wanting that mutex, ensuring that it
258 * is the only task able to also acquire callback_mutex and be able to 258 * is the only task able to also acquire callback_lock and be able to
259 * modify cpusets. It can perform various checks on the cpuset structure 259 * modify cpusets. It can perform various checks on the cpuset structure
260 * first, knowing nothing will change. It can also allocate memory while 260 * first, knowing nothing will change. It can also allocate memory while
261 * just holding cpuset_mutex. While it is performing these checks, various 261 * just holding cpuset_mutex. While it is performing these checks, various
262 * callback routines can briefly acquire callback_mutex to query cpusets. 262 * callback routines can briefly acquire callback_lock to query cpusets.
263 * Once it is ready to make the changes, it takes callback_mutex, blocking 263 * Once it is ready to make the changes, it takes callback_lock, blocking
264 * everyone else. 264 * everyone else.
265 * 265 *
266 * Calls to the kernel memory allocator can not be made while holding 266 * Calls to the kernel memory allocator can not be made while holding
267 * callback_mutex, as that would risk double tripping on callback_mutex 267 * callback_lock, as that would risk double tripping on callback_lock
268 * from one of the callbacks into the cpuset code from within 268 * from one of the callbacks into the cpuset code from within
269 * __alloc_pages(). 269 * __alloc_pages().
270 * 270 *
271 * If a task is only holding callback_mutex, then it has read-only 271 * If a task is only holding callback_lock, then it has read-only
272 * access to cpusets. 272 * access to cpusets.
273 * 273 *
274 * Now, the task_struct fields mems_allowed and mempolicy may be changed 274 * Now, the task_struct fields mems_allowed and mempolicy may be changed
275 * by other task, we use alloc_lock in the task_struct fields to protect 275 * by other task, we use alloc_lock in the task_struct fields to protect
276 * them. 276 * them.
277 * 277 *
278 * The cpuset_common_file_read() handlers only hold callback_mutex across 278 * The cpuset_common_file_read() handlers only hold callback_lock across
279 * small pieces of code, such as when reading out possibly multi-word 279 * small pieces of code, such as when reading out possibly multi-word
280 * cpumasks and nodemasks. 280 * cpumasks and nodemasks.
281 * 281 *
@@ -284,7 +284,7 @@ static struct cpuset top_cpuset = {
284 */ 284 */
285 285
286static DEFINE_MUTEX(cpuset_mutex); 286static DEFINE_MUTEX(cpuset_mutex);
287static DEFINE_MUTEX(callback_mutex); 287static DEFINE_SPINLOCK(callback_lock);
288 288
289/* 289/*
290 * CPU / memory hotplug is handled asynchronously. 290 * CPU / memory hotplug is handled asynchronously.
@@ -329,7 +329,7 @@ static struct file_system_type cpuset_fs_type = {
329 * One way or another, we guarantee to return some non-empty subset 329 * One way or another, we guarantee to return some non-empty subset
330 * of cpu_online_mask. 330 * of cpu_online_mask.
331 * 331 *
332 * Call with callback_mutex held. 332 * Call with callback_lock or cpuset_mutex held.
333 */ 333 */
334static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask) 334static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
335{ 335{
@@ -347,7 +347,7 @@ static void guarantee_online_cpus(struct cpuset *cs, struct cpumask *pmask)
347 * One way or another, we guarantee to return some non-empty subset 347 * One way or another, we guarantee to return some non-empty subset
348 * of node_states[N_MEMORY]. 348 * of node_states[N_MEMORY].
349 * 349 *
350 * Call with callback_mutex held. 350 * Call with callback_lock or cpuset_mutex held.
351 */ 351 */
352static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask) 352static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
353{ 353{
@@ -359,7 +359,7 @@ static void guarantee_online_mems(struct cpuset *cs, nodemask_t *pmask)
359/* 359/*
360 * update task's spread flag if cpuset's page/slab spread flag is set 360 * update task's spread flag if cpuset's page/slab spread flag is set
361 * 361 *
362 * Called with callback_mutex/cpuset_mutex held 362 * Call with callback_lock or cpuset_mutex held.
363 */ 363 */
364static void cpuset_update_task_spread_flag(struct cpuset *cs, 364static void cpuset_update_task_spread_flag(struct cpuset *cs,
365 struct task_struct *tsk) 365 struct task_struct *tsk)
@@ -886,9 +886,9 @@ static void update_cpumasks_hier(struct cpuset *cs, struct cpumask *new_cpus)
886 continue; 886 continue;
887 rcu_read_unlock(); 887 rcu_read_unlock();
888 888
889 mutex_lock(&callback_mutex); 889 spin_lock_irq(&callback_lock);
890 cpumask_copy(cp->effective_cpus, new_cpus); 890 cpumask_copy(cp->effective_cpus, new_cpus);
891 mutex_unlock(&callback_mutex); 891 spin_unlock_irq(&callback_lock);
892 892
893 WARN_ON(!cgroup_on_dfl(cp->css.cgroup) && 893 WARN_ON(!cgroup_on_dfl(cp->css.cgroup) &&
894 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus)); 894 !cpumask_equal(cp->cpus_allowed, cp->effective_cpus));
@@ -953,9 +953,9 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs,
953 if (retval < 0) 953 if (retval < 0)
954 return retval; 954 return retval;
955 955
956 mutex_lock(&callback_mutex); 956 spin_lock_irq(&callback_lock);
957 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed); 957 cpumask_copy(cs->cpus_allowed, trialcs->cpus_allowed);
958 mutex_unlock(&callback_mutex); 958 spin_unlock_irq(&callback_lock);
959 959
960 /* use trialcs->cpus_allowed as a temp variable */ 960 /* use trialcs->cpus_allowed as a temp variable */
961 update_cpumasks_hier(cs, trialcs->cpus_allowed); 961 update_cpumasks_hier(cs, trialcs->cpus_allowed);
@@ -1142,9 +1142,9 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
1142 continue; 1142 continue;
1143 rcu_read_unlock(); 1143 rcu_read_unlock();
1144 1144
1145 mutex_lock(&callback_mutex); 1145 spin_lock_irq(&callback_lock);
1146 cp->effective_mems = *new_mems; 1146 cp->effective_mems = *new_mems;
1147 mutex_unlock(&callback_mutex); 1147 spin_unlock_irq(&callback_lock);
1148 1148
1149 WARN_ON(!cgroup_on_dfl(cp->css.cgroup) && 1149 WARN_ON(!cgroup_on_dfl(cp->css.cgroup) &&
1150 !nodes_equal(cp->mems_allowed, cp->effective_mems)); 1150 !nodes_equal(cp->mems_allowed, cp->effective_mems));
@@ -1165,7 +1165,7 @@ static void update_nodemasks_hier(struct cpuset *cs, nodemask_t *new_mems)
1165 * mempolicies and if the cpuset is marked 'memory_migrate', 1165 * mempolicies and if the cpuset is marked 'memory_migrate',
1166 * migrate the tasks pages to the new memory. 1166 * migrate the tasks pages to the new memory.
1167 * 1167 *
1168 * Call with cpuset_mutex held. May take callback_mutex during call. 1168 * Call with cpuset_mutex held. May take callback_lock during call.
1169 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, 1169 * Will take tasklist_lock, scan tasklist for tasks in cpuset cs,
1170 * lock each such tasks mm->mmap_sem, scan its vma's and rebind 1170 * lock each such tasks mm->mmap_sem, scan its vma's and rebind
1171 * their mempolicies to the cpusets new mems_allowed. 1171 * their mempolicies to the cpusets new mems_allowed.
@@ -1212,9 +1212,9 @@ static int update_nodemask(struct cpuset *cs, struct cpuset *trialcs,
1212 if (retval < 0) 1212 if (retval < 0)
1213 goto done; 1213 goto done;
1214 1214
1215 mutex_lock(&callback_mutex); 1215 spin_lock_irq(&callback_lock);
1216 cs->mems_allowed = trialcs->mems_allowed; 1216 cs->mems_allowed = trialcs->mems_allowed;
1217 mutex_unlock(&callback_mutex); 1217 spin_unlock_irq(&callback_lock);
1218 1218
1219 /* use trialcs->mems_allowed as a temp variable */ 1219 /* use trialcs->mems_allowed as a temp variable */
1220 update_nodemasks_hier(cs, &cs->mems_allowed); 1220 update_nodemasks_hier(cs, &cs->mems_allowed);
@@ -1305,9 +1305,9 @@ static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
1305 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs)) 1305 spread_flag_changed = ((is_spread_slab(cs) != is_spread_slab(trialcs))
1306 || (is_spread_page(cs) != is_spread_page(trialcs))); 1306 || (is_spread_page(cs) != is_spread_page(trialcs)));
1307 1307
1308 mutex_lock(&callback_mutex); 1308 spin_lock_irq(&callback_lock);
1309 cs->flags = trialcs->flags; 1309 cs->flags = trialcs->flags;
1310 mutex_unlock(&callback_mutex); 1310 spin_unlock_irq(&callback_lock);
1311 1311
1312 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed) 1312 if (!cpumask_empty(trialcs->cpus_allowed) && balance_flag_changed)
1313 rebuild_sched_domains_locked(); 1313 rebuild_sched_domains_locked();
@@ -1714,7 +1714,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
1714 count = seq_get_buf(sf, &buf); 1714 count = seq_get_buf(sf, &buf);
1715 s = buf; 1715 s = buf;
1716 1716
1717 mutex_lock(&callback_mutex); 1717 spin_lock_irq(&callback_lock);
1718 1718
1719 switch (type) { 1719 switch (type) {
1720 case FILE_CPULIST: 1720 case FILE_CPULIST:
@@ -1741,7 +1741,7 @@ static int cpuset_common_seq_show(struct seq_file *sf, void *v)
1741 seq_commit(sf, -1); 1741 seq_commit(sf, -1);
1742 } 1742 }
1743out_unlock: 1743out_unlock:
1744 mutex_unlock(&callback_mutex); 1744 spin_unlock_irq(&callback_lock);
1745 return ret; 1745 return ret;
1746} 1746}
1747 1747
@@ -1958,12 +1958,12 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
1958 1958
1959 cpuset_inc(); 1959 cpuset_inc();
1960 1960
1961 mutex_lock(&callback_mutex); 1961 spin_lock_irq(&callback_lock);
1962 if (cgroup_on_dfl(cs->css.cgroup)) { 1962 if (cgroup_on_dfl(cs->css.cgroup)) {
1963 cpumask_copy(cs->effective_cpus, parent->effective_cpus); 1963 cpumask_copy(cs->effective_cpus, parent->effective_cpus);
1964 cs->effective_mems = parent->effective_mems; 1964 cs->effective_mems = parent->effective_mems;
1965 } 1965 }
1966 mutex_unlock(&callback_mutex); 1966 spin_unlock_irq(&callback_lock);
1967 1967
1968 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags)) 1968 if (!test_bit(CGRP_CPUSET_CLONE_CHILDREN, &css->cgroup->flags))
1969 goto out_unlock; 1969 goto out_unlock;
@@ -1990,10 +1990,10 @@ static int cpuset_css_online(struct cgroup_subsys_state *css)
1990 } 1990 }
1991 rcu_read_unlock(); 1991 rcu_read_unlock();
1992 1992
1993 mutex_lock(&callback_mutex); 1993 spin_lock_irq(&callback_lock);
1994 cs->mems_allowed = parent->mems_allowed; 1994 cs->mems_allowed = parent->mems_allowed;
1995 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed); 1995 cpumask_copy(cs->cpus_allowed, parent->cpus_allowed);
1996 mutex_unlock(&callback_mutex); 1996 spin_unlock_irq(&callback_lock);
1997out_unlock: 1997out_unlock:
1998 mutex_unlock(&cpuset_mutex); 1998 mutex_unlock(&cpuset_mutex);
1999 return 0; 1999 return 0;
@@ -2032,7 +2032,7 @@ static void cpuset_css_free(struct cgroup_subsys_state *css)
2032static void cpuset_bind(struct cgroup_subsys_state *root_css) 2032static void cpuset_bind(struct cgroup_subsys_state *root_css)
2033{ 2033{
2034 mutex_lock(&cpuset_mutex); 2034 mutex_lock(&cpuset_mutex);
2035 mutex_lock(&callback_mutex); 2035 spin_lock_irq(&callback_lock);
2036 2036
2037 if (cgroup_on_dfl(root_css->cgroup)) { 2037 if (cgroup_on_dfl(root_css->cgroup)) {
2038 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask); 2038 cpumask_copy(top_cpuset.cpus_allowed, cpu_possible_mask);
@@ -2043,7 +2043,7 @@ static void cpuset_bind(struct cgroup_subsys_state *root_css)
2043 top_cpuset.mems_allowed = top_cpuset.effective_mems; 2043 top_cpuset.mems_allowed = top_cpuset.effective_mems;
2044 } 2044 }
2045 2045
2046 mutex_unlock(&callback_mutex); 2046 spin_unlock_irq(&callback_lock);
2047 mutex_unlock(&cpuset_mutex); 2047 mutex_unlock(&cpuset_mutex);
2048} 2048}
2049 2049
@@ -2128,12 +2128,12 @@ hotplug_update_tasks_legacy(struct cpuset *cs,
2128{ 2128{
2129 bool is_empty; 2129 bool is_empty;
2130 2130
2131 mutex_lock(&callback_mutex); 2131 spin_lock_irq(&callback_lock);
2132 cpumask_copy(cs->cpus_allowed, new_cpus); 2132 cpumask_copy(cs->cpus_allowed, new_cpus);
2133 cpumask_copy(cs->effective_cpus, new_cpus); 2133 cpumask_copy(cs->effective_cpus, new_cpus);
2134 cs->mems_allowed = *new_mems; 2134 cs->mems_allowed = *new_mems;
2135 cs->effective_mems = *new_mems; 2135 cs->effective_mems = *new_mems;
2136 mutex_unlock(&callback_mutex); 2136 spin_unlock_irq(&callback_lock);
2137 2137
2138 /* 2138 /*
2139 * Don't call update_tasks_cpumask() if the cpuset becomes empty, 2139 * Don't call update_tasks_cpumask() if the cpuset becomes empty,
@@ -2170,10 +2170,10 @@ hotplug_update_tasks(struct cpuset *cs,
2170 if (nodes_empty(*new_mems)) 2170 if (nodes_empty(*new_mems))
2171 *new_mems = parent_cs(cs)->effective_mems; 2171 *new_mems = parent_cs(cs)->effective_mems;
2172 2172
2173 mutex_lock(&callback_mutex); 2173 spin_lock_irq(&callback_lock);
2174 cpumask_copy(cs->effective_cpus, new_cpus); 2174 cpumask_copy(cs->effective_cpus, new_cpus);
2175 cs->effective_mems = *new_mems; 2175 cs->effective_mems = *new_mems;
2176 mutex_unlock(&callback_mutex); 2176 spin_unlock_irq(&callback_lock);
2177 2177
2178 if (cpus_updated) 2178 if (cpus_updated)
2179 update_tasks_cpumask(cs); 2179 update_tasks_cpumask(cs);
@@ -2259,21 +2259,21 @@ static void cpuset_hotplug_workfn(struct work_struct *work)
2259 2259
2260 /* synchronize cpus_allowed to cpu_active_mask */ 2260 /* synchronize cpus_allowed to cpu_active_mask */
2261 if (cpus_updated) { 2261 if (cpus_updated) {
2262 mutex_lock(&callback_mutex); 2262 spin_lock_irq(&callback_lock);
2263 if (!on_dfl) 2263 if (!on_dfl)
2264 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus); 2264 cpumask_copy(top_cpuset.cpus_allowed, &new_cpus);
2265 cpumask_copy(top_cpuset.effective_cpus, &new_cpus); 2265 cpumask_copy(top_cpuset.effective_cpus, &new_cpus);
2266 mutex_unlock(&callback_mutex); 2266 spin_unlock_irq(&callback_lock);
2267 /* we don't mess with cpumasks of tasks in top_cpuset */ 2267 /* we don't mess with cpumasks of tasks in top_cpuset */
2268 } 2268 }
2269 2269
2270 /* synchronize mems_allowed to N_MEMORY */ 2270 /* synchronize mems_allowed to N_MEMORY */
2271 if (mems_updated) { 2271 if (mems_updated) {
2272 mutex_lock(&callback_mutex); 2272 spin_lock_irq(&callback_lock);
2273 if (!on_dfl) 2273 if (!on_dfl)
2274 top_cpuset.mems_allowed = new_mems; 2274 top_cpuset.mems_allowed = new_mems;
2275 top_cpuset.effective_mems = new_mems; 2275 top_cpuset.effective_mems = new_mems;
2276 mutex_unlock(&callback_mutex); 2276 spin_unlock_irq(&callback_lock);
2277 update_tasks_nodemask(&top_cpuset); 2277 update_tasks_nodemask(&top_cpuset);
2278 } 2278 }
2279 2279
@@ -2366,11 +2366,13 @@ void __init cpuset_init_smp(void)
2366 2366
2367void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask) 2367void cpuset_cpus_allowed(struct task_struct *tsk, struct cpumask *pmask)
2368{ 2368{
2369 mutex_lock(&callback_mutex); 2369 unsigned long flags;
2370
2371 spin_lock_irqsave(&callback_lock, flags);
2370 rcu_read_lock(); 2372 rcu_read_lock();
2371 guarantee_online_cpus(task_cs(tsk), pmask); 2373 guarantee_online_cpus(task_cs(tsk), pmask);
2372 rcu_read_unlock(); 2374 rcu_read_unlock();
2373 mutex_unlock(&callback_mutex); 2375 spin_unlock_irqrestore(&callback_lock, flags);
2374} 2376}
2375 2377
2376void cpuset_cpus_allowed_fallback(struct task_struct *tsk) 2378void cpuset_cpus_allowed_fallback(struct task_struct *tsk)
@@ -2416,12 +2418,13 @@ void cpuset_init_current_mems_allowed(void)
2416nodemask_t cpuset_mems_allowed(struct task_struct *tsk) 2418nodemask_t cpuset_mems_allowed(struct task_struct *tsk)
2417{ 2419{
2418 nodemask_t mask; 2420 nodemask_t mask;
2421 unsigned long flags;
2419 2422
2420 mutex_lock(&callback_mutex); 2423 spin_lock_irqsave(&callback_lock, flags);
2421 rcu_read_lock(); 2424 rcu_read_lock();
2422 guarantee_online_mems(task_cs(tsk), &mask); 2425 guarantee_online_mems(task_cs(tsk), &mask);
2423 rcu_read_unlock(); 2426 rcu_read_unlock();
2424 mutex_unlock(&callback_mutex); 2427 spin_unlock_irqrestore(&callback_lock, flags);
2425 2428
2426 return mask; 2429 return mask;
2427} 2430}
@@ -2440,7 +2443,7 @@ int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
2440/* 2443/*
2441 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or 2444 * nearest_hardwall_ancestor() - Returns the nearest mem_exclusive or
2442 * mem_hardwall ancestor to the specified cpuset. Call holding 2445 * mem_hardwall ancestor to the specified cpuset. Call holding
2443 * callback_mutex. If no ancestor is mem_exclusive or mem_hardwall 2446 * callback_lock. If no ancestor is mem_exclusive or mem_hardwall
2444 * (an unusual configuration), then returns the root cpuset. 2447 * (an unusual configuration), then returns the root cpuset.
2445 */ 2448 */
2446static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs) 2449static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
@@ -2451,7 +2454,7 @@ static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
2451} 2454}
2452 2455
2453/** 2456/**
2454 * cpuset_node_allowed_softwall - Can we allocate on a memory node? 2457 * cpuset_node_allowed - Can we allocate on a memory node?
2455 * @node: is this an allowed node? 2458 * @node: is this an allowed node?
2456 * @gfp_mask: memory allocation flags 2459 * @gfp_mask: memory allocation flags
2457 * 2460 *
@@ -2463,13 +2466,6 @@ static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
2463 * flag, yes. 2466 * flag, yes.
2464 * Otherwise, no. 2467 * Otherwise, no.
2465 * 2468 *
2466 * If __GFP_HARDWALL is set, cpuset_node_allowed_softwall() reduces to
2467 * cpuset_node_allowed_hardwall(). Otherwise, cpuset_node_allowed_softwall()
2468 * might sleep, and might allow a node from an enclosing cpuset.
2469 *
2470 * cpuset_node_allowed_hardwall() only handles the simpler case of hardwall
2471 * cpusets, and never sleeps.
2472 *
2473 * The __GFP_THISNODE placement logic is really handled elsewhere, 2469 * The __GFP_THISNODE placement logic is really handled elsewhere,
2474 * by forcibly using a zonelist starting at a specified node, and by 2470 * by forcibly using a zonelist starting at a specified node, and by
2475 * (in get_page_from_freelist()) refusing to consider the zones for 2471 * (in get_page_from_freelist()) refusing to consider the zones for
@@ -2482,13 +2478,12 @@ static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
2482 * GFP_KERNEL allocations are not so marked, so can escape to the 2478 * GFP_KERNEL allocations are not so marked, so can escape to the
2483 * nearest enclosing hardwalled ancestor cpuset. 2479 * nearest enclosing hardwalled ancestor cpuset.
2484 * 2480 *
2485 * Scanning up parent cpusets requires callback_mutex. The 2481 * Scanning up parent cpusets requires callback_lock. The
2486 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit 2482 * __alloc_pages() routine only calls here with __GFP_HARDWALL bit
2487 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the 2483 * _not_ set if it's a GFP_KERNEL allocation, and all nodes in the
2488 * current tasks mems_allowed came up empty on the first pass over 2484 * current tasks mems_allowed came up empty on the first pass over
2489 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the 2485 * the zonelist. So only GFP_KERNEL allocations, if all nodes in the
2490 * cpuset are short of memory, might require taking the callback_mutex 2486 * cpuset are short of memory, might require taking the callback_lock.
2491 * mutex.
2492 * 2487 *
2493 * The first call here from mm/page_alloc:get_page_from_freelist() 2488 * The first call here from mm/page_alloc:get_page_from_freelist()
2494 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets, 2489 * has __GFP_HARDWALL set in gfp_mask, enforcing hardwall cpusets,
@@ -2505,20 +2500,15 @@ static struct cpuset *nearest_hardwall_ancestor(struct cpuset *cs)
2505 * TIF_MEMDIE - any node ok 2500 * TIF_MEMDIE - any node ok
2506 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok 2501 * GFP_KERNEL - any node in enclosing hardwalled cpuset ok
2507 * GFP_USER - only nodes in current tasks mems allowed ok. 2502 * GFP_USER - only nodes in current tasks mems allowed ok.
2508 *
2509 * Rule:
2510 * Don't call cpuset_node_allowed_softwall if you can't sleep, unless you
2511 * pass in the __GFP_HARDWALL flag set in gfp_flag, which disables
2512 * the code that might scan up ancestor cpusets and sleep.
2513 */ 2503 */
2514int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask) 2504int __cpuset_node_allowed(int node, gfp_t gfp_mask)
2515{ 2505{
2516 struct cpuset *cs; /* current cpuset ancestors */ 2506 struct cpuset *cs; /* current cpuset ancestors */
2517 int allowed; /* is allocation in zone z allowed? */ 2507 int allowed; /* is allocation in zone z allowed? */
2508 unsigned long flags;
2518 2509
2519 if (in_interrupt() || (gfp_mask & __GFP_THISNODE)) 2510 if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
2520 return 1; 2511 return 1;
2521 might_sleep_if(!(gfp_mask & __GFP_HARDWALL));
2522 if (node_isset(node, current->mems_allowed)) 2512 if (node_isset(node, current->mems_allowed))
2523 return 1; 2513 return 1;
2524 /* 2514 /*
@@ -2534,55 +2524,17 @@ int __cpuset_node_allowed_softwall(int node, gfp_t gfp_mask)
2534 return 1; 2524 return 1;
2535 2525
2536 /* Not hardwall and node outside mems_allowed: scan up cpusets */ 2526 /* Not hardwall and node outside mems_allowed: scan up cpusets */
2537 mutex_lock(&callback_mutex); 2527 spin_lock_irqsave(&callback_lock, flags);
2538 2528
2539 rcu_read_lock(); 2529 rcu_read_lock();
2540 cs = nearest_hardwall_ancestor(task_cs(current)); 2530 cs = nearest_hardwall_ancestor(task_cs(current));
2541 allowed = node_isset(node, cs->mems_allowed); 2531 allowed = node_isset(node, cs->mems_allowed);
2542 rcu_read_unlock(); 2532 rcu_read_unlock();
2543 2533
2544 mutex_unlock(&callback_mutex); 2534 spin_unlock_irqrestore(&callback_lock, flags);
2545 return allowed; 2535 return allowed;
2546} 2536}
2547 2537
2548/*
2549 * cpuset_node_allowed_hardwall - Can we allocate on a memory node?
2550 * @node: is this an allowed node?
2551 * @gfp_mask: memory allocation flags
2552 *
2553 * If we're in interrupt, yes, we can always allocate. If __GFP_THISNODE is
2554 * set, yes, we can always allocate. If node is in our task's mems_allowed,
2555 * yes. If the task has been OOM killed and has access to memory reserves as
2556 * specified by the TIF_MEMDIE flag, yes.
2557 * Otherwise, no.
2558 *
2559 * The __GFP_THISNODE placement logic is really handled elsewhere,
2560 * by forcibly using a zonelist starting at a specified node, and by
2561 * (in get_page_from_freelist()) refusing to consider the zones for
2562 * any node on the zonelist except the first. By the time any such
2563 * calls get to this routine, we should just shut up and say 'yes'.
2564 *
2565 * Unlike the cpuset_node_allowed_softwall() variant, above,
2566 * this variant requires that the node be in the current task's
2567 * mems_allowed or that we're in interrupt. It does not scan up the
2568 * cpuset hierarchy for the nearest enclosing mem_exclusive cpuset.
2569 * It never sleeps.
2570 */
2571int __cpuset_node_allowed_hardwall(int node, gfp_t gfp_mask)
2572{
2573 if (in_interrupt() || (gfp_mask & __GFP_THISNODE))
2574 return 1;
2575 if (node_isset(node, current->mems_allowed))
2576 return 1;
2577 /*
2578 * Allow tasks that have access to memory reserves because they have
2579 * been OOM killed to get memory anywhere.
2580 */
2581 if (unlikely(test_thread_flag(TIF_MEMDIE)))
2582 return 1;
2583 return 0;
2584}
2585
2586/** 2538/**
2587 * cpuset_mem_spread_node() - On which node to begin search for a file page 2539 * cpuset_mem_spread_node() - On which node to begin search for a file page
2588 * cpuset_slab_spread_node() - On which node to begin search for a slab page 2540 * cpuset_slab_spread_node() - On which node to begin search for a slab page
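
The cpuset conversion above swaps callback_mutex for a spinlock so that readers such as cpuset_cpus_allowed() and __cpuset_node_allowed() can run in contexts that must not sleep. The pattern is mechanical: writers in process context use the _irq variants, while readers that may already run with IRQs disabled use _irqsave/_irqrestore. Illustrative shape only, reusing names from the hunks above:

    static DEFINE_SPINLOCK(callback_lock);

    /* writer: process context, IRQs known to be enabled */
    static void write_side(struct cpuset *cs, const struct cpumask *new_cpus)
    {
            spin_lock_irq(&callback_lock);
            cpumask_copy(cs->effective_cpus, new_cpus);
            spin_unlock_irq(&callback_lock);
    }

    /* reader: may be entered with IRQs already off, so save/restore */
    static void read_side(struct task_struct *tsk, struct cpumask *pmask)
    {
            unsigned long flags;

            spin_lock_irqsave(&callback_lock, flags);
            rcu_read_lock();
            guarantee_online_cpus(task_cs(tsk), pmask);
            rcu_read_unlock();
            spin_unlock_irqrestore(&callback_lock, flags);
    }
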
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 3ab9048483fa..cbf9fb899d92 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -175,11 +175,11 @@ EXPORT_SYMBOL_GPL(irq_work_run);
175 175
176void irq_work_tick(void) 176void irq_work_tick(void)
177{ 177{
178 struct llist_head *raised = &__get_cpu_var(raised_list); 178 struct llist_head *raised = this_cpu_ptr(&raised_list);
179 179
180 if (!llist_empty(raised) && !arch_irq_work_has_interrupt()) 180 if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
181 irq_work_run_list(raised); 181 irq_work_run_list(raised);
182 irq_work_run_list(&__get_cpu_var(lazy_list)); 182 irq_work_run_list(this_cpu_ptr(&lazy_list));
183} 183}
184 184
185/* 185/*
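
__get_cpu_var() was on its way out of the tree at this point; this_cpu_ptr(&var) is the drop-in replacement whenever a pointer to this CPU's instance of a per-cpu variable is wanted. A minimal before/after sketch:

    static DEFINE_PER_CPU(struct llist_head, raised_list);

    static void tick_sketch(void)
    {
            /* old: struct llist_head *raised = &__get_cpu_var(raised_list); */
            struct llist_head *raised = this_cpu_ptr(&raised_list);

            if (!llist_empty(raised))
                    irq_work_run_list(raised);
    }
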
diff --git a/kernel/kprobes.c b/kernel/kprobes.c
index 831978cebf1d..06f58309fed2 100644
--- a/kernel/kprobes.c
+++ b/kernel/kprobes.c
@@ -1410,16 +1410,10 @@ static inline int check_kprobe_rereg(struct kprobe *p)
1410 return ret; 1410 return ret;
1411} 1411}
1412 1412
1413static int check_kprobe_address_safe(struct kprobe *p, 1413int __weak arch_check_ftrace_location(struct kprobe *p)
1414 struct module **probed_mod)
1415{ 1414{
1416 int ret = 0;
1417 unsigned long ftrace_addr; 1415 unsigned long ftrace_addr;
1418 1416
1419 /*
1420 * If the address is located on a ftrace nop, set the
1421 * breakpoint to the following instruction.
1422 */
1423 ftrace_addr = ftrace_location((unsigned long)p->addr); 1417 ftrace_addr = ftrace_location((unsigned long)p->addr);
1424 if (ftrace_addr) { 1418 if (ftrace_addr) {
1425#ifdef CONFIG_KPROBES_ON_FTRACE 1419#ifdef CONFIG_KPROBES_ON_FTRACE
@@ -1431,7 +1425,17 @@ static int check_kprobe_address_safe(struct kprobe *p,
1431 return -EINVAL; 1425 return -EINVAL;
1432#endif 1426#endif
1433 } 1427 }
1428 return 0;
1429}
1434 1430
1431static int check_kprobe_address_safe(struct kprobe *p,
1432 struct module **probed_mod)
1433{
1434 int ret;
1435
1436 ret = arch_check_ftrace_location(p);
1437 if (ret)
1438 return ret;
1435 jump_label_lock(); 1439 jump_label_lock();
1436 preempt_disable(); 1440 preempt_disable();
1437 1441
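
The kprobes hunk splits the ftrace-address check into a __weak function so that an architecture can substitute its own policy simply by defining a strong symbol of the same name; the linker prefers the strong definition and the generic copy becomes the fallback. A hedged sketch of what an architecture override could look like (the body here is hypothetical, not any real arch's code):

    /* hypothetical arch/<arch>/kernel/kprobes.c override */
    int arch_check_ftrace_location(struct kprobe *p)
    {
            /* e.g. refuse to probe ftrace-managed instruction slots */
            if (ftrace_location((unsigned long)p->addr))
                    return -EINVAL;
            return 0;
    }
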
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 02aa4185b17e..61eea02b53f5 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -169,6 +169,8 @@ cond_syscall(ppc_rtas);
169cond_syscall(sys_spu_run); 169cond_syscall(sys_spu_run);
170cond_syscall(sys_spu_create); 170cond_syscall(sys_spu_create);
171cond_syscall(sys_subpage_prot); 171cond_syscall(sys_subpage_prot);
172cond_syscall(sys_s390_pci_mmio_read);
173cond_syscall(sys_s390_pci_mmio_write);
172 174
173/* mmu depending weak syscall entries */ 175/* mmu depending weak syscall entries */
174cond_syscall(sys_mprotect); 176cond_syscall(sys_mprotect);
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index 2e949cc9c9f1..b79f39bda7e1 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -792,7 +792,7 @@ int __clocksource_register_scale(struct clocksource *cs, u32 scale, u32 freq)
792 /* Initialize mult/shift and max_idle_ns */ 792 /* Initialize mult/shift and max_idle_ns */
793 __clocksource_updatefreq_scale(cs, scale, freq); 793 __clocksource_updatefreq_scale(cs, scale, freq);
794 794
795 /* Add clocksource to the clcoksource list */ 795 /* Add clocksource to the clocksource list */
796 mutex_lock(&clocksource_mutex); 796 mutex_lock(&clocksource_mutex);
797 clocksource_enqueue(cs); 797 clocksource_enqueue(cs);
798 clocksource_enqueue_watchdog(cs); 798 clocksource_enqueue_watchdog(cs);
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 1f4356037a7d..4d54b7540585 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -235,7 +235,7 @@ void tick_nohz_full_kick(void)
235 if (!tick_nohz_full_cpu(smp_processor_id())) 235 if (!tick_nohz_full_cpu(smp_processor_id()))
236 return; 236 return;
237 237
238 irq_work_queue(&__get_cpu_var(nohz_full_kick_work)); 238 irq_work_queue(this_cpu_ptr(&nohz_full_kick_work));
239} 239}
240 240
241/* 241/*
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1af4f8f2ab5d..ab76b7bcb36a 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -2031,7 +2031,7 @@ void trace_printk_init_buffers(void)
2031 pr_warning("** trace_printk() being used. Allocating extra memory. **\n"); 2031 pr_warning("** trace_printk() being used. Allocating extra memory. **\n");
2032 pr_warning("** **\n"); 2032 pr_warning("** **\n");
2033 pr_warning("** This means that this is a DEBUG kernel and it is **\n"); 2033 pr_warning("** This means that this is a DEBUG kernel and it is **\n");
2034 pr_warning("** unsafe for produciton use. **\n"); 2034 pr_warning("** unsafe for production use. **\n");
2035 pr_warning("** **\n"); 2035 pr_warning("** **\n");
2036 pr_warning("** If you see this message and you are not debugging **\n"); 2036 pr_warning("** If you see this message and you are not debugging **\n");
2037 pr_warning("** the kernel, report this immediately to your vendor! **\n"); 2037 pr_warning("** the kernel, report this immediately to your vendor! **\n");
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 09b685daee3d..6202b08f1933 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -1804,8 +1804,8 @@ static void pool_mayday_timeout(unsigned long __pool)
1804 struct worker_pool *pool = (void *)__pool; 1804 struct worker_pool *pool = (void *)__pool;
1805 struct work_struct *work; 1805 struct work_struct *work;
1806 1806
1807 spin_lock_irq(&wq_mayday_lock); /* for wq->maydays */ 1807 spin_lock_irq(&pool->lock);
1808 spin_lock(&pool->lock); 1808 spin_lock(&wq_mayday_lock); /* for wq->maydays */
1809 1809
1810 if (need_to_create_worker(pool)) { 1810 if (need_to_create_worker(pool)) {
1811 /* 1811 /*
@@ -1818,8 +1818,8 @@ static void pool_mayday_timeout(unsigned long __pool)
1818 send_mayday(work); 1818 send_mayday(work);
1819 } 1819 }
1820 1820
1821 spin_unlock(&pool->lock); 1821 spin_unlock(&wq_mayday_lock);
1822 spin_unlock_irq(&wq_mayday_lock); 1822 spin_unlock_irq(&pool->lock);
1823 1823
1824 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL); 1824 mod_timer(&pool->mayday_timer, jiffies + MAYDAY_INTERVAL);
1825} 1825}
@@ -2248,12 +2248,30 @@ repeat:
2248 * Slurp in all works issued via this workqueue and 2248 * Slurp in all works issued via this workqueue and
2249 * process'em. 2249 * process'em.
2250 */ 2250 */
2251 WARN_ON_ONCE(!list_empty(&rescuer->scheduled)); 2251 WARN_ON_ONCE(!list_empty(scheduled));
2252 list_for_each_entry_safe(work, n, &pool->worklist, entry) 2252 list_for_each_entry_safe(work, n, &pool->worklist, entry)
2253 if (get_work_pwq(work) == pwq) 2253 if (get_work_pwq(work) == pwq)
2254 move_linked_works(work, scheduled, &n); 2254 move_linked_works(work, scheduled, &n);
2255 2255
2256 process_scheduled_works(rescuer); 2256 if (!list_empty(scheduled)) {
2257 process_scheduled_works(rescuer);
2258
2259 /*
2260 * The above execution of rescued work items could
2261 * have created more to rescue through
2262 * pwq_activate_first_delayed() or chained
2263 * queueing. Let's put @pwq back on mayday list so
2264 * that such back-to-back work items, which may be
2265 * being used to relieve memory pressure, don't
 2266 * incur MAYDAY_INTERVAL delay in between.
2267 */
2268 if (need_to_create_worker(pool)) {
2269 spin_lock(&wq_mayday_lock);
2270 get_pwq(pwq);
2271 list_move_tail(&pwq->mayday_node, &wq->maydays);
2272 spin_unlock(&wq_mayday_lock);
2273 }
2274 }
2257 2275
2258 /* 2276 /*
2259 * Put the reference grabbed by send_mayday(). @pool won't 2277 * Put the reference grabbed by send_mayday(). @pool won't
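
The pool_mayday_timeout() hunk reverses the lock nesting so that pool->lock is always taken outside wq_mayday_lock, matching the order used by the rescuer path above it; two sites nesting the same pair of locks in opposite orders is a textbook AB-BA deadlock. The resulting convention, sketched:

    /* convention after the fix: pool->lock (outer, irq-safe)
     * -> wq_mayday_lock (inner); sketch only */
    static void touch_maydays(struct worker_pool *pool)
    {
            spin_lock_irq(&pool->lock);
            spin_lock(&wq_mayday_lock);

            /* ... inspect pool state, move pwq->mayday_node ... */

            spin_unlock(&wq_mayday_lock);
            spin_unlock_irq(&pool->lock);
    }
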
diff --git a/mm/gup.c b/mm/gup.c
index cd62c8c90d4a..0ca1df9075ab 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -3,7 +3,6 @@
3#include <linux/err.h> 3#include <linux/err.h>
4#include <linux/spinlock.h> 4#include <linux/spinlock.h>
5 5
6#include <linux/hugetlb.h>
7#include <linux/mm.h> 6#include <linux/mm.h>
8#include <linux/pagemap.h> 7#include <linux/pagemap.h>
9#include <linux/rmap.h> 8#include <linux/rmap.h>
@@ -12,6 +11,7 @@
12 11
13#include <linux/sched.h> 12#include <linux/sched.h>
14#include <linux/rwsem.h> 13#include <linux/rwsem.h>
14#include <linux/hugetlb.h>
15#include <asm/pgtable.h> 15#include <asm/pgtable.h>
16 16
17#include "internal.h" 17#include "internal.h"
@@ -875,6 +875,49 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
875 return 1; 875 return 1;
876} 876}
877 877
878static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
879 unsigned long end, int write,
880 struct page **pages, int *nr)
881{
882 int refs;
883 struct page *head, *page, *tail;
884
885 if (write && !pgd_write(orig))
886 return 0;
887
888 refs = 0;
889 head = pgd_page(orig);
890 page = head + ((addr & ~PGDIR_MASK) >> PAGE_SHIFT);
891 tail = page;
892 do {
893 VM_BUG_ON_PAGE(compound_head(page) != head, page);
894 pages[*nr] = page;
895 (*nr)++;
896 page++;
897 refs++;
898 } while (addr += PAGE_SIZE, addr != end);
899
900 if (!page_cache_add_speculative(head, refs)) {
901 *nr -= refs;
902 return 0;
903 }
904
905 if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
906 *nr -= refs;
907 while (refs--)
908 put_page(head);
909 return 0;
910 }
911
912 while (refs--) {
913 if (PageTail(tail))
914 get_huge_page_tail(tail);
915 tail++;
916 }
917
918 return 1;
919}
920
878static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, 921static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
879 int write, struct page **pages, int *nr) 922 int write, struct page **pages, int *nr)
880{ 923{
@@ -902,6 +945,14 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
902 pages, nr)) 945 pages, nr))
903 return 0; 946 return 0;
904 947
948 } else if (unlikely(is_hugepd(__hugepd(pmd_val(pmd))))) {
949 /*
 950 * architectures can use different formats for the hugetlbfs
 951 * pmd and the THP pmd
952 */
953 if (!gup_huge_pd(__hugepd(pmd_val(pmd)), addr,
954 PMD_SHIFT, next, write, pages, nr))
955 return 0;
905 } else if (!gup_pte_range(pmd, addr, next, write, pages, nr)) 956 } else if (!gup_pte_range(pmd, addr, next, write, pages, nr))
906 return 0; 957 return 0;
907 } while (pmdp++, addr = next, addr != end); 958 } while (pmdp++, addr = next, addr != end);
@@ -909,22 +960,26 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
909 return 1; 960 return 1;
910} 961}
911 962
912static int gup_pud_range(pgd_t *pgdp, unsigned long addr, unsigned long end, 963static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
913 int write, struct page **pages, int *nr) 964 int write, struct page **pages, int *nr)
914{ 965{
915 unsigned long next; 966 unsigned long next;
916 pud_t *pudp; 967 pud_t *pudp;
917 968
918 pudp = pud_offset(pgdp, addr); 969 pudp = pud_offset(&pgd, addr);
919 do { 970 do {
920 pud_t pud = ACCESS_ONCE(*pudp); 971 pud_t pud = ACCESS_ONCE(*pudp);
921 972
922 next = pud_addr_end(addr, end); 973 next = pud_addr_end(addr, end);
923 if (pud_none(pud)) 974 if (pud_none(pud))
924 return 0; 975 return 0;
925 if (pud_huge(pud)) { 976 if (unlikely(pud_huge(pud))) {
926 if (!gup_huge_pud(pud, pudp, addr, next, write, 977 if (!gup_huge_pud(pud, pudp, addr, next, write,
927 pages, nr)) 978 pages, nr))
979 return 0;
980 } else if (unlikely(is_hugepd(__hugepd(pud_val(pud))))) {
981 if (!gup_huge_pd(__hugepd(pud_val(pud)), addr,
982 PUD_SHIFT, next, write, pages, nr))
928 return 0; 983 return 0;
929 } else if (!gup_pmd_range(pud, addr, next, write, pages, nr)) 984 } else if (!gup_pmd_range(pud, addr, next, write, pages, nr))
930 return 0; 985 return 0;
@@ -970,10 +1025,20 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
970 local_irq_save(flags); 1025 local_irq_save(flags);
971 pgdp = pgd_offset(mm, addr); 1026 pgdp = pgd_offset(mm, addr);
972 do { 1027 do {
1028 pgd_t pgd = ACCESS_ONCE(*pgdp);
1029
973 next = pgd_addr_end(addr, end); 1030 next = pgd_addr_end(addr, end);
974 if (pgd_none(*pgdp)) 1031 if (pgd_none(pgd))
975 break; 1032 break;
976 else if (!gup_pud_range(pgdp, addr, next, write, pages, &nr)) 1033 if (unlikely(pgd_huge(pgd))) {
1034 if (!gup_huge_pgd(pgd, pgdp, addr, next, write,
1035 pages, &nr))
1036 break;
1037 } else if (unlikely(is_hugepd(__hugepd(pgd_val(pgd))))) {
1038 if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr,
1039 PGDIR_SHIFT, next, write, pages, &nr))
1040 break;
1041 } else if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
977 break; 1042 break;
978 } while (pgdp++, addr = next, addr != end); 1043 } while (pgdp++, addr = next, addr != end);
979 local_irq_restore(flags); 1044 local_irq_restore(flags);
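
gup_huge_pgd() above follows the lockless fast-gup recipe: snapshot the entry, walk and count the pages, take the references speculatively, then re-read the entry and back everything out if it changed while the references were being taken. The back-out step, condensed into one hypothetical helper (seal_refs is not a real function; the calls and checks mirror the hunk above):

    static int seal_refs(pgd_t orig, pgd_t *pgdp, struct page *head,
                         int refs, int *nr)
    {
            if (!page_cache_add_speculative(head, refs)) {
                    *nr -= refs;            /* page is being freed, give up */
                    return 0;
            }
            if (unlikely(pgd_val(orig) != pgd_val(*pgdp))) {
                    *nr -= refs;            /* entry changed under us */
                    while (refs--)
                            put_page(head);
                    return 0;
            }
            return 1;                       /* references are now stable */
    }
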
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 5b2c6875fc38..46f96c23cc27 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -804,7 +804,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
804 return VM_FAULT_OOM; 804 return VM_FAULT_OOM;
805 if (unlikely(khugepaged_enter(vma, vma->vm_flags))) 805 if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
806 return VM_FAULT_OOM; 806 return VM_FAULT_OOM;
807 if (!(flags & FAULT_FLAG_WRITE) && 807 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm) &&
808 transparent_hugepage_use_zero_page()) { 808 transparent_hugepage_use_zero_page()) {
809 spinlock_t *ptl; 809 spinlock_t *ptl;
810 pgtable_t pgtable; 810 pgtable_t pgtable;
@@ -1399,7 +1399,8 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
1399 * pgtable_trans_huge_withdraw after finishing pmdp related 1399 * pgtable_trans_huge_withdraw after finishing pmdp related
1400 * operations. 1400 * operations.
1401 */ 1401 */
1402 orig_pmd = pmdp_get_and_clear(tlb->mm, addr, pmd); 1402 orig_pmd = pmdp_get_and_clear_full(tlb->mm, addr, pmd,
1403 tlb->fullmm);
1403 tlb_remove_pmd_tlb_entry(tlb, pmd, addr); 1404 tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
1404 pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd); 1405 pgtable = pgtable_trans_huge_withdraw(tlb->mm, pmd);
1405 if (is_huge_zero_pmd(orig_pmd)) { 1406 if (is_huge_zero_pmd(orig_pmd)) {
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 30cd96879152..919b86a2164d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -582,7 +582,7 @@ retry_cpuset:
582 582
583 for_each_zone_zonelist_nodemask(zone, z, zonelist, 583 for_each_zone_zonelist_nodemask(zone, z, zonelist,
584 MAX_NR_ZONES - 1, nodemask) { 584 MAX_NR_ZONES - 1, nodemask) {
585 if (cpuset_zone_allowed_softwall(zone, htlb_alloc_mask(h))) { 585 if (cpuset_zone_allowed(zone, htlb_alloc_mask(h))) {
586 page = dequeue_huge_page_node(h, zone_to_nid(zone)); 586 page = dequeue_huge_page_node(h, zone_to_nid(zone));
587 if (page) { 587 if (page) {
588 if (avoid_reserve) 588 if (avoid_reserve)
diff --git a/mm/memory.c b/mm/memory.c
index 0b3f6c71620d..4b5a282e1107 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2627,7 +2627,7 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
2627 return VM_FAULT_SIGBUS; 2627 return VM_FAULT_SIGBUS;
2628 2628
2629 /* Use the zero-page for reads */ 2629 /* Use the zero-page for reads */
2630 if (!(flags & FAULT_FLAG_WRITE)) { 2630 if (!(flags & FAULT_FLAG_WRITE) && !mm_forbids_zeropage(mm)) {
2631 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), 2631 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
2632 vma->vm_page_prot)); 2632 vma->vm_page_prot));
2633 page_table = pte_offset_map_lock(mm, pmd, address, &ptl); 2633 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
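
The two zero-page hunks (huge_memory.c and memory.c above) gate use of the shared zero page on a new per-mm predicate; an architecture that cannot tolerate it defines mm_forbids_zeropage() itself, and everyone else inherits a permissive default. A sketch of that expected default, hedged since the header side is not part of this diff:

    /* generic fallback; an architecture provides its own definition
     * ahead of this to veto the shared zero page per-mm */
    #ifndef mm_forbids_zeropage
    #define mm_forbids_zeropage(mm) (0)
    #endif
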
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 3b014d326151..864bba992735 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -233,7 +233,7 @@ static enum oom_constraint constrained_alloc(struct zonelist *zonelist,
233 /* Check this allocation failure is caused by cpuset's wall function */ 233 /* Check this allocation failure is caused by cpuset's wall function */
234 for_each_zone_zonelist_nodemask(zone, z, zonelist, 234 for_each_zone_zonelist_nodemask(zone, z, zonelist,
235 high_zoneidx, nodemask) 235 high_zoneidx, nodemask)
236 if (!cpuset_zone_allowed_softwall(zone, gfp_mask)) 236 if (!cpuset_zone_allowed(zone, gfp_mask))
237 cpuset_limited = true; 237 cpuset_limited = true;
238 238
239 if (cpuset_limited) { 239 if (cpuset_limited) {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a7198c065999..df542feaac3b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1990,7 +1990,7 @@ zonelist_scan:
1990 1990
1991 /* 1991 /*
1992 * Scan zonelist, looking for a zone with enough free. 1992 * Scan zonelist, looking for a zone with enough free.
1993 * See also __cpuset_node_allowed_softwall() comment in kernel/cpuset.c. 1993 * See also __cpuset_node_allowed() comment in kernel/cpuset.c.
1994 */ 1994 */
1995 for_each_zone_zonelist_nodemask(zone, z, zonelist, 1995 for_each_zone_zonelist_nodemask(zone, z, zonelist,
1996 high_zoneidx, nodemask) { 1996 high_zoneidx, nodemask) {
@@ -2001,7 +2001,7 @@ zonelist_scan:
2001 continue; 2001 continue;
2002 if (cpusets_enabled() && 2002 if (cpusets_enabled() &&
2003 (alloc_flags & ALLOC_CPUSET) && 2003 (alloc_flags & ALLOC_CPUSET) &&
2004 !cpuset_zone_allowed_softwall(zone, gfp_mask)) 2004 !cpuset_zone_allowed(zone, gfp_mask))
2005 continue; 2005 continue;
2006 /* 2006 /*
2007 * Distribute pages in proportion to the individual 2007 * Distribute pages in proportion to the individual
@@ -2529,7 +2529,7 @@ gfp_to_alloc_flags(gfp_t gfp_mask)
2529 alloc_flags |= ALLOC_HARDER; 2529 alloc_flags |= ALLOC_HARDER;
2530 /* 2530 /*
2531 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the 2531 * Ignore cpuset mems for GFP_ATOMIC rather than fail, see the
2532 * comment for __cpuset_node_allowed_softwall(). 2532 * comment for __cpuset_node_allowed().
2533 */ 2533 */
2534 alloc_flags &= ~ALLOC_CPUSET; 2534 alloc_flags &= ~ALLOC_CPUSET;
2535 } else if (unlikely(rt_task(current)) && !in_interrupt()) 2535 } else if (unlikely(rt_task(current)) && !in_interrupt())
diff --git a/mm/percpu.c b/mm/percpu.c
index 014bab65e0ff..d39e2f4e335c 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -1591,7 +1591,7 @@ int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
1591 if (cpu == NR_CPUS) 1591 if (cpu == NR_CPUS)
1592 continue; 1592 continue;
1593 1593
1594 PCPU_SETUP_BUG_ON(cpu > nr_cpu_ids); 1594 PCPU_SETUP_BUG_ON(cpu >= nr_cpu_ids);
1595 PCPU_SETUP_BUG_ON(!cpu_possible(cpu)); 1595 PCPU_SETUP_BUG_ON(!cpu_possible(cpu));
1596 PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX); 1596 PCPU_SETUP_BUG_ON(unit_map[cpu] != UINT_MAX);
1597 1597
diff --git a/mm/slab.c b/mm/slab.c
index 79e15f0a2a6e..fee275b5b6b7 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3015,7 +3015,7 @@ retry:
3015 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) { 3015 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3016 nid = zone_to_nid(zone); 3016 nid = zone_to_nid(zone);
3017 3017
3018 if (cpuset_zone_allowed_hardwall(zone, flags) && 3018 if (cpuset_zone_allowed(zone, flags | __GFP_HARDWALL) &&
3019 get_node(cache, nid) && 3019 get_node(cache, nid) &&
3020 get_node(cache, nid)->free_objects) { 3020 get_node(cache, nid)->free_objects) {
3021 obj = ____cache_alloc_node(cache, 3021 obj = ____cache_alloc_node(cache,
diff --git a/mm/slub.c b/mm/slub.c
index 386bbed76e94..765c5884d03d 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1665,7 +1665,8 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
1665 1665
1666 n = get_node(s, zone_to_nid(zone)); 1666 n = get_node(s, zone_to_nid(zone));
1667 1667
1668 if (n && cpuset_zone_allowed_hardwall(zone, flags) && 1668 if (n && cpuset_zone_allowed(zone,
1669 flags | __GFP_HARDWALL) &&
1669 n->nr_partial > s->min_partial) { 1670 n->nr_partial > s->min_partial) {
1670 object = get_partial_node(s, n, c, flags); 1671 object = get_partial_node(s, n, c, flags);
1671 if (object) { 1672 if (object) {
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4636d9e822c1..a384339bf718 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2405,7 +2405,8 @@ static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
2405 * to global LRU. 2405 * to global LRU.
2406 */ 2406 */
2407 if (global_reclaim(sc)) { 2407 if (global_reclaim(sc)) {
2408 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 2408 if (!cpuset_zone_allowed(zone,
2409 GFP_KERNEL | __GFP_HARDWALL))
2409 continue; 2410 continue;
2410 2411
2411 lru_pages += zone_reclaimable_pages(zone); 2412 lru_pages += zone_reclaimable_pages(zone);
@@ -3388,7 +3389,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
3388 if (!populated_zone(zone)) 3389 if (!populated_zone(zone))
3389 return; 3390 return;
3390 3391
3391 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 3392 if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
3392 return; 3393 return;
3393 pgdat = zone->zone_pgdat; 3394 pgdat = zone->zone_pgdat;
3394 if (pgdat->kswapd_max_order < order) { 3395 if (pgdat->kswapd_max_order < order) {
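
The remaining mm churn above is one API consolidation: the cpuset_zone_allowed_softwall()/_hardwall() pair collapses into a single cpuset_zone_allowed(), with hardwall semantics now requested through the gfp mask rather than the function name. Before/after at a typical call site:

    /* before: behaviour encoded in the function name */
    if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
            return;

    /* after: behaviour encoded in the gfp mask */
    if (!cpuset_zone_allowed(zone, GFP_KERNEL | __GFP_HARDWALL))
            return;
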
diff --git a/mm/zbud.c b/mm/zbud.c
index ecf1dbef6983..ec71b37fb06c 100644
--- a/mm/zbud.c
+++ b/mm/zbud.c
@@ -619,5 +619,5 @@ module_init(init_zbud);
619module_exit(exit_zbud); 619module_exit(exit_zbud);
620 620
621MODULE_LICENSE("GPL"); 621MODULE_LICENSE("GPL");
622MODULE_AUTHOR("Seth Jennings <sjenning@linux.vnet.ibm.com>"); 622MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
623MODULE_DESCRIPTION("Buddy Allocator for Compressed Pages"); 623MODULE_DESCRIPTION("Buddy Allocator for Compressed Pages");
diff --git a/mm/zswap.c b/mm/zswap.c
index ea064c1a09ba..c1543061a192 100644
--- a/mm/zswap.c
+++ b/mm/zswap.c
@@ -951,5 +951,5 @@ error:
951late_initcall(init_zswap); 951late_initcall(init_zswap);
952 952
953MODULE_LICENSE("GPL"); 953MODULE_LICENSE("GPL");
954MODULE_AUTHOR("Seth Jennings <sjenning@linux.vnet.ibm.com>"); 954MODULE_AUTHOR("Seth Jennings <sjennings@variantweb.net>");
955MODULE_DESCRIPTION("Compressed cache for swap pages"); 955MODULE_DESCRIPTION("Compressed cache for swap pages");
diff --git a/scripts/kconfig/list.h b/scripts/kconfig/list.h
index 685d80e1bb0e..2cf23f002d3f 100644
--- a/scripts/kconfig/list.h
+++ b/scripts/kconfig/list.h
@@ -34,7 +34,7 @@ struct list_head {
34 * list_entry - get the struct for this entry 34 * list_entry - get the struct for this entry
35 * @ptr: the &struct list_head pointer. 35 * @ptr: the &struct list_head pointer.
36 * @type: the type of the struct this is embedded in. 36 * @type: the type of the struct this is embedded in.
37 * @member: the name of the list_struct within the struct. 37 * @member: the name of the list_head within the struct.
38 */ 38 */
39#define list_entry(ptr, type, member) \ 39#define list_entry(ptr, type, member) \
40 container_of(ptr, type, member) 40 container_of(ptr, type, member)
@@ -43,7 +43,7 @@ struct list_head {
43 * list_for_each_entry - iterate over list of given type 43 * list_for_each_entry - iterate over list of given type
44 * @pos: the type * to use as a loop cursor. 44 * @pos: the type * to use as a loop cursor.
45 * @head: the head for your list. 45 * @head: the head for your list.
46 * @member: the name of the list_struct within the struct. 46 * @member: the name of the list_head within the struct.
47 */ 47 */
48#define list_for_each_entry(pos, head, member) \ 48#define list_for_each_entry(pos, head, member) \
49 for (pos = list_entry((head)->next, typeof(*pos), member); \ 49 for (pos = list_entry((head)->next, typeof(*pos), member); \
@@ -55,7 +55,7 @@ struct list_head {
55 * @pos: the type * to use as a loop cursor. 55 * @pos: the type * to use as a loop cursor.
56 * @n: another type * to use as temporary storage 56 * @n: another type * to use as temporary storage
57 * @head: the head for your list. 57 * @head: the head for your list.
58 * @member: the name of the list_struct within the struct. 58 * @member: the name of the list_head within the struct.
59 */ 59 */
60#define list_for_each_entry_safe(pos, n, head, member) \ 60#define list_for_each_entry_safe(pos, n, head, member) \
61 for (pos = list_entry((head)->next, typeof(*pos), member), \ 61 for (pos = list_entry((head)->next, typeof(*pos), member), \
diff --git a/scripts/recordmcount.c b/scripts/recordmcount.c
index 001facfa5b74..3d1984e59a30 100644
--- a/scripts/recordmcount.c
+++ b/scripts/recordmcount.c
@@ -404,7 +404,7 @@ do_file(char const *const fname)
404 } 404 }
405 if (w2(ghdr->e_machine) == EM_S390) { 405 if (w2(ghdr->e_machine) == EM_S390) {
406 reltype = R_390_64; 406 reltype = R_390_64;
407 mcount_adjust_64 = -8; 407 mcount_adjust_64 = -14;
408 } 408 }
409 if (w2(ghdr->e_machine) == EM_MIPS) { 409 if (w2(ghdr->e_machine) == EM_MIPS) {
410 reltype = R_MIPS_64; 410 reltype = R_MIPS_64;
diff --git a/scripts/recordmcount.pl b/scripts/recordmcount.pl
index d4b665610d67..56ea99a12ab7 100755
--- a/scripts/recordmcount.pl
+++ b/scripts/recordmcount.pl
@@ -243,7 +243,7 @@ if ($arch eq "x86_64") {
243 243
244} elsif ($arch eq "s390" && $bits == 64) { 244} elsif ($arch eq "s390" && $bits == 64) {
245 $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$"; 245 $mcount_regex = "^\\s*([0-9a-fA-F]+):\\s*R_390_(PC|PLT)32DBL\\s+_mcount\\+0x2\$";
246 $mcount_adjust = -8; 246 $mcount_adjust = -14;
247 $alignment = 8; 247 $alignment = 8;
248 $type = ".quad"; 248 $type = ".quad";
249 $ld .= " -m elf64_s390"; 249 $ld .= " -m elf64_s390";
diff --git a/tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c b/tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c
index af4b0508be77..aaca1f44e788 100644
--- a/tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c
+++ b/tools/usb/ffs-aio-example/multibuff/device_app/aio_multibuff.c
@@ -342,7 +342,7 @@ int main(int argc, char *argv[])
342 iobuf[i].requested = ret; 342 iobuf[i].requested = ret;
343 printf("submit: %d requests buf: %d\n", ret, i); 343 printf("submit: %d requests buf: %d\n", ret, i);
344 } else 344 } else
345 perror("unable to submit reqests"); 345 perror("unable to submit requests");
346 } 346 }
347 347
348 /* if event is ready to read */ 348 /* if event is ready to read */
diff --git a/tools/usb/usbip/libsrc/list.h b/tools/usb/usbip/libsrc/list.h
index 8d0c936e184f..5eaaa78e2c6a 100644
--- a/tools/usb/usbip/libsrc/list.h
+++ b/tools/usb/usbip/libsrc/list.h
@@ -98,7 +98,7 @@ static inline void list_del(struct list_head *entry)
98 * list_entry - get the struct for this entry 98 * list_entry - get the struct for this entry
99 * @ptr: the &struct list_head pointer. 99 * @ptr: the &struct list_head pointer.
100 * @type: the type of the struct this is embedded in. 100 * @type: the type of the struct this is embedded in.
101 * @member: the name of the list_struct within the struct. 101 * @member: the name of the list_head within the struct.
102 */ 102 */
103#define list_entry(ptr, type, member) \ 103#define list_entry(ptr, type, member) \
104 container_of(ptr, type, member) 104 container_of(ptr, type, member)